Compare commits

52 commits
v9.0.0-alp...
| SHA1 |
| --- |
| 5138dd925d |
| 0a14ecca4e |
| 965e51b630 |
| 62c8c576b9 |
| d2581804eb |
| 67a52dbc37 |
| 96463f1f44 |
| 4d4ffca1ba |
| a6e23fd3a8 |
| a86319b14d |
| b030084f24 |
| 591bf56cba |
| b38bed5bfa |
| 489e5c5809 |
| 3bc89758bf |
| 8ba13d31d8 |
| 3da4572d1b |
| 56225051df |
| d6cb0dd5b7 |
| ae7853798c |
| f400e68ad1 |
| 41a2159f63 |
| 710b937bff |
| 926b468c6d |
| be0b96b5f5 |
| 821e77e7ad |
| 27774c9d3c |
| 9657180af6 |
| bfb4196439 |
| 40860afe0e |
| c7d9b00fe3 |
| 25933c003b |
| 98b38028aa |
| c3f987caaf |
| d726942ad1 |
| 1650e3d264 |
| 46b08caa4f |
| 2a93c062e4 |
| 9d719ce874 |
| d9d54b1bb8 |
| 931a80cacb |
| 1ab089022e |
| b9a2df5407 |
| 8b4fcc8ce1 |
| 3fc214d2a2 |
| 868dd02ffd |
| d29e079a1e |
| 5d8f357805 |
| 42b5781967 |
| 8174ba5207 |
| fd0c9992b3 |
| 11a1297792 |
@@ -1,4 +1,4 @@
ARG NODE_VERSION=${NODE_VERSION:-18}
ARG NODE_VERSION=${NODE_VERSION:-20}
FROM node:$NODE_VERSION

# Install required tools

@@ -1,4 +1,4 @@
ARG NODE_JS_VERSION=${NODE_JS_VERSION:-18}
ARG NODE_JS_VERSION=${NODE_JS_VERSION:-20}
FROM node:${NODE_JS_VERSION}

ARG BUILDER_UID=1000
@@ -1,29 +1,29 @@
---
agents:
provider: "gcp"
image: family/core-ubuntu-2204
memory: "8G"
cpu: "2"

steps:
- label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }}) Test Suite: {{ matrix.suite }}"
agents:
provider: "gcp"
- label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }})"
env:
NODE_VERSION: "{{ matrix.nodejs }}"
TEST_SUITE: "{{ matrix.suite }}"
STACK_VERSION: 8.16.0
TEST_SUITE: "platinum"
STACK_VERSION: 9.0.0
GITHUB_TOKEN_PATH: "secret/ci/elastic-elasticsearch-js/github-token"
TEST_ES_STACK: "1"
matrix:
setup:
suite:
- "free"
- "platinum"
nodejs:
- "18"
- "20"
- "22"
- "23"
command: ./.buildkite/run-tests.sh
artifact_paths: "./junit-output/junit-*.xml"
- wait: ~
continue_on_failure: true
- label: ":junit: Test results"
agents:
provider: "gcp"
image: family/core-ubuntu-2204
plugins:
- junit-annotate#v2.6.0:
artifacts: "junit-output/junit-*.xml"
@@ -6,26 +6,33 @@ script_path=$(dirname "$(realpath -s "$0")")
set -euo pipefail
repo=$(pwd)

export NODE_VERSION=${NODE_VERSION:-18}
export NODE_VERSION=${NODE_VERSION:-20}

echo "--- :javascript: Building Docker image"
docker build \
--file "$script_path/Dockerfile" \
--tag elastic/elasticsearch-js \
--build-arg NODE_VERSION="$NODE_VERSION" \
.
--file "$script_path/Dockerfile" \
--tag elastic/elasticsearch-js \
--build-arg NODE_VERSION="$NODE_VERSION" \
.

echo "--- :javascript: Running $TEST_SUITE tests"
GITHUB_TOKEN=$(vault read -field=token "$GITHUB_TOKEN_PATH")
export GITHUB_TOKEN

echo "--- :javascript: Running tests"
mkdir -p "$repo/junit-output"
docker run \
--network="${network_name}" \
--env "TEST_ES_SERVER=${elasticsearch_url}" \
--env "ELASTIC_PASSWORD=${elastic_password}" \
--env "TEST_SUITE=${TEST_SUITE}" \
--env "ELASTIC_USER=elastic" \
--env "BUILDKITE=true" \
--volume "$repo/junit-output:/junit-output" \
--name elasticsearch-js \
--rm \
elastic/elasticsearch-js \
bash -c "npm run test:integration; [ -f ./$TEST_SUITE-report-junit.xml ] && mv ./$TEST_SUITE-report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'"
--network="${network_name}" \
--env TEST_ES_STACK \
--env STACK_VERSION \
--env GITHUB_TOKEN \
--env "TEST_ES_SERVER=${elasticsearch_url}" \
--env "ELASTIC_PASSWORD=${elastic_password}" \
--env "ELASTIC_USER=elastic" \
--env "BUILDKITE=true" \
--volume "/usr/src/app/node_modules" \
--volume "$repo:/usr/src/app" \
--volume "$repo/junit-output:/junit-output" \
--name elasticsearch-js \
--rm \
elastic/elasticsearch-js \
bash -c "npm run test:integration; [ -f ./report-junit.xml ] && mv ./report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'"
@@ -6,3 +6,6 @@ elasticsearch
lib
junit-output
.tap
rest-api-spec
yaml-rest-tests
generated-tests
.github/CODEOWNERS (3 changes, new file)

@@ -0,0 +1,3 @@
package.json @joshmock
renovate.json @joshmock
catalog-info.yaml @joshmock
.github/ISSUE_TEMPLATE/bug.yaml (2 changes)

@@ -40,7 +40,7 @@ body:
id: node-js-version
attributes:
label: Node.js version
placeholder: 18.x, 20.x, etc.
placeholder: 20.x, 22.x, etc.
validations:
required: true
.github/workflows/nodejs.yml (8 changes)

@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
node-version: [18.x, 20.x, 22.x, 23.x]
node-version: [20.x, 22.x, 23.x]
os: [ubuntu-latest, windows-latest, macOS-latest]

steps:

@@ -41,7 +41,7 @@ jobs:
persist-credentials: false

- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: ${{ matrix.node-version }}

@@ -71,7 +71,7 @@ jobs:
persist-credentials: false

- name: Use Node.js
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: 22.x

@@ -104,7 +104,7 @@ jobs:
persist-credentials: false

- name: Use Bun
uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2
uses: oven-sh/setup-bun@735343b667d3e6f658f44d0eca948eb6282f2b76 # v2

- name: Install
run: |
.github/workflows/npm-publish.yml (15 changes)

@@ -16,7 +16,7 @@ jobs:
with:
persist-credentials: false
ref: ${{ github.event.inputs.branch }}
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: "22.x"
registry-url: "https://registry.npmjs.org"

@@ -27,9 +27,20 @@ jobs:
run: |
version=$(jq -r .version package.json)
tag_meta=$(echo "$version" | cut -s -d '-' -f2)
# if no meta info on the version (e.g. a '-alpha.1' prefix), publish as a stable release
if [[ -z "$tag_meta" ]]; then
npm publish --provenance --access public
# get latest version on npm
latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest')

# if $version is higher than the most recently published version, publish as-is
if [[ $(yes | npx semver "$version" "$latest" | tail -n1) == "$version" ]]; then
npm publish --provenance --access public
else
# otherwise, publish with "previous" tag
npm publish --provenance --access public --tag "previous"
fi
else
# publish as a non-stable release using the meta name (e.g. 'alpha') as the tag
tag=$(echo "$tag_meta" | cut -d '.' -f1)
npm publish --provenance --access public --tag "$tag"
fi
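The publish step above picks an npm dist-tag from the version string. A minimal TypeScript sketch of that decision, using the `semver` package; the function name and example values are illustrative, not part of the workflow:

```ts
import semver from 'semver'

// Sketch of the dist-tag choice made by the publish step above.
function chooseNpmTag(version: string, latestPublished: string): string {
  const meta = version.split('-')[1] // e.g. "alpha.1" from "9.0.0-alpha.1"
  if (meta) return meta.split('.')[0] // prerelease: tag with "alpha", "beta", ...
  if (semver.gt(version, latestPublished)) return 'latest' // newer stable release
  return 'previous' // patch for an older branch: keep "latest" on the newest release
}

console.log(chooseNpmTag('9.0.0-alpha.3', '8.17.1')) // "alpha"
console.log(chooseNpmTag('8.17.2', '9.0.0')) // "previous"
```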
.gitignore (4 changes)

@@ -68,3 +68,7 @@ bun.lockb
test-results
processinfo
.tap
rest-api-spec
yaml-rest-tests
generated-tests
schema

@@ -74,3 +74,6 @@ CONTRIBUTING.md
src
bun.lockb
.tap
rest-api-spec
yaml-rest-tests
generated-tests
README.md (90 changes)

@@ -2,7 +2,7 @@

# Elasticsearch Node.js client

[](http://standardjs.com/) [](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [](https://codecov.io/gh/elastic/elasticsearch-js) [](https://www.npmjs.com/package/@elastic/elasticsearch)
[](http://standardjs.com/) [](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [](https://codecov.io/gh/elastic/elasticsearch-js) [](https://www.npmjs.com/package/@elastic/elasticsearch)

**[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)**
or

@@ -34,25 +34,26 @@ the new features of the 8.13 version of Elasticsearch, the 8.13 client version
is required for that. Elasticsearch language clients are only backwards
compatible with default distributions and without guarantees made.

| Elasticsearch Version | Elasticsearch-JS Branch | Supported |
| --------------------- | ------------------------ | --------- |
| main | main | |
| 8.x | 8.x | 8.x |
| 7.x | 7.x | 7.17 |
| Elasticsearch Version | Elasticsearch-JS Branch |
| --------------------- | ----------------------- |
| main | main |
| 9.x | 9.x |
| 8.x | 8.x |
| 7.x | 7.x |

## Usage

* [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index)
* [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_indexing_documents)
* [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_getting_documents)
* [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_searching_documents)
* [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_updating_documents)
* [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_documents)
* [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_an_index)
- [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index)
- [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_indexing_documents)
- [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_getting_documents)
- [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_searching_documents)
- [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_updating_documents)
- [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_documents)
- [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_an_index)

### Node.js support

NOTE: The minimum supported version of Node.js is `v18`.
NOTE: The minimum supported version of Node.js is `v20`.

The client versioning follows the Elastic Stack versioning, this means that
major, minor, and patch releases are done following a precise schedule that

@@ -65,58 +66,43 @@ to support that version for at least another minor release. If you are using the
with a version of Node.js that will be unsupported soon, you will see a warning
in your logs (the client will start logging the warning with two minors in advance).

Unless you are **always** using a supported version of Node.js,
Unless you are **always** using a supported version of Node.js,
we recommend defining the client dependency in your
`package.json` with the `~` instead of `^`. In this way, you will lock the
dependency on the minor release and not the major. (for example, `~7.10.0` instead
of `^7.10.0`).

| Node.js Version | Node.js EOL date | End of support |
| --------------- |------------------| ---------------------- |
| `8.x` | `December 2019` | `7.11` (early 2021) |
| `10.x` | `April 2021` | `7.12` (mid 2021) |
| `12.x` | `April 2022` | `8.2` (early 2022) |
| `14.x` | `April 2023` | `8.8` (early 2023) |
| `16.x` | `September 2023` | `8.11` (late 2023) |

### Compatibility

Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of Elasticsearch.
Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made.

| Elasticsearch Version | Client Version |
| --------------------- |----------------|
| `8.x` | `8.x` |
| `7.x` | `7.x` |
| `6.x` | `6.x` |
| `5.x` | `5.x` |

To install a specific major of the client, run the following command:
```
npm install @elastic/elasticsearch@<major>
```
| Node.js Version | Node.js EOL date | End of support |
| --------------- | ---------------- | ------------------- |
| `8.x` | `December 2019` | `7.11` (early 2021) |
| `10.x` | `April 2021` | `7.12` (mid 2021) |
| `12.x` | `April 2022` | `8.2` (early 2022) |
| `14.x` | `April 2023` | `8.8` (early 2023) |
| `16.x` | `September 2023` | `8.11` (late 2023) |
| `18.x` | `April 2025` | `9.1` (mid 2025) |

#### Browser

> [!WARNING]
> There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues.
We recommend that you write a lightweight proxy that uses this client instead, you can see a proxy example [here](./docs/examples/proxy).
> We recommend that you write a lightweight proxy that uses this client instead, you can see a proxy example [here](./docs/examples/proxy).

## Documentation

* [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html)
* [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage)
* [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html)
* [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html)
* [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication)
* [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html)
* [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html)
* [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html)
* [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html)
* [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html)
* [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html)
- [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html)
- [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage)
- [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html)
- [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html)
- [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication)
- [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html)
- [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html)
- [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html)
- [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html)
- [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html)
- [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html)

## Install multiple versions

If you are using multiple versions of Elasticsearch, you need to use multiple versions of the client. In the past, install multiple versions of the same package was not possible, but with `npm v6.9`, you can do that via aliasing.

The command you must run to install different version of the client is:
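A sketch of the aliasing approach this section describes; the alias names (es8, es9) and node URLs below are illustrative only:

```ts
// Sketch: loading two client majors side by side after installing them under npm aliases, e.g.
//   npm install es8@npm:@elastic/elasticsearch@8 es9@npm:@elastic/elasticsearch@9
const { Client: Client8 } = require('es8')
const { Client: Client9 } = require('es9')

const client8 = new Client8({ node: 'http://localhost:9200' })
const client9 = new Client9({ node: 'http://localhost:9201' })

client8.info().then(console.log, console.log)
client9.info().then(console.log, console.log)
```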
@@ -161,7 +147,7 @@ client7.info().then(console.log, console.log)
```

Finally, if you want to install the client for the next version of Elasticsearch
*(the one that lives in Elasticsearch’s main branch)*, you can use the following
_(the one that lives in Elasticsearch’s main branch)_, you can use the following
command:

```sh
@@ -37,20 +37,7 @@
everyone:
access_level: READ_ONLY
provider_settings:
build_pull_requests: false
build_pull_requests: true
build_branches: false
separate_pull_request_statuses: true
cancel_intermediate_builds: true
cancel_intermediate_builds_branch_filter: "!main"
schedules:
main:
branch: "main"
cronline: "@daily"
8_x:
branch: "8.x"
cronline: "@daily"
8_17:
branch: "8.17"
cronline: "@daily"
8_18:
branch: "8.18"
cronline: "@daily"
@@ -3,16 +3,17 @@

[source, js]
----
const response = await client.inference.streamInference({
task_type: "chat_completion",
const response = await client.inference.chatCompletionUnified({
inference_id: "openai-completion",
model: "gpt-4o",
messages: [
{
role: "user",
content: "What is Elastic?",
},
],
chat_completion_request: {
model: "gpt-4o",
messages: [
{
role: "user",
content: "What is Elastic?",
},
],
},
});
console.log(response);
----
@@ -3,8 +3,7 @@

[source, js]
----
const response = await client.inference.inference({
task_type: "sparse_embedding",
const response = await client.inference.sparseEmbedding({
inference_id: "my-elser-model",
input:
"The sky above the port was the color of television tuned to a dead channel.",
@@ -3,9 +3,8 @@

[source, js]
----
const response = await client.inference.put({
task_type: "my-inference-endpoint",
inference_id: "_update",
const response = await client.inference.update({
inference_id: "my-inference-endpoint",
inference_config: {
service_settings: {
api_key: "<API_KEY>",
@@ -3,41 +3,42 @@

[source, js]
----
const response = await client.inference.streamInference({
task_type: "chat_completion",
const response = await client.inference.chatCompletionUnified({
inference_id: "openai-completion",
messages: [
{
role: "user",
content: [
{
type: "text",
text: "What's the price of a scarf?",
},
],
},
],
tools: [
{
type: "function",
function: {
name: "get_current_price",
description: "Get the current price of a item",
parameters: {
type: "object",
properties: {
item: {
id: "123",
chat_completion_request: {
messages: [
{
role: "user",
content: [
{
type: "text",
text: "What's the price of a scarf?",
},
],
},
],
tools: [
{
type: "function",
function: {
name: "get_current_price",
description: "Get the current price of a item",
parameters: {
type: "object",
properties: {
item: {
id: "123",
},
},
},
},
},
},
],
tool_choice: {
type: "function",
function: {
name: "get_current_price",
],
tool_choice: {
type: "function",
function: {
name: "get_current_price",
},
},
},
});
@@ -3,8 +3,7 @@

[source, js]
----
const response = await client.inference.streamInference({
task_type: "completion",
const response = await client.inference.streamCompletion({
inference_id: "openai-completion",
input: "What is Elastic?",
});
@@ -3,8 +3,7 @@

[source, js]
----
const response = await client.inference.inference({
task_type: "text_embedding",
const response = await client.inference.textEmbedding({
inference_id: "my-cohere-endpoint",
input:
"The sky above the port was the color of television tuned to a dead channel.",
@@ -3,30 +3,31 @@

[source, js]
----
const response = await client.inference.streamInference({
task_type: "chat_completion",
const response = await client.inference.chatCompletionUnified({
inference_id: "openai-completion",
messages: [
{
role: "assistant",
content: "Let's find out what the weather is",
tool_calls: [
{
id: "call_KcAjWtAww20AihPHphUh46Gd",
type: "function",
function: {
name: "get_current_weather",
arguments: '{"location":"Boston, MA"}',
chat_completion_request: {
messages: [
{
role: "assistant",
content: "Let's find out what the weather is",
tool_calls: [
{
id: "call_KcAjWtAww20AihPHphUh46Gd",
type: "function",
function: {
name: "get_current_weather",
arguments: '{"location":"Boston, MA"}',
},
},
},
],
},
{
role: "tool",
content: "The weather is cold",
tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
},
],
],
},
{
role: "tool",
content: "The weather is cold",
tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
},
],
},
});
console.log(response);
----
@@ -3,8 +3,7 @@

[source, js]
----
const response = await client.inference.inference({
task_type: "completion",
const response = await client.inference.completion({
inference_id: "openai_chat_completions",
input: "What is Elastic?",
});
@@ -3,8 +3,7 @@

[source, js]
----
const response = await client.inference.inference({
task_type: "rerank",
const response = await client.inference.rerank({
inference_id: "cohere_rerank",
input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"],
query: "star wars main character",
@@ -1,4 +1,6 @@
project: 'Node.js client'
products:
- id: elasticsearch-client
exclude:
- examples/proxy/README.md
cross_links:
@@ -1023,43 +1023,13 @@ client.info()
```

## client.knnSearch [_knn_search]
Run a knn search.
Performs a kNN search.

NOTE: The kNN search API has been replaced by the `knn` option in the search API.

Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents.
Given a query vector, the API finds the k closest vectors and returns those documents as search hits.

Elasticsearch uses the HNSW algorithm to support efficient kNN search.
Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed.
This means the results returned are not always the true k closest neighbors.

The kNN search API supports restricting the search using a filter.
The search will return the top k documents that also match the filter query.

A kNN search response has the exact same structure as a search API response.
However, certain sections have a meaning specific to kNN search:

* The document `_score` is determined by the similarity between the query and document vector.
* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search-api.html)
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html)

```ts
client.knnSearch({ index, knn })
client.knnSearch()
```
### Arguments [_arguments_knn_search]

#### Request (object) [_request_knn_search]

- **`index` (string | string[])**: A list of index names to search; use `_all` or to perform the operation on all indices.
- **`knn` ({ field, query_vector, k, num_candidates })**: The kNN query to run.
- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response.
- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
- **`fields` (Optional, string | string[])**: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match.
- **`routing` (Optional, string)**: A list of specific routing values.
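The NOTE above points at the `knn` option of the search API as the replacement for this endpoint. A minimal sketch of that option; the index name, field name, and vector values are illustrative:

```ts
// Sketch: the same kind of query expressed with the `knn` option of client.search.
const response = await client.search({
  index: 'my-index',
  knn: {
    field: 'image_vector',
    query_vector: [0.3, 0.1, 1.2],
    k: 10,
    num_candidates: 100,
  },
})
console.log(response.hits.hits)
```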
## client.mget [_mget]
Get multiple documents.

@@ -1591,7 +1561,7 @@ The API uses several _contexts_, which control how scripts are run, what variabl

Each context requires a script, but additional parameters depend on the context you're using for that script.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html)
[Endpoint documentation](https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples)

```ts
client.scriptsPainlessExecute({ ... })

@@ -1713,7 +1683,7 @@ client.search({ ... })
- **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.
- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
- **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field.
- **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings.
- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.

@@ -2925,12 +2895,13 @@ client.cat.nodes({ ... })
- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values.
- **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID.
- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory.
- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards.
- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted.
- **`h` (Optional, Enum("build" | "completion.size" | "cpu" | "disk.avail" | "disk.total" | "disk.used" | "disk.used_percent" | "fielddata.evictions" | "fielddata.memory_size" | "file_desc.current" | "file_desc.max" | "file_desc.percent" | "flush.total" | "flush.total_time" | "get.current" | "get.exists_time" | "get.exists_total" | "get.missing_time" | "get.missing_total" | "get.time" | "get.total" | "heap.current" | "heap.max" | "heap.percent" | "http_address" | "id" | "indexing.delete_current" | "indexing.delete_time" | "indexing.delete_total" | "indexing.index_current" | "indexing.index_failed" | "indexing.index_failed_due_to_version_conflict" | "indexing.index_time" | "indexing.index_total" | "ip" | "jdk" | "load_1m" | "load_5m" | "load_15m" | "mappings.total_count" | "mappings.total_estimated_overhead_in_bytes" | "master" | "merges.current" | "merges.current_docs" | "merges.current_size" | "merges.total" | "merges.total_docs" | "merges.total_size" | "merges.total_time" | "name" | "node.role" | "pid" | "port" | "query_cache.memory_size" | "query_cache.evictions" | "query_cache.hit_count" | "query_cache.miss_count" | "ram.current" | "ram.max" | "ram.percent" | "refresh.total" | "refresh.time" | "request_cache.memory_size" | "request_cache.evictions" | "request_cache.hit_count" | "request_cache.miss_count" | "script.compilations" | "script.cache_evictions" | "search.fetch_current" | "search.fetch_time" | "search.fetch_total" | "search.open_contexts" | "search.query_current" | "search.query_time" | "search.query_total" | "search.scroll_current" | "search.scroll_time" | "search.scroll_total" | "segments.count" | "segments.fixed_bitset_memory" | "segments.index_writer_memory" | "segments.memory" | "segments.version_map_memory" | "shard_stats.total_count" | "suggest.current" | "suggest.time" | "suggest.total" | "uptime" | "version"[])**: A list of columns names to display.
It supports simple wildcards.
- **`s` (Optional, string | string[])**: A list of column names or aliases that determines the sort order.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values.
- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node.
- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values.

## client.cat.pendingTasks [_cat.pending_tasks]
Get pending task information.

@@ -4418,7 +4389,7 @@ Update the connector draft filtering validation.

Update the draft filtering validation info for a connector.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-filtering-validation-api.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation)

```ts
client.connector.updateFilteringValidation({ connector_id, validation })

@@ -4466,7 +4437,7 @@ client.connector.updateName({ connector_id })
## client.connector.updateNative [_connector.update_native]
Update the connector is_native flag.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-native-api.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native)

```ts
client.connector.updateNative({ connector_id, is_native })

@@ -4807,6 +4778,9 @@ By default, the request waits for 1 second for the query results.
If the query completes during this period, results are returned
Otherwise, a query ID is returned that can later be used to retrieve the results.
- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
If `false`, the query will fail if there are any failures.

To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`.
- **`delimiter` (Optional, string)**: The character to use between values within a CSV row.
It is valid only for the CSV format.
- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.

@@ -4874,7 +4848,7 @@ Stop async ES|QL query.
This API interrupts the query execution and returns the results so far.
If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-async-query-stop-api.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop)

```ts
client.esql.asyncQueryStop({ id })

@@ -4889,11 +4863,31 @@ A query ID is also provided when the request was submitted with the `keep_on_com
- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.

## client.esql.getQuery [_esql.get_query]
Get a specific running ES|QL query information.
Returns an object extended information about a running ES|QL query.
```ts
client.esql.getQuery({ id })
```

### Arguments [_arguments_esql.get_query]

#### Request (object) [_request_esql.get_query]
- **`id` (string)**: The query ID

## client.esql.listQueries [_esql.list_queries]
Get running ES|QL queries information.
Returns an object containing IDs and other information about the running ES|QL queries.
```ts
client.esql.listQueries()
```

## client.esql.query [_esql.query]
Run an ES|QL query.
Get search results for an ES|QL (Elasticsearch query language) query.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html)
[Endpoint documentation](https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest)

```ts
client.esql.query({ query })

@@ -4921,6 +4915,9 @@ count.
- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns.
- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
If `false`, the query will fail if there are any failures.

To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`.
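A minimal sketch of overriding that cluster setting from the client, as the note above describes; whether to use `persistent` or `transient` settings is an operator choice, and `persistent` is only one illustrative option:

```ts
// Sketch: disable partial ES|QL results cluster-wide.
await client.cluster.putSettings({
  persistent: {
    'esql.query.allow_partial_results': false,
  },
})
```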
## client.features.getFeatures [_features.get_features]
Get the features.

@@ -5031,9 +5028,9 @@ client.fleet.msearch({ ... })
- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
Elasticsearch to immediately execute the search.
- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
which is true by default.
- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures.
If false, returns an error with no partial results.
Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.

## client.fleet.search [_fleet.search]
Run a Fleet search.

@@ -5134,9 +5131,9 @@ the indices stats API.
- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
Elasticsearch to immediately execute the search.
- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
which is true by default.
- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures.
If false, returns an error with no partial results.
Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.

## client.graph.explore [_graph.explore]
Explore graph analytics.

@@ -5442,12 +5439,12 @@ If no index is specified or the index does not have a default analyzer, the anal
- **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`.
This could be a built-in analyzer, or an analyzer that’s been configured in the index.
- **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter.
- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer.
- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name, unicode_set_filter } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer.
- **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details.
- **`field` (Optional, string)**: Field used to derive the analyzer.
To use this parameter, you must specify an index.
If specified, the `analyzer` parameter overrides this value.
- **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer.
- **`filter` (Optional, string | { type } | { type } | { type, preserve_original } | { type, ignored_scripts, output_unigrams } | { type } | { type } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type } | { type } | { type } | { type, dedup, dictionary, locale, longest_only } | { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, bucket_count, hash_count, hash_set_size, with_rotation } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, pattern, replacement } | { type } | { type } | { type, script } | { type } | { type } | { type } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type } | { type } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, ignore_keywords } | { type } | { type, stopwords } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer.
- **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token.
- **`text` (Optional, string | string[])**: Text to analyze.
If an array of strings is provided, it is analyzed as a multi-value field.

@@ -5458,7 +5455,7 @@ Cancel a migration reindex operation.

Cancel a migration reindex attempt for a data stream or index.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex)

```ts
client.indices.cancelMigrateReindex({ index })

@@ -5646,6 +5643,15 @@ client.indices.create({ index })

#### Request (object) [_request_indices.create]
- **`index` (string)**: Name of the index you wish to create.
Index names must meet the following criteria:

* Lowercase only
* Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`
* Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions
* Cannot start with `-`, `_`, or `+`
* Cannot be `.` or `..`
* Cannot be longer than 255 bytes (note thtat it is bytes, so multi-byte characters will reach the limit faster)
* Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins
- **`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)**: Aliases for the index.
- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include:
- Field names

@@ -5687,7 +5693,7 @@ Create an index from a source index.

Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from)

```ts
client.indices.createFrom({ source, dest })

@@ -5811,6 +5817,16 @@ client.indices.deleteDataStream({ name })
- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`.

## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options]
Deletes the data stream options of the selected data streams.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)

```ts
client.indices.deleteDataStreamOptions()
```

## client.indices.deleteIndexTemplate [_indices.delete_index_template]
Delete an index template.
The provided <index-template> may contain multiple template names separated by a comma. If multiple template

@@ -6268,6 +6284,16 @@ Supports a list of values, such as `open,hidden`.
- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned.

## client.indices.getDataStreamOptions [_indices.get_data_stream_options]
Returns the data stream options of the selected data streams.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)

```ts
client.indices.getDataStreamOptions()
```

## client.indices.getFieldMapping [_indices.get_field_mapping]
Get mapping definitions.
Retrieves mapping definitions for one or more fields.

@@ -6349,7 +6375,7 @@ Get the migration reindexing status.

Get the status of a migration reindex attempt for a data stream or index.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration)

```ts
client.indices.getMigrateReindexStatus({ index })

@@ -6425,7 +6451,7 @@ Reindex all legacy backing indices for a data stream.
This operation occurs in a persistent task.
The persistent task ID is returned immediately and the reindexing work is completed in that task.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-data-stream.html)
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex)

```ts
client.indices.migrateReindex({ ... })

@@ -6623,6 +6649,16 @@ error.
- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error.

## client.indices.putDataStreamOptions [_indices.put_data_stream_options]
Updates the data stream options of the selected data streams.

[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)

```ts
client.indices.putDataStreamOptions()
```

## client.indices.putIndexTemplate [_indices.put_index_template]
Create or update an index template.
Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

@@ -6748,7 +6784,7 @@ a new date field is added instead of string.
not used at all by Elasticsearch, but can be used to store
application-specific metadata.
- **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields.
- **`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, ignore_malformed, script, on_script_error, time_series_dimension, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, dims, element_type, index, index_options, similarity } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, enabled, priority, time_series_dimension } | { type, meta, inference_id, search_inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { type, index } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, index, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)**: Mapping for a field. For new fields, this mapping can include:
|
||||
- **`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, ignore_malformed, script, on_script_error, time_series_dimension, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, script, on_script_error, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, dims, element_type, index, index_options, similarity } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, enabled, priority, time_series_dimension } | { type, meta, inference_id, search_inference_id, chunking_settings } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { type, index } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, index, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)**: Mapping for a field. For new fields, this mapping can include:
|
||||
|
||||
- Field name
|
||||
- Field data type
|
||||
@ -7490,6 +7526,17 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
|
||||
## client.inference.chatCompletionUnified [_inference.chat_completion_unified]
|
||||
Perform chat completion inference
|
||||
|
||||
The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
|
||||
It only works with the `chat_completion` task type for `openai` and `elastic` inference services.
|
||||
|
||||
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
|
||||
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
|
||||
|
||||
NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
|
||||
The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
|
||||
The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
|
||||
If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference)
|
||||
|
||||
```ts
|
||||
@ -7553,30 +7600,42 @@ client.inference.get({ ... })
|
||||
- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type
|
||||
- **`inference_id` (Optional, string)**: The inference Id
|
||||
|
||||
## client.inference.postEisChatCompletion [_inference.post_eis_chat_completion]
|
||||
Perform a chat completion task through the Elastic Inference Service (EIS).
|
||||
## client.inference.inference [_inference.inference]
|
||||
Perform inference on the service.
|
||||
|
||||
Perform a chat completion inference task with the `elastic` service.
|
||||
This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
|
||||
It returns a response with the results of the tasks.
|
||||
The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion)
|
||||
For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.
|
||||
|
||||
> info
|
||||
> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)
|
||||
|
||||
```ts
|
||||
client.inference.postEisChatCompletion({ eis_inference_id })
|
||||
client.inference.inference({ inference_id, input })
|
||||
```
|
||||
|
||||
### Arguments [_arguments_inference.post_eis_chat_completion]
|
||||
### Arguments [_arguments_inference.inference]
|
||||
|
||||
#### Request (object) [_request_inference.post_eis_chat_completion]
|
||||
- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
|
||||
- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })**
|
||||
#### Request (object) [_request_inference.inference]
|
||||
- **`inference_id` (string)**: The unique identifier for the inference endpoint.
|
||||
- **`input` (string | string[])**: The text on which you want to perform the inference task.
|
||||
It can be a single string or an array.
|
||||
|
||||
> info
|
||||
> Inference endpoints for the `completion` task type currently only support a single string as input.
|
||||
- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs.
|
||||
- **`query` (Optional, string)**: The query input, which is required only for the `rerank` task.
|
||||
It is not required for other tasks.
|
||||
- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request.
|
||||
These settings are specific to the task type you specified and override the task settings specified when initializing the service.
|
||||
- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete.
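As a sketch, a request against an existing endpoint might look like the following; the endpoint ID, task type, and input text are illustrative:

```js
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.inference.inference({
    inference_id: 'my-embedding-endpoint', // hypothetical endpoint ID
    task_type: 'text_embedding',
    input: 'The quick brown fox jumps over the lazy dog'
  })
  console.log(response)
}

run().catch(console.log)
```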
## client.inference.put [_inference.put]
|
||||
Create an inference endpoint.
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
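As a rough sketch, you could verify the deployment state from the client before sending inference requests; the model ID below is illustrative, and the response navigation assumes the deployment stats expose an `allocation_status` object:

```js
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Hypothetical model ID; use the model backing your inference endpoint
  const stats = await client.ml.getTrainedModelsStats({ model_id: '.elser_model_2' })
  const allocation = stats.trained_model_stats[0]?.deployment_stats?.allocation_status
  if (allocation?.state === 'fully_allocated' &&
      allocation.allocation_count === allocation.target_allocation_count) {
    console.log('Model deployment is ready for inference')
  }
}

run().catch(console.log)
```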
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
|
||||
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
|
||||
@ -7600,12 +7659,6 @@ Create an AlibabaCloud AI Search inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud)
|
||||
|
||||
```ts
|
||||
@ -7631,12 +7684,6 @@ Creates an inference endpoint to perform an inference task with the `amazonbedro
|
||||
>info
|
||||
> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock)
|
||||
|
||||
```ts
|
||||
@ -7659,12 +7706,6 @@ Create an Anthropic inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `anthropic` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic)
|
||||
|
||||
```ts
|
||||
@ -7688,12 +7729,6 @@ Create an Azure AI studio inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `azureaistudio` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio)
|
||||
|
||||
```ts
|
||||
@ -7723,12 +7758,6 @@ The list of chat completion models that you can choose from in your Azure OpenAI
|
||||
|
||||
The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai)
|
||||
|
||||
```ts
|
||||
@ -7752,12 +7781,6 @@ Create a Cohere inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `cohere` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere)
|
||||
|
||||
```ts
|
||||
@ -7776,26 +7799,6 @@ These settings are specific to the `cohere` service.
|
||||
- **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task.
|
||||
These settings are specific to the task type you specified.
|
||||
|
||||
## client.inference.putEis [_inference.put_eis]
|
||||
Create an Elastic Inference Service (EIS) inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis)
|
||||
|
||||
```ts
|
||||
client.inference.putEis({ task_type, eis_inference_id, service, service_settings })
|
||||
```
|
||||
|
||||
### Arguments [_arguments_inference.put_eis]
|
||||
|
||||
#### Request (object) [_request_inference.put_eis]
|
||||
- **`task_type` (Enum("chat_completion"))**: The type of the inference task that the model will perform.
|
||||
NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
|
||||
- **`eis_inference_id` (string)**: The unique identifier of the inference endpoint.
|
||||
- **`service` (Enum("elastic"))**: The type of service supported for the specified task type. In this case, `elastic`.
|
||||
- **`service_settings` ({ model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `elastic` service.
|
||||
|
||||
## client.inference.putElasticsearch [_inference.put_elasticsearch]
|
||||
Create an Elasticsearch inference endpoint.
|
||||
|
||||
@ -7871,12 +7874,6 @@ Create an Google AI Studio inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `googleaistudio` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio)
|
||||
|
||||
```ts
|
||||
@ -7897,12 +7894,6 @@ Create a Google Vertex AI inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `googlevertexai` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai)
|
||||
|
||||
```ts
|
||||
@ -7939,12 +7930,6 @@ The following models are recommended for the Hugging Face service:
|
||||
* `multilingual-e5-base`
|
||||
* `multilingual-e5-small`
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face)
|
||||
|
||||
```ts
|
||||
@ -7968,12 +7953,6 @@ Create an inference endpoint to perform an inference task with the `jinaai` serv
|
||||
To review the available `rerank` models, refer to <https://jina.ai/reranker>.
|
||||
To review the available `text_embedding` models, refer to <https://jina.ai/embeddings/>.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai)
|
||||
|
||||
```ts
|
||||
@ -7996,12 +7975,6 @@ Create a Mistral inference endpoint.
|
||||
|
||||
Creates an inference endpoint to perform an inference task with the `mistral` service.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral)
|
||||
|
||||
```ts
|
||||
@ -8023,12 +7996,6 @@ Create an OpenAI inference endpoint.
|
||||
|
||||
Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai)
|
||||
|
||||
```ts
|
||||
@ -8078,12 +8045,6 @@ Create an inference endpoint to perform an inference task with the `watsonxai` s
|
||||
You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.
|
||||
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
|
||||
|
||||
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
|
||||
After creating the endpoint, wait for the model deployment to complete before using it.
|
||||
To verify the deployment status, use the get trained model statistics API.
|
||||
Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
|
||||
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx)
|
||||
|
||||
```ts
|
||||
@ -8268,7 +8229,7 @@ If no response is received before the timeout expires, the request fails and ret
|
||||
Get GeoIP statistics.
|
||||
Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html)
|
||||
[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/geoip-processor)
|
||||
|
||||
```ts
|
||||
client.ingest.geoIpStats()
|
||||
@ -8340,7 +8301,7 @@ Extract structured fields out of a single text field within a document.
|
||||
You must choose which field to extract matched fields from, as well as the grok pattern you expect will match.
|
||||
A grok pattern is like a regular expression that supports aliased expressions that can be reused.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html)
|
||||
[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/grok-processor)
|
||||
|
||||
```ts
|
||||
client.ingest.processorGrok()
|
||||
@ -8394,7 +8355,7 @@ A value of `-1` indicates that the request should never time out.
|
||||
Create or update a pipeline.
|
||||
Changes made using this API take effect immediately.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html)
|
||||
[Endpoint documentation](https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines)
|
||||
|
||||
```ts
|
||||
client.ingest.putPipeline({ id })
|
||||
@ -9223,7 +9184,7 @@ retrieval. This allows the configuration to be in an acceptable format to
|
||||
be retrieved and then added to another cluster.
|
||||
|
||||
## client.ml.getDataFrameAnalyticsStats [_ml.get_data_frame_analytics_stats]
|
||||
Get data frame analytics jobs usage info.
|
||||
Get data frame analytics job stats.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats)
|
||||
|
||||
@ -9253,7 +9214,7 @@ there are no matches or only partial matches.
|
||||
- **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose.
|
||||
|
||||
## client.ml.getDatafeedStats [_ml.get_datafeed_stats]
|
||||
Get datafeeds usage info.
|
||||
Get datafeed stats.
|
||||
You can get statistics for multiple datafeeds in a single API request by
|
||||
using a list of datafeeds or a wildcard expression. You can
|
||||
get statistics for all datafeeds by using `_all`, by specifying `*` as the
|
||||
@ -9369,7 +9330,7 @@ influencers are sorted by the `influencer_score` value.
|
||||
means it is unset and results are not limited to specific timestamps.
|
||||
|
||||
## client.ml.getJobStats [_ml.get_job_stats]
|
||||
Get anomaly detection jobs usage info.
|
||||
Get anomaly detection job stats.
|
||||
|
||||
[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats)
|
||||
|
||||
@ -10675,7 +10636,6 @@ client.nodes.getRepositoriesMeteringInfo({ node_id })
|
||||
|
||||
#### Request (object) [_request_nodes.get_repositories_metering_info]
|
||||
- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information.
|
||||
All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
|
||||
|
||||
## client.nodes.hotThreads [_nodes.hot_threads]
|
||||
Get the hot threads for nodes.
|
||||
@ -12385,7 +12345,7 @@ client.security.hasPrivileges({ ... })
|
||||
#### Request (object) [_request_security.has_privileges]
|
||||
- **`user` (Optional, string)**: Username
|
||||
- **`application` (Optional, { application, privileges, resources }[])**
|
||||
- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check.
|
||||
- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_esql" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_esql" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check.
|
||||
- **`index` (Optional, { names, privileges, allow_restricted_indices }[])**
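For example, a minimal sketch of checking a mix of cluster and index privileges; the privilege and index names are illustrative:

```js
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.security.hasPrivileges({
    cluster: ['monitor', 'manage_ml'],
    index: [{ names: ['my-index'], privileges: ['read', 'write'] }]
  })
  console.log(response.has_all_requested)
}

run().catch(console.log)
```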
## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile]
|
||||
@ -12611,7 +12571,7 @@ client.security.putRole({ name })
|
||||
#### Request (object) [_request_security.put_role]
|
||||
- **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.
|
||||
- **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries.
|
||||
- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
|
||||
- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_esql" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_esql" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
|
||||
- **`global` (Optional, Record<string, User-defined value>)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
|
||||
- **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries.
|
||||
- **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries.
|
||||
@ -13567,8 +13527,6 @@ Multi-target syntax is supported.
|
||||
- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node.
|
||||
If the master node is not available before the timeout expires, the request fails and returns an error.
|
||||
To indicate that the request should never timeout, set it to `-1`.
|
||||
- **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for a response.
|
||||
If no response is received before the timeout expires, the request fails and returns an error.
|
||||
|
||||
## client.snapshot.create [_snapshot.create]
|
||||
Create a snapshot.
|
||||
@ -14260,6 +14218,8 @@ client.synonyms.deleteSynonymRule({ set_id, rule_id })
|
||||
#### Request (object) [_request_synonyms.delete_synonym_rule]
|
||||
- **`set_id` (string)**: The ID of the synonym set to update.
|
||||
- **`rule_id` (string)**: The ID of the synonym rule to delete.
|
||||
- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning.
|
||||
If `false`, analyzers will not be reloaded with the deleted synonym rule
|
||||
|
||||
## client.synonyms.getSynonym [_synonyms.get_synonym]
|
||||
Get a synonym set.
|
||||
@ -14328,6 +14288,8 @@ client.synonyms.putSynonym({ id, synonyms_set })
|
||||
#### Request (object) [_request_synonyms.put_synonym]
|
||||
- **`id` (string)**: The ID of the synonyms set to be created or updated.
|
||||
- **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set.
|
||||
- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning.
|
||||
If `false`, analyzers will not be reloaded with the new synonym set
|
||||
|
||||
## client.synonyms.putSynonymRule [_synonyms.put_synonym_rule]
|
||||
Create or update a synonym rule.
|
||||
@ -14349,6 +14311,8 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })
|
||||
- **`set_id` (string)**: The ID of the synonym set.
|
||||
- **`rule_id` (string)**: The ID of the synonym rule to be updated or created.
|
||||
- **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format.
|
||||
- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning.
|
||||
If `false`, analyzers will not be reloaded with the new synonym rule.
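A minimal usage sketch, assuming a synonyms set named `my-synonyms-set` already exists; the rule ID and synonyms are illustrative:

```js
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.synonyms.putSynonymRule({
    set_id: 'my-synonyms-set',
    rule_id: 'rule-1',
    synonyms: 'laptop, notebook, portable computer',
    refresh: true
  })
  console.log(response)
}

run().catch(console.log)
```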
## client.tasks.cancel [_tasks.cancel]
|
||||
Cancel a task.
|
||||
|
||||
@ -82,8 +82,6 @@ auth: {
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `maxRetries`
|
||||
|
||||
Type: `number`<br>
|
||||
@ -91,8 +89,6 @@ Default: `3`
|
||||
|
||||
Max number of retries for each request.
|
||||
|
||||
---
|
||||
|
||||
### `requestTimeout`
|
||||
|
||||
Type: `number`<br>
|
||||
@ -100,8 +96,6 @@ Default: `No value`
|
||||
|
||||
Max request timeout in milliseconds for each request.
|
||||
|
||||
---
|
||||
|
||||
### `pingTimeout`
|
||||
|
||||
Type: `number`<br>
|
||||
@ -109,8 +103,6 @@ Default: `3000`
|
||||
|
||||
Max ping request timeout in milliseconds for each request.
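A minimal sketch combining the retry and timeout options described above; the node URL and values are illustrative:

```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',
  maxRetries: 5,
  requestTimeout: 60000,
  pingTimeout: 3000
})
```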
---
|
||||
|
||||
### `sniffInterval`
|
||||
|
||||
Type: `number, boolean`<br>
|
||||
@ -122,8 +114,6 @@ Perform a sniff operation every `n` milliseconds.
|
||||
Sniffing might not be the best solution. Before using the various `sniff` options, review this [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how).
|
||||
:::
|
||||
|
||||
---
|
||||
|
||||
### `sniffOnStart`
|
||||
|
||||
Type: `boolean`<br>
|
||||
@ -131,8 +121,6 @@ Default: `false`
|
||||
|
||||
Perform a sniff once the client is started. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how).
|
||||
|
||||
---
|
||||
|
||||
### `sniffEndpoint`
|
||||
|
||||
Type: `string`<br>
|
||||
@ -140,8 +128,6 @@ Default: `'_nodes/_all/http'`
|
||||
|
||||
Endpoint to ping during a sniff. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how).
|
||||
|
||||
---
|
||||
|
||||
### `sniffOnConnectionFault`
|
||||
|
||||
Type: `boolean`<br>
|
||||
@ -149,8 +135,6 @@ Default: `false`
|
||||
|
||||
Perform a sniff on connection fault. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how).
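A minimal sketch combining the sniffing options described above; the node URL and interval are illustrative:

```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',
  sniffOnStart: true,
  sniffInterval: 60000,
  sniffOnConnectionFault: true,
  sniffEndpoint: '_nodes/_all/http'
})
```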
---
|
||||
|
||||
### `resurrectStrategy`
|
||||
|
||||
Type: `string`<br>
|
||||
@ -159,8 +143,6 @@ Default: `'ping'`
|
||||
Configure the node resurrection strategy.<br>
|
||||
Options: `'ping'`, `'optimistic'`, `'none'`
|
||||
|
||||
---
|
||||
|
||||
### `suggestCompression`
|
||||
|
||||
Type: `boolean`<br>
|
||||
@ -168,8 +150,6 @@ Default: `false`
|
||||
|
||||
Adds an `accept-encoding` header to every request.
|
||||
|
||||
---
|
||||
|
||||
### `compression`
|
||||
|
||||
Type: `string, boolean`<br>
|
||||
@ -178,8 +158,6 @@ Default: `false`
|
||||
Enables gzip request body compression.<br>
|
||||
Options: `'gzip'`, `false`
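A minimal sketch enabling both compression options described above; the node URL is illustrative:

```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',
  suggestCompression: true,
  compression: 'gzip'
})
```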
---
|
||||
|
||||
### `tls`
|
||||
|
||||
Type: `http.SecureContextOptions`<br>
|
||||
@ -187,8 +165,6 @@ Default: `null`
|
||||
|
||||
The [tls configuraton](https://nodejs.org/api/tls.html).
|
||||
|
||||
---
|
||||
|
||||
### `proxy`
|
||||
|
||||
Type: `string, URL`<br>
|
||||
@ -208,8 +184,6 @@ const client = new Client({
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `agent`
|
||||
|
||||
Type: `http.AgentOptions, function`<br>
|
||||
@ -237,8 +211,6 @@ const client = new Client({
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `nodeFilter`
|
||||
|
||||
Type: `function`
|
||||
@ -246,19 +218,20 @@ Type: `function`
|
||||
Filter that indicates whether a node should be used for a request. Default function definition:
|
||||
|
||||
```js
|
||||
function defaultNodeFilter (node) {
|
||||
// avoid master only nodes
|
||||
if (node.roles.master === true &&
|
||||
node.roles.data === false &&
|
||||
node.roles.ingest === false) {
|
||||
return false
|
||||
function defaultNodeFilter (conn) {
|
||||
if (conn.roles != null) {
|
||||
if (
|
||||
// avoid master-only nodes
|
||||
conn.roles.master &&
|
||||
!conn.roles.data &&
|
||||
!conn.roles.ingest &&
|
||||
!conn.roles.ml
|
||||
) return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `nodeSelector`
|
||||
|
||||
Type: `function`<br>
|
||||
@ -276,8 +249,6 @@ function nodeSelector (connections) {
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `generateRequestId`
|
||||
|
||||
Type: `function`<br>
|
||||
@ -294,8 +265,6 @@ function generateRequestId (params, options) {
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `name`
|
||||
|
||||
Type: `string, symbol`<br>
|
||||
@ -303,8 +272,6 @@ Default: `elasticsearch-js`
|
||||
|
||||
The name to identify the client instance in the events.
|
||||
|
||||
---
|
||||
|
||||
### `opaqueIdPrefix`
|
||||
|
||||
Type: `string`<br>
|
||||
@ -313,8 +280,6 @@ Default: `null`
|
||||
A string that will be used to prefix any `X-Opaque-Id` header.
|
||||
See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details.
|
||||
|
||||
---
|
||||
|
||||
### `headers`
|
||||
|
||||
Type: `object`<br>
|
||||
@ -322,8 +287,6 @@ Default: `{}`
|
||||
|
||||
A set of custom headers to send in every request.
|
||||
|
||||
---
|
||||
|
||||
### `context`
|
||||
|
||||
Type: `object`<br>
|
||||
@ -331,8 +294,6 @@ Default: `null`
|
||||
|
||||
A custom object that you can use for observability in your events. It will be merged with the API level context option.
|
||||
|
||||
---
|
||||
|
||||
### `enableMetaHeader`
|
||||
|
||||
Type: `boolean`<br>
|
||||
@ -340,8 +301,6 @@ Default: `true`
|
||||
|
||||
If true, adds a header named `'x-elastic-client-meta'`, containing some minimal telemetry data, such as the client and platform version.
|
||||
|
||||
---
|
||||
|
||||
### `cloud`
|
||||
|
||||
Type: `object`<br>
|
||||
@ -363,16 +322,12 @@ const client = new Client({
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `disablePrototypePoisoningProtection`
|
||||
|
||||
Default: `true`
|
||||
|
||||
`boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. For more information, refer to [Square Brackets are the Enemy](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08). If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. To learn more, refer to the [`secure-json-parse` documentation](https://github.com/fastify/secure-json-parse).
|
||||
|
||||
---
|
||||
|
||||
### `caFingerprint`
|
||||
|
||||
Type: `string`<br>
|
||||
@ -380,8 +335,6 @@ Default: `null`
|
||||
|
||||
If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints.
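A minimal sketch, assuming a TLS-enabled node; the URL and fingerprint below are placeholders:

```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'https://localhost:9200',
  // Placeholder SHA256 fingerprint of the CA certificate
  caFingerprint: '20:0D:CA:FA:76:72:4D:3C:36:4D:F8:9F:4A:EE:D8:9A:7B:6C:2C:32:7E:8A:4E:6B:1B:0E:5D:9C:0A:2F:4B:11'
})
```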
---
|
||||
|
||||
### `maxResponseSize`
|
||||
|
||||
Type: `number`<br>
|
||||
@ -389,8 +342,6 @@ Default: `null`
|
||||
|
||||
When configured, `maxResponseSize` verifies that the uncompressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`.
|
||||
|
||||
---
|
||||
|
||||
### `maxCompressedResponseSize`
|
||||
|
||||
Type: `number`<br>
|
||||
@ -398,8 +349,6 @@ Default: `null`
|
||||
|
||||
When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`.
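A minimal sketch combining both response size limits described above; the node URL and limits are illustrative:

```js
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',
  maxResponseSize: 100 * 1024 * 1024,         // 100 MB, uncompressed
  maxCompressedResponseSize: 50 * 1024 * 1024 // 50 MB, compressed
})
```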
---
|
||||
|
||||
### `redaction`
|
||||
|
||||
Type: `object`<br>
|
||||
@ -411,8 +360,6 @@ Options for how to redact potentially sensitive data from metadata attached to `
|
||||
[Read about redaction](/reference/advanced-config.md#redaction) for more details
|
||||
::::
|
||||
|
||||
---
|
||||
|
||||
### `serverMode`
|
||||
|
||||
Type: `string`<br>
|
||||
|
||||
@ -51,18 +51,119 @@ console.log(result)
|
||||
|
||||
To create a new instance of the Bulk helper, access it as shown in the example above. The configuration options are:
|
||||
|
||||
| | |
|
||||
| --- | --- |
|
||||
| `datasource` | An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of json strings or JavaScript objects.<br> If it is a stream, we recommend to use the [`split2`](https://www.npmjs.com/package/split2) package, that splits the stream on new lines delimiters.<br> This parameter is mandatory.<br><br>```js<br>const { createReadStream } = require('fs')<br>const split = require('split2')<br>const b = client.helpers.bulk({<br> // if you just use split(), the data will be used as array of strings<br> datasource: createReadStream('./dataset.ndjson').pipe(split())<br> // if you need to manipulate the data, you can pass JSON.parse to split<br> datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse))<br>})<br>```<br> |
|
||||
| `onDocument` | A function that is called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the [Bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to see the supported operations.<br> This parameter is mandatory.<br><br>```js<br>const b = client.helpers.bulk({<br> onDocument (doc) {<br> return {<br> index: { _index: 'my-index' }<br> }<br> }<br>})<br>```<br> |
|
||||
| `onDrop` | A function that is called for everytime a document can’t be indexed and it has reached the maximum amount of retries.<br><br>```js<br>const b = client.helpers.bulk({<br> onDrop (doc) {<br> console.log(doc)<br> }<br>})<br>```<br> |
|
||||
| `onSuccess` | A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations.<br><br>```js<br>const b = client.helpers.bulk({<br> onSuccess ({ result, document }) {<br> console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`)<br> }<br>})<br>```<br> |
|
||||
| `flushBytes` | The size of the bulk body in bytes to reach before to send it. Default of 5MB.<br> *Default:* `5000000`<br><br>```js<br>const b = client.helpers.bulk({<br> flushBytes: 1000000<br>})<br>```<br> |
|
||||
| `flushInterval` | How much time (in milliseconds) the helper waits before flushing the body from the last document read.<br> *Default:* `30000`<br><br>```js<br>const b = client.helpers.bulk({<br> flushInterval: 30000<br>})<br>```<br> |
|
||||
| `concurrency` | How many request is executed at the same time.<br> *Default:* `5`<br><br>```js<br>const b = client.helpers.bulk({<br> concurrency: 10<br>})<br>```<br> |
|
||||
| `retries` | How many times a document is retried before to call the `onDrop` callback.<br> *Default:* Client max retries.<br><br>```js<br>const b = client.helpers.bulk({<br> retries: 3<br>})<br>```<br> |
|
||||
| `wait` | How much time to wait before retries in milliseconds.<br> *Default:* 5000.<br><br>```js<br>const b = client.helpers.bulk({<br> wait: 3000<br>})<br>```<br> |
|
||||
| `refreshOnCompletion` | If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.<br> *Default:* false.<br><br>```js<br>const b = client.helpers.bulk({<br> refreshOnCompletion: true<br> // or<br> refreshOnCompletion: 'index-name'<br>})<br>```<br> |
|
||||
`datasource`
|
||||
: An array, async generator, or readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of JSON strings or JavaScript objects.
If it is a stream, we recommend using the [`split2`](https://www.npmjs.com/package/split2) package, which splits the stream on newline delimiters.
This parameter is mandatory.
|
||||
|
||||
```js
|
||||
const { createReadStream } = require('fs')
|
||||
const split = require('split2')
|
||||
const b = client.helpers.bulk({
|
||||
// if you just use split(), the data will be used as array of strings
|
||||
datasource: createReadStream('./dataset.ndjson').pipe(split())
|
||||
// if you need to manipulate the data, you can pass JSON.parse to split
|
||||
datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse))
|
||||
})
|
||||
```
|
||||
|
||||
`onDocument`
|
||||
: A function that is called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the [Bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to see the supported operations.
|
||||
This parameter is mandatory.
|
||||
|
||||
```js
|
||||
const b = client.helpers.bulk({
|
||||
onDocument (doc) {
|
||||
return {
|
||||
index: { _index: 'my-index' }
|
||||
}
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
`onDrop`
|
||||
: A function that is called every time a document can’t be indexed and has reached the maximum number of retries.
|
||||
|
||||
```js
|
||||
const b = client.helpers.bulk({
|
||||
onDrop (doc) {
|
||||
console.log(doc)
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
`onSuccess`
|
||||
: A function that is called for each successful operation in the bulk request, which includes the result from Elasticsearch along with the original document that was sent, or `null` for delete operations.
|
||||
|
||||
```js
|
||||
const b = client.helpers.bulk({
|
||||
onSuccess ({ result, document }) {
|
||||
console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`)
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
`flushBytes`
|
||||
: The size in bytes that the bulk body must reach before it is sent. Defaults to 5MB.
|
||||
*Default:* `5000000`
|
||||
|
||||
```js
|
||||
const b = client.helpers.bulk({
|
||||
flushBytes: 1000000
|
||||
})
|
||||
```
|
||||
|
||||
`flushInterval`
: How much time (in milliseconds) the helper waits before flushing the body from the last document read.

*Default:* `30000`

```js
const b = client.helpers.bulk({
  flushInterval: 30000
})
```
`concurrency`
: How many requests are executed at the same time.

*Default:* `5`

```js
const b = client.helpers.bulk({
  concurrency: 10
})
```
`retries`
: How many times a document is retried before the `onDrop` callback is called.

*Default:* Client max retries.

```js
const b = client.helpers.bulk({
  retries: 3
})
```
`wait`
: How much time to wait, in milliseconds, before retries.

*Default:* `5000`

```js
const b = client.helpers.bulk({
  wait: 3000
})
```
`refreshOnCompletion`
: If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.

*Default:* `false`

```js
const b = client.helpers.bulk({
  refreshOnCompletion: true
  // or, to refresh only a specific index:
  // refreshOnCompletion: 'index-name'
})
```
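Putting the options above together, here is a minimal end-to-end sketch; the node URL, index name, and dataset path are placeholders and the option values are only examples:

```js
const { Client } = require('@elastic/elasticsearch')
const { createReadStream } = require('fs')
const split = require('split2')

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function run () {
  // the bulk helper resolves with a stats report once all documents are flushed
  const result = await client.helpers.bulk({
    datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse)),
    onDocument (doc) {
      return { index: { _index: 'my-index' } }
    },
    onDrop (doc) {
      console.log('dropped after retries:', doc)
    },
    flushBytes: 1000000,
    concurrency: 10,
    refreshOnCompletion: true
  })
  console.log(result)
}

run().catch(console.log)
```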
### Supported operations [_supported_operations]
|
||||
|
||||
@ -255,13 +356,55 @@ m.search(
|
||||
|
||||
To create a new instance of the multi search (msearch) helper, you should access it as shown in the example above. The configuration options are:
|
||||
|
||||
`operations`
: How many search operations should be sent in a single msearch request.

*Default:* `5`

```js
const m = client.helpers.msearch({
  operations: 10
})
```
`flushInterval`
: How much time (in milliseconds) the helper waits before flushing the operations from the last operation read.

*Default:* `500`

```js
const m = client.helpers.msearch({
  flushInterval: 500
})
```
`concurrency`
: How many requests are executed at the same time.

*Default:* `5`

```js
const m = client.helpers.msearch({
  concurrency: 10
})
```
`retries`
: How many times an operation is retried before resolving the request. An operation is retried only in case of a 429 error.

*Default:* Client max retries.

```js
const m = client.helpers.msearch({
  retries: 3
})
```
`wait`
: How much time to wait, in milliseconds, before retries.

*Default:* `5000`

```js
const m = client.helpers.msearch({
  wait: 3000
})
```
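These options can be combined when creating the helper. A short sketch, assuming the `m.search()` usage shown above (index name and query are placeholders):

```js
const m = client.helpers.msearch({
  operations: 10,
  flushInterval: 500,
  concurrency: 10,
  retries: 3,
  wait: 3000
})

// each m.search() call is buffered and sent as part of a single msearch request
m.search(
  { index: 'my-index' },
  { query: { match: { title: 'javascript' } } }
).then(console.log, console.log)
```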
### Stopping the msearch helper [_stopping_the_msearch_helper]
|
||||
|
||||
@ -475,7 +618,7 @@ Added in `v8.16.0`
|
||||
|
||||
ES|QL can return results in multiple binary formats, including [Apache Arrow](https://arrow.apache.org/)'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets.
|
||||
|
||||
`toArrowReader` returns an [`AsyncRecordBatchStreamReader`](https://github.com/apache/arrow/blob/520ae44272d491bbb52eb3c9b84864ed7088f11a/js/src/ipc/reader.ts#L216).
|
||||
|
||||
```ts
|
||||
const reader = await client.helpers
|
||||
@ -483,7 +626,7 @@ const reader = await client.helpers
|
||||
.toArrowReader()
|
||||
|
||||
// print each record as JSON
|
||||
for await (const recordBatch of reader) {
|
||||
for (const record of recordBatch) {
|
||||
console.log(record.toJSON())
|
||||
}
|
||||
|
||||
@ -9,6 +9,6 @@ The client is designed to be easily configured for your needs. In the following
|
||||
|
||||
- [Basic configuration](/reference/basic-config.md)
- [Advanced configuration](/reference/advanced-config.md)
- [Timeout best practices](/reference/timeout-best-practices.md)
- [Creating a child client](/reference/child.md)
- [Testing](/reference/client-testing.md)
@ -332,21 +332,22 @@ The supported request specific options are:
|
||||
| Option | Description |
| --- | ----------- |
| `ignore` | `number[]` - HTTP status codes which should not be considered errors for this request.<br>*Default:* `null` |
| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [Elasticsearch best practices for HTML clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.<br>*Default:* No timeout |
| `retryOnTimeout` | `boolean` - Retry requests that have timed out.<br>*Default:* `false` |
| `maxRetries` | `number` - Max number of retries for the request, it overrides the client default.<br>*Default:* `3` |
| `compression` | `string` or `boolean` - Enables body compression for the request.<br>*Options:* `false`, `'gzip'`<br>*Default:* `false` |
| `asStream` | `boolean` - Instead of getting the parsed body back, you get the raw Node.js stream of data.<br>*Default:* `false` |
| `headers` | `object` - Custom headers for the request.<br>*Default:* `null` |
| `querystring` | `object` - Custom querystring for the request.<br>*Default:* `null` |
| `id` | `any` - Custom request ID. *(overrides the top level request id generator)*<br>*Default:* `null` |
| `context` | `any` - Custom object per request. *(you can use it to pass data to the clients events)*<br>*Default:* `null` |
| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id)<br>*Default:* `null` |
| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it is higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_STRING_LENGTH`.<br>*Default:* `null` |
| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number; if it is higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_LENGTH`.<br>*Default:* `null` |
| `signal` | `AbortSignal` - The AbortSignal instance to allow request abortion.<br>*Default:* `null` |
| `meta` | `boolean` - Rather than returning the body, return an object containing `body`, `statusCode`, `headers` and `meta` keys.<br>*Default:* `false` |
| `redaction` | `object` - Options for redacting potentially sensitive data from error metadata. See [Redaction of potentially sensitive data](/reference/advanced-config.md#redaction). |
| `retryBackoff` | `(min: number, max: number, attempt: number) => number` - A function that calculates how long to sleep, in seconds, before the next request retry.<br>*Default:* A built-in function that uses exponential backoff with jitter. |
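For instance, these options are passed as the second argument of any API method. A brief sketch; the index name, query, and backoff formula below are placeholders rather than recommendations:

```js
// pass request-specific options as the second argument to an API call
client.search(
  { index: 'my-index', query: { match_all: {} } },
  {
    ignore: [404],
    maxRetries: 5,
    requestTimeout: 30000,
    // hypothetical backoff: sleep up to `attempt` extra seconds above `min`,
    // never exceeding `max` (the function returns seconds, per the table above)
    retryBackoff: (min, max, attempt) => Math.min(max, min + Math.random() * attempt)
  }
).then(console.log, console.log)
```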
## Using the Client in a Function-as-a-Service Environment [client-faas-env]
|
||||
|
||||
@ -461,9 +462,8 @@ console.log(errors)
|
||||
|
||||
You can find the errors exported by the client in the table below.
|
||||
|
||||
| **Error** | **Description** | **Properties** |
| --- | --- | --- |
| `ElasticsearchClientError` | Every error inherits from this class; it is the basic error generated by the client. | * `name` - `string`<br>* `message` - `string`<br> |
| `TimeoutError` | Generated when a request exceeds the `requestTimeout` option. | * `name` - `string`<br>* `message` - `string`<br>* `meta` - `object`, contains all the information about the request<br> |
| `ConnectionError` | Generated when an error occurs during the request; it can be a connection error or a malformed stream of data. | * `name` - `string`<br>* `message` - `string`<br>* `meta` - `object`, contains all the information about the request<br> |
|
||||
|
||||
@ -45,15 +45,11 @@ const client = new Client({
|
||||
|
||||
Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment:
|
||||
|
||||


You can generate an API key on the **Management** page under Security.
|
||||
|
||||


For other connection options, refer to the [*Connecting*](/reference/connecting.md) section.
|
||||
|
||||
|
||||
@ -8,7 +8,6 @@ mapped_pages:
|
||||
|
||||
This is the official Node.js client for {{es}}. This page gives a quick overview about the features of the client.
|
||||
|
||||
|
||||
## Features [_features]
|
||||
|
||||
* One-to-one mapping with REST API.
|
||||
@ -19,7 +18,6 @@ This is the official Node.js client for {{es}}. This page gives a quick overview
|
||||
* Child client support.
|
||||
* TypeScript support out of the box.
|
||||
|
||||
|
||||
### Install multiple versions [_install_multiple_versions]
|
||||
|
||||
If you are using multiple versions of {{es}}, you need to use multiple versions of the client as well. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do it via aliasing.
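A minimal sketch of the aliasing approach; the alias names and major versions below are only examples:

```js
// install aliased copies first, e.g.:
//   npm install es8@npm:@elastic/elasticsearch@8
//   npm install es9@npm:@elastic/elasticsearch@9
// then each major version can be loaded side by side:
const { Client: Client8 } = require('es8')
const { Client: Client9 } = require('es9')

const client8 = new Client8({ node: 'http://localhost:9200' }) // 8.x cluster (placeholder URL)
const client9 = new Client9({ node: 'http://localhost:9201' }) // 9.x cluster (placeholder URL)
```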
|
||||
@ -74,5 +72,3 @@ npm install esmain@github:elastic/elasticsearch-js
|
||||
::::{warning}
|
||||
This command installs the main branch of the client which is not considered stable.
|
||||
::::
|
||||
|
||||
|
||||
|
||||
@ -21,45 +21,41 @@ npm install @elastic/elasticsearch@<major>
|
||||
|
||||
To learn more about the supported major versions, please refer to the [Compatibility matrix](#js-compatibility-matrix).
|
||||
|
||||
|
||||
## Node.js support [nodejs-support]
|
||||
|
||||
::::{note}
|
||||
The minimum supported version of Node.js is `v18`.
|
||||
::::
|
||||
|
||||
|
||||
The client versioning follows the {{stack}} versioning; this means that major, minor, and patch releases are done following a precise schedule that often does not coincide with the [Node.js release](https://nodejs.org/en/about/releases/) times.
|
||||
|
||||
To avoid supporting insecure and unsupported versions of Node.js, the client **will drop support for EOL versions of Node.js between minor releases**. Typically, as soon as a Node.js version goes into EOL, the client will continue to support that version for at least another minor release. If you are using the client with a version of Node.js that will be unsupported soon, you will see a warning in your logs (the client will start logging the warning with two minors in advance).
|
||||
|
||||
Unless you are **always** using a supported version of Node.js, we recommend defining the client dependency in your `package.json` with `~` instead of `^`. In this way, you will lock the dependency on the minor release and not the major (for example, `~7.10.0` instead of `^7.10.0`).
|
||||
|
||||
| Node.js Version | Node.js EOL date | End of support |
| --------------- | ---------------- | ------------------- |
| `8.x` | December 2019 | `7.11` (early 2021) |
| `10.x` | April 2021 | `7.12` (mid 2021) |
| `12.x` | April 2022 | `8.2` (early 2022) |
| `14.x` | April 2023 | `8.8` (early 2023) |
| `16.x` | September 2023 | `8.11` (late 2023) |
| `18.x` | April 2025 | `9.2` (late 2025) |

## Compatibility matrix [js-compatibility-matrix]
|
||||
|
||||
Language clients are forward compatible, meaning that clients support communicating with greater or equal minor versions of {{es}} without breaking. It does not mean that the client automatically supports new features of newer {{es}} versions; that is only possible after a release of a new client version. For example, an 8.12 client version won’t automatically support the new features of the 8.13 version of {{es}}; the 8.13 client version is required for that. {{es}} language clients are only backwards compatible with default distributions and without guarantees made.
|
||||
|
||||
| {{es}} Version | Client Version | Supported |
| -------------- | -------------- | --------- |
| `9.x` | `9.x` | `9.x` |
| `8.x` | `8.x` | `8.x` |
| `7.x` | `7.x` | `7.17` |
| `6.x` | `6.x` | |
| `5.x` | `5.x` | |

### Browser [_browser]
|
||||
|
||||
::::{warning}
|
||||
There is no official support for the browser environment. It exposes your {{es}} instance to everyone, which could lead to security issues. We recommend that you write a lightweight proxy that uses this client instead; you can see a proxy example [here](https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy).
|
||||
::::
|
||||
|
||||
|
||||
|
||||
@ -15,7 +15,6 @@ Correlating events can be hard, especially if your applications have a large cod
|
||||
|
||||
All of these observability features are documented below.
|
||||
|
||||
|
||||
## OpenTelemetry [_opentelemetry]
|
||||
|
||||
The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`.
|
||||
@ -36,7 +35,6 @@ To start sending Elasticsearch trace data to your OpenTelemetry endpoint, follow
|
||||
node --require '@opentelemetry/auto-instrumentations-node/register' index.js
|
||||
```
|
||||
|
||||
|
||||
## Events [_events]
|
||||
|
||||
The client is an event emitter. This means that you can listen for its events to add additional logic to your code, without needing to change the client’s internals or how you use the client. You can find the events' names by accessing the `events` key of the client:
|
||||
@ -65,16 +63,75 @@ client.diagnostic.on('response', (err, result) => {
|
||||
})
|
||||
```
|
||||
|
||||
### Event types
|
||||
|
||||
The client emits the following events:
|
||||
|
||||
#### `serialization`

Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`.

```js
client.diagnostic.on("serialization", (err, result) => {
  console.log(err, result)
})
```

#### `request`

Emitted before sending the actual request to {{es}} _(emitted multiple times in case of retries)_.

```js
client.diagnostic.on("request", (err, result) => {
  console.log(err, result)
})
```

#### `deserialization`

Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`.

This event might not be emitted in certain situations:

* When `asStream` is set to true, the response is returned in its raw stream form before deserialization occurs
* When a response is terminated early due to content length being too large
* When a response is terminated early by an `AbortController`

```js
client.diagnostic.on("deserialization", (err, result) => {
  console.log(err, result)
})
```

#### `response`

Emitted once {{es}} response has been received and parsed.

```js
client.diagnostic.on("response", (err, result) => {
  console.log(err, result)
})
```

#### `sniff`

Emitted when the client ends a sniffing request.

```js
client.diagnostic.on("sniff", (err, result) => {
  console.log(err, result)
})
```

#### `resurrect`

Emitted if the client is able to resurrect a dead node.

```js
client.diagnostic.on("resurrect", (err, result) => {
  console.log(err, result)
})
```
The values of `result` in `serialization`, `request`, `deserialization`, `response` and `sniff` are:
|
||||
|
||||
@ -113,7 +170,6 @@ request: {
|
||||
};
|
||||
```
|
||||
|
||||
|
||||
### Events order [_events_order]
|
||||
|
||||
The event order is described in the following graph; in some edge cases, the order is not guaranteed. You can find in [`test/acceptance/events-order.test.js`](https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js) how the order changes based on the situation.
|
||||
@ -134,7 +190,6 @@ serialization
|
||||
└─▶ response
|
||||
```
|
||||
|
||||
|
||||
## Correlation ID [_correlation_id]
|
||||
|
||||
Correlating events can be hard, especially if there are many events at the same time. The client offers you an automatic (and configurable) system to help you handle this problem.
|
||||
@ -176,7 +231,7 @@ const client = new Client({
|
||||
// it takes two parameters, the request parameters and options
|
||||
generateRequestId: function (params, options) {
|
||||
// your id generation logic
|
||||
// must be synchronous
|
||||
return 'id'
|
||||
}
|
||||
})
|
||||
@ -193,7 +248,6 @@ client.search({
|
||||
}).then(console.log, console.log)
|
||||
```
|
||||
|
||||
|
||||
## Context object [_context_object]
|
||||
|
||||
Sometimes, you might need to make some custom data available in your events. You can do that via the `context` option of a request:
|
||||
@ -263,10 +317,9 @@ client.search({
|
||||
}).then(console.log, console.log)
|
||||
```
|
||||
|
||||
|
||||
## Client name [_client_name]
|
||||
|
||||
If you are using multiple instances of the client or if you are using multiple child clients _(which is the recommended way to have multiple instances of the client)_, you might need to recognize which client you are using. The `name` option helps you in this regard.
|
||||
|
||||
```js
|
||||
const { Client } = require('@elastic/elasticsearch')
|
||||
@ -309,7 +362,6 @@ child.search({
|
||||
}).then(console.log, console.log)
|
||||
```
|
||||
|
||||
|
||||
## X-Opaque-Id support [_x_opaque_id_support]
|
||||
|
||||
To improve observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this allows you to discover this identifier in the [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), and helps you with [identifying search slow log origin](elasticsearch://reference/elasticsearch/index-settings/slow-log.md) as well as [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).
|
||||
@ -348,4 +400,3 @@ client.search({
|
||||
opaqueId: 'my-search'
|
||||
}).then(console.log, console.log)
|
||||
```
|
||||
|
||||
|
||||
docs/reference/timeout-best-practices.md (new file)
@ -0,0 +1,13 @@
|
||||
---
|
||||
mapped_pages:
|
||||
- https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/timeout-best-practices.html
|
||||
---
|
||||
|
||||
# Timeout best practices [timeout-best-practices]
|
||||
|
||||
Starting in 9.0.0, this client is configured to not time out any HTTP request by default. {{es}} will always eventually respond to any request, even if it takes several minutes. Reissuing a request that it has not responded to yet can cause performance side effects. See the [official {{es}} recommendations for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more information.
|
||||
|
||||
Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {{es}}, raising a `TimeoutError` when that time period elapsed without receiving a response.
|
||||
|
||||
If you need to set timeouts on Elasticsearch requests, setting the `requestTimeout` value to a millisecond value will cause this client to operate as it did prior to 9.0.
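For example, a brief sketch of restoring a request timeout; the node URL and index name are placeholders, and `30000` simply mirrors the pre-9.0 default:

```js
const { Client } = require('@elastic/elasticsearch')

// client-wide: every request times out after 30 seconds, as in 8.x
const client = new Client({
  node: 'https://localhost:9200',
  requestTimeout: 30000
})

// per request: override the timeout for a single call
client.search(
  { index: 'my-index', query: { match_all: {} } },
  { requestTimeout: 5000 }
).then(console.log, console.log)
```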
|
||||
|
||||
@ -31,4 +31,5 @@ toc:
|
||||
- file: update_examples.md
|
||||
- file: update_by_query_examples.md
|
||||
- file: reindex_examples.md
|
||||
- file: client-helpers.md
|
||||
- file: client-helpers.md
|
||||
- file: timeout-best-practices.md
|
||||
@ -13,8 +13,6 @@ The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g
|
||||
The client is developed against the [latest](https://www.npmjs.com/package/typescript?activeTab=versions) version of TypeScript. Furthermore, unless you have set `skipLibCheck` to `true`, you should configure `esModuleInterop` to `true`.
|
||||
::::
|
||||
|
||||
|
||||
|
||||
## Example [_example]
|
||||
|
||||
```ts
|
||||
@ -74,7 +72,6 @@ async function run () {
|
||||
run().catch(console.log)
|
||||
```
|
||||
|
||||
|
||||
## Request & Response types [_request_response_types]
|
||||
|
||||
You can import the full TypeScript requests & responses definitions as follows:
|
||||
@ -82,10 +79,3 @@ You can import the full TypeScript requests & responses definitions as it follow
|
||||
```ts
|
||||
import { estypes } from '@elastic/elasticsearch'
|
||||
```
|
||||
|
||||
If you need the legacy definitions with the body, you can do the following:
|
||||
|
||||
```ts
|
||||
import { estypesWithBody } from '@elastic/elasticsearch'
|
||||
```
|
||||
|
||||
|
||||
@ -3,22 +3,53 @@ navigation_title: "Breaking changes"
|
||||
---
|
||||
|
||||
# Elasticsearch JavaScript Client breaking changes [elasticsearch-javascript-client-breaking-changes]
|
||||
|
||||
Breaking changes can impact your Elastic applications, potentially disrupting normal operations. Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md).
|
||||
|
||||
% ## Next version [elasticsearch-javascript-client-versionnext-breaking-changes]
|
||||
|
||||
% ::::{dropdown} Title of breaking change
|
||||
% Description of the breaking change.
|
||||
% For more information, check [PR #](PR link).
|
||||
% **Impact**<br> Impact of the breaking change.
|
||||
% **Action**<br> Steps for mitigating deprecation impact.
|
||||
% ::::
|
||||
|
||||
## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes]
|
||||
|
||||
::::{dropdown} Changes to the optional body property
|
||||
|
||||
In 8.x, every API function had a `body` property that would provide a place to put arbitrary values that should go in the HTTP request body, even if they were not noted in the specification or documentation. In 9.0, each API function still includes an optional `body` property, but TypeScript's type checker will disallow properties that should go in the root of the object. A `querystring` parameter has also been added that behaves the same as `body`, but inserts its values into the request querystring.
|
||||
|
||||
**Impact**<br> Some adjustments to API calls may be necessary for code that used a `body` property in 8.x, especially to appease the TypeScript type checker, but it should not have any impact on any code that was not using a `body` property. The `estypesWithBody` export and `typesWithBodyKey` module are no longer available.
|
||||
|
||||
**Action**<br> Check existing code for use of the `body` property, and move any properties that should be in the root object according to the API function's request type definition. If using TypeScript, the TypeScript type checker will surface most of these issues for you. Also look for any imports of `estypesWithBody` or `typesWithBodyKey` and update them to `estypes` and `types`, respectively.
|
||||
::::
|
||||
|
||||
::::{dropdown} Changes to API parameter collation into an HTTP request
|
||||
|
||||
The logic for where each parameter in an API function call should be added to its HTTP request has been updated:
|
||||
|
||||
1. If recognized as a `body` parameter according to the Elasticsearch specification, put it in the JSON body
|
||||
2. If recognized as a `path` parameter, put it in the URL path
|
||||
3. If recognized as a `query` parameter or a "common" query parameter (e.g. `pretty`, `error_trace`), put it in the querystring
|
||||
4. If not recognized, and this API accepts a JSON body, put it in the JSON body
|
||||
5. If not recognized and this API does not accept a JSON body, put it in the querystring
|
||||
|
||||
The first two steps are identical in 8.x. The final three steps replace the logic from 8.x that put all unrecognized parameters in the querystring.
|
||||
|
||||
**Impact**<br> Some parameters that were sent via querystring to Elasticsearch may be sent in the JSON request body, and vice versa.
|
||||
|
||||
**Action**<br> If Elasticsearch sends back an error response due to a request not being valid, verify with the client's TypeScript type definitions, or via the docs, that the parameters your code passes are correct.
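A sketch of rule 4 above, using a made-up parameter purely for illustration (it is not part of the search API):

```js
// `my_custom_flag` is hypothetical and not recognized by the specification.
// Because the search API accepts a JSON body, in 9.x it is collated into the
// request body; in 8.x it would have been sent in the querystring.
client.search({
  index: 'my-index',
  query: { match_all: {} },
  my_custom_flag: true
}).then(console.log, console.log)
```

If you use TypeScript, the type checker will likely flag an unknown property like this before the request is ever sent.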
|
||||
::::
|
||||
|
||||
::::{dropdown} Removal of the default 30-second timeout on all API calls
|
||||
|
||||
The default 30-second timeout on all HTTP requests sent to Elasticsearch has been dropped in favor of having no timeout set at all. The previous behavior still works as it did when setting the `requestTimeout` value.
|
||||
|
||||
See pull request [#2573](https://github.com/elastic/elasticsearch-js/pull/2573) for more information.
|
||||
|
||||
**Impact**<br> Requests to Elasticsearch that used to time out after 30 seconds will now wait for as long as it takes for Elasticsearch to respond.
|
||||
|
||||
**Action**<br> In environments where it is not ideal to wait for an API response indefinitely, manually setting the `requestTimeout` option when instantiating the client still works as it did in 8.x.
|
||||
::::
|
||||
|
||||
@ -5,19 +5,14 @@ navigation_title: "Deprecations"
|
||||
# Elasticsearch JavaScript Client deprecations [elasticsearch-javascript-client-deprecations]
|
||||
Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications.
|
||||
|
||||
Review the deprecated functionality for the Elasticsearch JavaScript Client. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md).
|
||||
|
||||
## 9.0.0 [elasticsearch-javascript-client-900-deprecations]
|
||||
|
||||
_No deprecations_
|
||||
|
||||
% ## Next version
|
||||
|
||||
% ::::{dropdown} Deprecation title
|
||||
% Description of the deprecation.
|
||||
% For more information, check [PR #](PR link).
|
||||
% **Impact**<br> Impact of deprecation.
|
||||
% **Action**<br> Steps for mitigating deprecation impact.
|
||||
% ::::
|
||||
|
||||
% ## 9.0.0 [elasticsearch-javascript-client-900-deprecations]
|
||||
|
||||
% ::::{dropdown} Deprecation title
|
||||
% Description of the deprecation.
|
||||
% For more information, check [PR #](PR link).
|
||||
|
||||
@ -15,13 +15,35 @@ To check for security updates, go to [Security announcements for the Elastic sta
|
||||
% ## version.next [elasticsearch-javascript-client-next-release-notes]
|
||||
|
||||
% ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements]
|
||||
% *
|
||||
% \*
|
||||
|
||||
% ### Fixes [elasticsearch-javascript-client-next-fixes]
|
||||
% *
|
||||
% \*
|
||||
|
||||
## 9.0.1

### Fixes [elasticsearch-javascript-client-9.0.1-fixes]

**Reinstate `nodeFilter` and node `roles` feature:** The docs note a `nodeFilter` option on the client that will, by default, filter the nodes based on any `roles` values that are set at instantiation. At some point, this functionality was partially disabled. This brings the feature back, ensuring that it matches what the documentation has said it does all along.
|
||||
|
||||
**Ensure Apache Arrow ES|QL helper uses async iterator:** the [`esql.toArrowReader()` helper function](/reference/client-helpers.md#_toarrowreader) was trying to return `RecordBatchStreamReader`, a synchronous iterator, despite the fact that the `apache-arrow` package was, in most cases, automatically coercing it to `AsyncRecordBatchStreamReader`, its asynchronous counterpart. It is now always returned as an async iterator.
|
||||
|
||||
## 9.0.0 [elasticsearch-javascript-client-9.0.0-release-notes]
|
||||
|
||||
### Features and enhancements [elasticsearch-javascript-client-9.0.0-features-enhancements]
|
||||
|
||||
- **Compatibility with Elasticsearch 9.0:** All changes and additions to Elasticsearch APIs for its 9.0 release are reflected in this release.
|
||||
- **Serverless client merged in:** the `@elastic/elasticsearch-serverless` client is being deprecated, and its functionality has been merged back into this client. This should have zero impact on the way the client works by default, except that a new `serverMode` option has been added. When it's explicitly set to `"serverless"` by a user, a few default settings and behaviors are changed:
|
||||
|
||||
- turns off sniffing and ignores any sniffing-related options
|
||||
- ignores all nodes passed in config except the first one, and ignores any node filtering and selecting options
|
||||
- enables compression and `TLSv1_2_method` (same as when configured for Elastic Cloud)
|
||||
- adds an `elastic-api-version` HTTP header to all requests
|
||||
- uses `CloudConnectionPool` by default instead of `WeightedConnectionPool`
|
||||
- turns off vendored `content-type` and `accept` headers in favor of standard MIME types
|
||||
|
||||
Docstrings for types that differ between stack and serverless have also been updated to indicate when that is the case.
|
||||
|
||||
- **Improved Cloud ID parsing:** when using a Cloud ID as the `cloud` parameter to instantiate the client, that ID was assumed to be in the correct format. New assertions have been added to verify that format and throw a `ConfigurationError` if it is invalid. See [#2694](https://github.com/elastic/elasticsearch-js/issues/2694).
|
||||
|
||||
% ### Fixes [elasticsearch-javascript-client-9.0.0-fixes]
|
||||
|
||||
@ -5,6 +5,10 @@ navigation_title: "Known issues"
|
||||
|
||||
# Elasticsearch JavaScript Client known issues [elasticsearch-javascript-client-known-issues]
|
||||
|
||||
## 9.0.0
|
||||
|
||||
_No known issues_
|
||||
|
||||
% Use the following template to add entries to this page.
|
||||
|
||||
% :::{dropdown} Title of known issue
|
||||
@ -17,4 +21,4 @@ navigation_title: "Known issues"
|
||||
% **Resolved**
|
||||
% On [Month/Day/Year], this issue was resolved.
|
||||
|
||||
% :::
|
||||
index.d.ts (vendored)
@ -8,6 +8,6 @@ import SniffingTransport from './lib/sniffingTransport'
|
||||
|
||||
export * from '@elastic/transport'
|
||||
export * as estypes from './lib/api/types'
|
||||
export * as estypesWithBody from './lib/api/typesWithBodyKey'
|
||||
export { Client, SniffingTransport }
|
||||
export type { ClientOptions, NodeOptions } from './lib/client'
|
||||
export * as helpers from './lib/helpers'
|
||||
|
||||
package.json
@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "@elastic/elasticsearch",
|
||||
"version": "9.0.0-alpha.5",
|
||||
"versionCanary": "9.0.0-canary.0",
|
||||
"version": "9.0.1",
|
||||
"versionCanary": "9.0.1-canary.0",
|
||||
"description": "The official Elasticsearch client for Node.js",
|
||||
"main": "./index.js",
|
||||
"types": "index.d.ts",
|
||||
@ -18,7 +18,8 @@
|
||||
"test:coverage-100": "npm run build && tap --coverage --100",
|
||||
"test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov",
|
||||
"test:coverage-ui": "npm run build && tap --coverage --coverage-report=html",
|
||||
"test:integration": "tsc && node test/integration/index.js",
|
||||
"test:integration-build": "npm run build && node test/integration/index.js",
|
||||
"test:integration": "npm run test:integration-build && env tap run --jobs=1 --reporter=junit --reporter-file=report-junit.xml generated-tests/",
|
||||
"lint": "ts-standard src",
|
||||
"lint:fix": "ts-standard --fix src",
|
||||
"license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'",
|
||||
@ -57,11 +58,11 @@
|
||||
"node": ">=18"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@elastic/request-converter": "9.0.0",
|
||||
"@elastic/request-converter": "9.0.1",
|
||||
"@sinonjs/fake-timers": "14.0.0",
|
||||
"@types/debug": "4.1.12",
|
||||
"@types/ms": "0.7.34",
|
||||
"@types/node": "22.13.14",
|
||||
"@types/ms": "2.1.0",
|
||||
"@types/node": "22.15.19",
|
||||
"@types/sinonjs__fake-timers": "8.1.5",
|
||||
"@types/split2": "4.2.3",
|
||||
"@types/stoppable": "1.1.3",
|
||||
@ -76,22 +77,22 @@
|
||||
"node-abort-controller": "3.1.1",
|
||||
"node-fetch": "2.7.0",
|
||||
"ora": "5.4.1",
|
||||
"proxy": "1.0.2",
|
||||
"rimraf": "3.0.2",
|
||||
"semver": "7.7.1",
|
||||
"proxy": "2.2.0",
|
||||
"rimraf": "6.0.1",
|
||||
"semver": "7.7.2",
|
||||
"split2": "4.2.0",
|
||||
"stoppable": "1.1.0",
|
||||
"tap": "21.1.0",
|
||||
"ts-node": "10.9.2",
|
||||
"ts-standard": "12.0.2",
|
||||
"typescript": "5.8.2",
|
||||
"typescript": "5.8.3",
|
||||
"workq": "3.0.0",
|
||||
"xmlbuilder2": "3.1.1",
|
||||
"zx": "7.2.3"
|
||||
"zx": "8.5.4"
|
||||
},
|
||||
"dependencies": {
|
||||
"@elastic/transport": "9.0.0-alpha.1",
|
||||
"apache-arrow": "^18.0.0",
|
||||
"@elastic/transport": "^9.0.1",
|
||||
"apache-arrow": "18.x - 20.x",
|
||||
"tslib": "^2.4.0"
|
||||
},
|
||||
"tap": {
|
||||
|
||||
@ -20,13 +20,15 @@
|
||||
"matchManagers": [
|
||||
"dockerfile"
|
||||
],
|
||||
"pinDigests": false
|
||||
"pinDigests": false,
|
||||
"automerge": true
|
||||
},
|
||||
{
|
||||
"matchDatasources": [
|
||||
"docker"
|
||||
],
|
||||
"pinDigests": false
|
||||
"pinDigests": false,
|
||||
"automerge": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@ -3,162 +3,102 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const { join } = require('path')
|
||||
const minimist = require('minimist')
|
||||
const stream = require('stream')
|
||||
const { promisify } = require('util')
|
||||
const { createWriteStream, promises } = require('fs')
|
||||
const rimraf = require('rimraf')
|
||||
const { rimraf } = require('rimraf')
|
||||
const fetch = require('node-fetch')
|
||||
const crossZip = require('cross-zip')
|
||||
const ora = require('ora')
|
||||
|
||||
const { mkdir, writeFile } = promises
|
||||
const { mkdir, cp } = promises
|
||||
const pipeline = promisify(stream.pipeline)
|
||||
const unzip = promisify(crossZip.unzip)
|
||||
const rm = promisify(rimraf)
|
||||
|
||||
const esFolder = join(__dirname, '..', 'elasticsearch')
|
||||
const zipFolder = join(esFolder, 'artifacts.zip')
|
||||
const specFolder = join(esFolder, 'rest-api-spec', 'api')
|
||||
const freeTestFolder = join(esFolder, 'rest-api-spec', 'test', 'free')
|
||||
const xPackTestFolder = join(esFolder, 'rest-api-spec', 'test', 'platinum')
|
||||
const artifactInfo = join(esFolder, 'info.json')
|
||||
const testYamlFolder = join(__dirname, '..', 'yaml-rest-tests')
|
||||
const zipFile = join(__dirname, '..', 'elasticsearch-clients-tests.zip')
|
||||
|
||||
async function downloadArtifacts (opts) {
|
||||
if (typeof opts.version !== 'string') {
|
||||
throw new Error('Missing version')
|
||||
}
|
||||
const schemaFolder = join(__dirname, '..', 'schema')
|
||||
const schemaJson = join(schemaFolder, 'schema.json')
|
||||
|
||||
async function downloadArtifacts (localTests, version = 'main') {
|
||||
const log = ora('Checking out spec and test').start()
|
||||
|
||||
log.text = 'Resolving versions'
|
||||
let resolved
|
||||
try {
|
||||
resolved = await resolve(opts.version, opts.hash)
|
||||
} catch (err) {
|
||||
log.fail(err.message)
|
||||
process.exit(1)
|
||||
const { GITHUB_TOKEN } = process.env
|
||||
|
||||
if (version !== 'main') {
|
||||
version = version.split('.').slice(0, 2).join('.')
|
||||
}
|
||||
|
||||
opts.id = opts.id || resolved.id
|
||||
opts.hash = opts.hash || resolved.hash
|
||||
opts.version = resolved.version
|
||||
log.text = 'Clean tests folder'
|
||||
await rimraf(testYamlFolder)
|
||||
await mkdir(testYamlFolder, { recursive: true })
|
||||
|
||||
const info = loadInfo()
|
||||
log.text = `Fetch test YAML files for version ${version}`
|
||||
|
||||
if (info && info.version === opts.version) {
|
||||
if (info.hash === opts.hash && info.id === opts.id) {
|
||||
log.succeed('The artifact copy present locally is already up to date')
|
||||
return
|
||||
if (localTests) {
|
||||
log.text = `Copying local tests from ${localTests}`
|
||||
await cp(localTests, testYamlFolder, { recursive: true })
|
||||
} else {
|
||||
if (!GITHUB_TOKEN) {
|
||||
log.fail("Missing required environment variable 'GITHUB_TOKEN'")
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
const response = await fetch(`https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/${version}`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${GITHUB_TOKEN}`,
|
||||
Accept: 'application/vnd.github+json'
|
||||
}
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
log.fail(`unexpected response ${response.statusText}`)
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
log.text = 'Downloading tests zipball'
|
||||
await pipeline(response.body, createWriteStream(zipFile))
|
||||
|
||||
log.text = 'Unzipping tests'
|
||||
await unzip(zipFile, testYamlFolder)
|
||||
|
||||
log.text = 'Cleanup'
|
||||
await rimraf(zipFile)
|
||||
}
|
||||
|
||||
log.text = 'Cleanup checkouts/elasticsearch'
|
||||
await rm(esFolder)
|
||||
await mkdir(esFolder, { recursive: true })
|
||||
log.text = 'Fetching Elasticsearch specification'
|
||||
await rimraf(schemaFolder)
|
||||
await mkdir(schemaFolder, { recursive: true })
|
||||
|
||||
log.text = 'Downloading artifacts'
|
||||
const response = await fetch(resolved.url)
|
||||
const response = await fetch(`https://raw.githubusercontent.com/elastic/elasticsearch-specification/${version}/output/schema/schema.json`)
|
||||
if (!response.ok) {
|
||||
log.fail(`unexpected response ${response.statusText}`)
|
||||
process.exit(1)
|
||||
}
|
||||
await pipeline(response.body, createWriteStream(zipFolder))
|
||||
|
||||
log.text = 'Unzipping'
|
||||
await unzip(zipFolder, esFolder)
|
||||
|
||||
log.text = 'Cleanup'
|
||||
await rm(zipFolder)
|
||||
|
||||
log.text = 'Update info'
|
||||
await writeFile(artifactInfo, JSON.stringify(opts), 'utf8')
|
||||
log.text = 'Downloading schema.json'
|
||||
await pipeline(response.body, createWriteStream(schemaJson))
|
||||
|
||||
log.succeed('Done')
|
||||
}
|
||||
|
||||
function loadInfo () {
|
||||
try {
|
||||
return require(artifactInfo)
|
||||
} catch (err) {
|
||||
return null
|
||||
}
|
||||
async function main () {
|
||||
await downloadArtifacts()
|
||||
}
|
||||
|
||||
async function resolve (version, hash) {
|
||||
const response = await fetch(`https://artifacts-api.elastic.co/v1/versions/${version}`)
|
||||
if (!response.ok) {
|
||||
throw new Error(`unexpected response ${response.statusText}`)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
const esBuilds = data.version.builds
|
||||
.filter(build => build.projects.elasticsearch != null)
|
||||
.map(build => {
|
||||
return {
|
||||
projects: build.projects.elasticsearch,
|
||||
buildId: build.build_id,
|
||||
date: build.start_time,
|
||||
version: build.version
|
||||
}
|
||||
})
|
||||
.sort((a, b) => {
|
||||
const dA = new Date(a.date)
|
||||
const dB = new Date(b.date)
|
||||
if (dA > dB) return -1
|
||||
if (dA < dB) return 1
|
||||
return 0
|
||||
})
|
||||
|
||||
if (hash != null) {
|
||||
const build = esBuilds.find(build => build.projects.commit_hash === hash)
|
||||
if (!build) {
|
||||
throw new Error(`Can't find any build with hash '${hash}'`)
|
||||
}
|
||||
const zipKey = Object.keys(build.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip'))
|
||||
return {
|
||||
url: build.projects.packages[zipKey].url,
|
||||
id: build.buildId,
|
||||
hash: build.projects.commit_hash,
|
||||
version: build.version
|
||||
}
|
||||
}
|
||||
|
||||
const lastBuild = esBuilds[0]
|
||||
const zipKey = Object.keys(lastBuild.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip'))
|
||||
return {
|
||||
url: lastBuild.projects.packages[zipKey].url,
|
||||
id: lastBuild.buildId,
|
||||
hash: lastBuild.projects.commit_hash,
|
||||
version: lastBuild.version
|
||||
}
|
||||
}
|
||||
|
||||
async function main (options) {
|
||||
delete options._
|
||||
await downloadArtifacts(options)
|
||||
}
|
||||
if (require.main === module) {
|
||||
process.on('unhandledRejection', function (err) {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
||||
|
||||
const options = minimist(process.argv.slice(2), {
|
||||
string: ['id', 'version', 'hash']
|
||||
})
|
||||
main(options).catch(t => {
|
||||
main().catch(t => {
|
||||
console.log(t)
|
||||
process.exit(2)
|
||||
})
|
||||
}
|
||||
|
||||
module.exports = downloadArtifacts
|
||||
module.exports.locations = {
|
||||
specFolder,
|
||||
freeTestFolder,
|
||||
xPackTestFolder
|
||||
}
|
||||
module.exports.locations = { testYamlFolder, zipFile, schemaJson }
|
||||
|
||||
@ -6,7 +6,7 @@
|
||||
const { join } = require('path')
|
||||
const { writeFile } = require('fs/promises')
|
||||
const fetch = require('node-fetch')
|
||||
const rimraf = require('rimraf')
|
||||
const { rimraf } = require('rimraf')
|
||||
const ora = require('ora')
|
||||
const { convertRequests } = require('@elastic/request-converter')
|
||||
const minimist = require('minimist')
|
||||
@ -77,6 +77,7 @@ ${source.trim()}
|
||||
}
|
||||
|
||||
const options = minimist(process.argv.slice(2), {
|
||||
boolean: ['debug'],
|
||||
string: ['version'],
|
||||
default: {
|
||||
version: 'master'
|
||||
@ -88,7 +89,7 @@ generate(options.version)
|
||||
.catch(err => log.fail(err.message))
|
||||
.finally(() => {
|
||||
const keys = Object.keys(failures)
|
||||
if (keys.length > 0) {
|
||||
if (keys.length > 0 && options.debug) {
|
||||
let message = 'Some examples failed to generate:\n\n'
|
||||
for (const key of keys) {
|
||||
message += `${key}: ${failures[key]}\n`
|
||||
|
||||
@ -1667,7 +1667,7 @@ export default class Connector {
|
||||
|
||||
/**
|
||||
* Update the connector draft filtering validation. Update the draft filtering validation info for a connector.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation}
|
||||
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation | Elasticsearch API documentation}
|
||||
*/
|
||||
async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorUpdateFilteringValidationResponse>
|
||||
async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateFilteringValidationResponse, unknown>>
|
||||
@ -1838,7 +1838,7 @@ export default class Connector {
|
||||
|
||||
/**
|
||||
* Update the connector is_native flag.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation}
|
||||
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native | Elasticsearch API documentation}
|
||||
*/
|
||||
async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorUpdateNativeResponse>
|
||||
async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateNativeResponse, unknown>>
|
||||
|
||||
@ -85,6 +85,18 @@ export default class Esql {
|
||||
'drop_null_columns'
|
||||
]
|
||||
},
|
||||
'esql.get_query': {
|
||||
path: [
|
||||
'id'
|
||||
],
|
||||
body: [],
|
||||
query: []
|
||||
},
|
||||
'esql.list_queries': {
|
||||
path: [],
|
||||
body: [],
|
||||
query: []
|
||||
},
|
||||
'esql.query': {
|
||||
path: [],
|
||||
body: [
|
||||
@ -253,7 +265,7 @@ export default class Esql {
|
||||
|
||||
/**
|
||||
* Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html | Elasticsearch API documentation}
|
||||
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop | Elasticsearch API documentation}
|
||||
*/
|
||||
async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryStopResponse>
|
||||
async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryStopResponse, unknown>>
|
||||
@ -296,9 +308,95 @@ export default class Esql {
|
||||
return await this.transport.request({ path, method, querystring, body, meta }, options)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get information about a specific running ES|QL query. Returns an object with extended information about a running ES|QL query.
|
||||
*/
|
||||
async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlGetQueryResponse>
|
||||
async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlGetQueryResponse, unknown>>
|
||||
async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlGetQueryResponse>
|
||||
async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const {
|
||||
path: acceptedPath
|
||||
} = this.acceptedParams['esql.get_query']
|
||||
|
||||
const userQuery = params?.querystring
|
||||
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
|
||||
|
||||
let body: Record<string, any> | string | undefined
|
||||
const userBody = params?.body
|
||||
if (userBody != null) {
|
||||
if (typeof userBody === 'string') {
|
||||
body = userBody
|
||||
} else {
|
||||
body = { ...userBody }
|
||||
}
|
||||
}
|
||||
|
||||
for (const key in params) {
|
||||
if (acceptedPath.includes(key)) {
|
||||
continue
|
||||
} else if (key !== 'body' && key !== 'querystring') {
|
||||
// @ts-expect-error
|
||||
querystring[key] = params[key]
|
||||
}
|
||||
}
|
||||
|
||||
const method = 'GET'
|
||||
const path = `/_query/queries/${encodeURIComponent(params.id.toString())}`
|
||||
const meta: TransportRequestMetadata = {
|
||||
name: 'esql.get_query',
|
||||
pathParts: {
|
||||
id: params.id
|
||||
}
|
||||
}
|
||||
return await this.transport.request({ path, method, querystring, body, meta }, options)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get information about running ES|QL queries. Returns an object containing IDs and other information about the running ES|QL queries.
|
||||
*/
|
||||
async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlListQueriesResponse>
|
||||
async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlListQueriesResponse, unknown>>
|
||||
async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise<T.EsqlListQueriesResponse>
|
||||
async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const {
|
||||
path: acceptedPath
|
||||
} = this.acceptedParams['esql.list_queries']
|
||||
|
||||
const userQuery = params?.querystring
|
||||
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
|
||||
|
||||
let body: Record<string, any> | string | undefined
|
||||
const userBody = params?.body
|
||||
if (userBody != null) {
|
||||
if (typeof userBody === 'string') {
|
||||
body = userBody
|
||||
} else {
|
||||
body = { ...userBody }
|
||||
}
|
||||
}
|
||||
|
||||
params = params ?? {}
|
||||
for (const key in params) {
|
||||
if (acceptedPath.includes(key)) {
|
||||
continue
|
||||
} else if (key !== 'body' && key !== 'querystring') {
|
||||
// @ts-expect-error
|
||||
querystring[key] = params[key]
|
||||
}
|
||||
}
|
||||
|
||||
const method = 'GET'
|
||||
const path = '/_query/queries'
|
||||
const meta: TransportRequestMetadata = {
|
||||
name: 'esql.list_queries'
|
||||
}
|
||||
return await this.transport.request({ path, method, querystring, body, meta }, options)
|
||||
}
/**
* Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest | Elasticsearch API documentation}
*/
async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlQueryResponse>
async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlQueryResponse, unknown>>

@ -209,6 +209,13 @@ export default class Indices {
'expand_wildcards'
]
},
'indices.delete_data_stream_options': {
path: [
'name'
],
body: [],
query: []
},
'indices.delete_index_template': {
path: [
'name'
@ -408,6 +415,13 @@ export default class Indices {
'verbose'
]
},
'indices.get_data_stream_options': {
path: [
'name'
],
body: [],
query: []
},
'indices.get_field_mapping': {
path: [
'fields',
@ -559,6 +573,13 @@ export default class Indices {
'timeout'
]
},
'indices.put_data_stream_options': {
path: [
'name'
],
body: [],
query: []
},
'indices.put_index_template': {
path: [
'name'
@ -974,7 +995,7 @@ export default class Indices {

/**
* Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex | Elasticsearch API documentation}
*/
async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCancelMigrateReindexResponse>
async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCancelMigrateReindexResponse, unknown>>
@ -1277,7 +1298,7 @@ export default class Indices {

/**
* Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from | Elasticsearch API documentation}
*/
async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCreateFromResponse>
async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCreateFromResponse, unknown>>
@ -1564,6 +1585,51 @@ export default class Indices {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Deletes the data stream options of the selected data streams.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
*/
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
} = this.acceptedParams['indices.delete_data_stream_options']

const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}

params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
querystring[key] = params[key]
}
}

const method = 'DELETE'
const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options`
const meta: TransportRequestMetadata = {
name: 'indices.delete_data_stream_options',
pathParts: {
name: params.name
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Delete an index template. The provided <index-template> may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template | Elasticsearch API documentation}
@ -2377,6 +2443,51 @@ export default class Indices {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Returns the data stream options of the selected data streams.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
*/
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
} = this.acceptedParams['indices.get_data_stream_options']

const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}

params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
querystring[key] = params[key]
}
}

const method = 'GET'
const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options`
const meta: TransportRequestMetadata = {
name: 'indices.get_data_stream_options',
pathParts: {
name: params.name
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation}
@ -2538,7 +2649,7 @@ export default class Indices {

/**
* Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration | Elasticsearch API documentation}
*/
async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesGetMigrateReindexStatusResponse>
async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesGetMigrateReindexStatusResponse, unknown>>
@ -2696,7 +2807,7 @@ export default class Indices {

/**
* Reindex legacy backing indices. Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex | Elasticsearch API documentation}
*/
async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesMigrateReindexResponse>
async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesMigrateReindexResponse, unknown>>
@ -3049,6 +3160,51 @@ export default class Indices {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Updates the data stream options of the selected data streams.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation}
*/
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath
} = this.acceptedParams['indices.put_data_stream_options']

const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}

params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
querystring[key] = params[key]
}
}

const method = 'PUT'
const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options`
const meta: TransportRequestMetadata = {
name: 'indices.put_data_stream_options',
pathParts: {
name: params.name
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
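The three data stream options helpers map onto GET, PUT, and DELETE requests against /_data_stream/{name}/_options. A minimal usage sketch, assuming a configured client and an existing data stream; the stream name and request body are illustrative, since the parameters are still typed as T.TODO here:

// Read the current options of a data stream (name is a placeholder).
const current = await client.indices.getDataStreamOptions({ name: 'logs-app-default' })

// Update the options; the body shape is an assumption, not defined by this change.
await client.indices.putDataStreamOptions({
  name: 'logs-app-default',
  body: { failure_store: { enabled: true } }
})

// Remove the options again.
await client.indices.deleteDataStreamOptions({ name: 'logs-app-default' })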
/**
* Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template | Elasticsearch API documentation}

@ -77,14 +77,19 @@ export default class Inference {
body: [],
query: []
},
'inference.post_eis_chat_completion': {
'inference.inference': {
path: [
'eis_inference_id'
'task_type',
'inference_id'
],
body: [
'chat_completion_request'
'query',
'input',
'task_settings'
],
query: []
query: [
'timeout'
]
},
'inference.put': {
path: [
@ -174,17 +179,6 @@ export default class Inference {
],
query: []
},
'inference.put_eis': {
path: [
'task_type',
'eis_inference_id'
],
body: [
'service',
'service_settings'
],
query: []
},
'inference.put_elasticsearch': {
path: [
'task_type',
@ -370,7 +364,7 @@ export default class Inference {
}

/**
* Perform chat completion inference
* Perform chat completion inference. The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation}
*/
async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>
@ -584,27 +578,37 @@ export default class Inference {
}

/**
* Perform a chat completion task through the Elastic Inference Service (EIS). Perform a chat completion inference task with the `elastic` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion | Elasticsearch API documentation}
* Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation}
*/
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePostEisChatCompletionResponse>
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePostEisChatCompletionResponse, unknown>>
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<T.InferencePostEisChatCompletionResponse>
async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<any> {
async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceInferenceResponse>
async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>
async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>
async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
} = this.acceptedParams['inference.post_eis_chat_completion']
} = this.acceptedParams['inference.inference']

const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

let body: any = params.body ?? undefined
let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}

for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body = params[key]
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
@ -619,19 +623,27 @@ export default class Inference {
}
}

const method = 'POST'
const path = `/_inference/chat_completion/${encodeURIComponent(params.eis_inference_id.toString())}/_stream`
let method = ''
let path = ''
if (params.task_type != null && params.inference_id != null) {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}`
} else {
method = 'POST'
path = `/_inference/${encodeURIComponent(params.inference_id.toString())}`
}
const meta: TransportRequestMetadata = {
name: 'inference.post_eis_chat_completion',
name: 'inference.inference',
pathParts: {
eis_inference_id: params.eis_inference_id
task_type: params.task_type,
inference_id: params.inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}
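A minimal usage sketch of the restored inference method, derived from the accepted parameters above (task_type and inference_id as path parts; query, input, and task_settings in the body); the endpoint ID and input text are placeholders:

// task_type is optional: with it the request goes to /_inference/{task_type}/{inference_id},
// without it to /_inference/{inference_id}.
const result = await client.inference.inference({
  task_type: 'text_embedding',
  inference_id: 'my-embedding-endpoint', // hypothetical endpoint ID
  input: ['Elasticsearch is a distributed search and analytics engine']
})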
/**
* Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation}
*/
async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>
@ -686,7 +698,7 @@ export default class Inference {
}

/**
* Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation}
*/
async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAlibabacloudResponse>
@ -744,7 +756,7 @@ export default class Inference {
}

/**
* Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation}
*/
async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAmazonbedrockResponse>
@ -802,7 +814,7 @@ export default class Inference {
}

/**
* Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation}
*/
async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAnthropicResponse>
@ -860,7 +872,7 @@ export default class Inference {
}

/**
* Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation}
*/
async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureaistudioResponse>
@ -918,7 +930,7 @@ export default class Inference {
}

/**
* Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation}
*/
async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureopenaiResponse>
@ -976,7 +988,7 @@ export default class Inference {
}

/**
* Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation}
*/
async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutCohereResponse>
@ -1033,64 +1045,6 @@ export default class Inference {
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis | Elasticsearch API documentation}
*/
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutEisResponse>
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutEisResponse, unknown>>
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<T.InferencePutEisResponse>
async putEis (this: That, params: T.InferencePutEisRequest, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
} = this.acceptedParams['inference.put_eis']

const userQuery = params?.querystring
const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}

let body: Record<string, any> | string | undefined
const userBody = params?.body
if (userBody != null) {
if (typeof userBody === 'string') {
body = userBody
} else {
body = { ...userBody }
}
}

for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
// @ts-expect-error
querystring[key] = params[key]
} else {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
}
}
}

const method = 'PUT'
const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}`
const meta: TransportRequestMetadata = {
name: 'inference.put_eis',
pathParts: {
task_type: params.task_type,
eis_inference_id: params.eis_inference_id
}
}
return await this.transport.request({ path, method, querystring, body, meta }, options)
}

/**
* Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the endpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation}
@ -1208,7 +1162,7 @@ export default class Inference {
}

/**
* Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation}
*/
async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGoogleaistudioResponse>
@ -1266,7 +1220,7 @@ export default class Inference {
}

/**
* Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation}
*/
async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGooglevertexaiResponse>
@ -1324,7 +1278,7 @@ export default class Inference {
}

/**
* Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small`
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation}
*/
async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutHuggingFaceResponse>
@ -1382,7 +1336,7 @@ export default class Inference {
}

/**
* Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation}
*/
async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutJinaaiResponse>
@ -1440,7 +1394,7 @@ export default class Inference {
}

/**
* Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation}
*/
async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutMistralResponse>
@ -1498,7 +1452,7 @@ export default class Inference {
}

/**
* Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation}
*/
async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutOpenaiResponse>
@ -1614,7 +1568,7 @@ export default class Inference {
}

/**
* Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
* Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation}
*/
async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutWatsonxResponse>

@ -296,7 +296,7 @@ export default class Ingest {

/**
* Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-processor.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/reference/enrich-processor/geoip-processor | Elasticsearch API documentation}
*/
async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGeoIpStatsResponse>
async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGeoIpStatsResponse, unknown>>
@ -498,7 +498,7 @@ export default class Ingest {

/**
* Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/reference/enrich-processor/grok-processor | Elasticsearch API documentation}
*/
async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestProcessorGrokResponse>
async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestProcessorGrokResponse, unknown>>
@ -645,7 +645,7 @@ export default class Ingest {

/**
* Create or update a pipeline. Changes made using this API take effect immediately.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines | Elasticsearch API documentation}
*/
async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestPutPipelineResponse>
async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestPutPipelineResponse, unknown>>
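A minimal putPipeline usage sketch, assuming a configured client; the pipeline ID, description, and processor are illustrative only:

await client.ingest.putPipeline({
  id: 'my-geoip-pipeline', // hypothetical pipeline ID
  description: 'Enrich documents with geo data derived from the client IP',
  processors: [
    { geoip: { field: 'client_ip' } }
  ]
})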
@ -26,39 +26,26 @@ interface That {
transport: Transport
}

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
knn_search: {
path: [
'index'
],
body: [
'_source',
'docvalue_fields',
'stored_fields',
'fields',
'filter',
'knn'
],
query: [
'routing'
]
body: [],
query: []
}
}

/**
* Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/knn-search-api.html | Elasticsearch API documentation}
* Performs a kNN search.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html | Elasticsearch API documentation}
*/
export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.KnnSearchResponse<TDocument>>
export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.KnnSearchResponse<TDocument>, unknown>>
export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<T.KnnSearchResponse<TDocument>>
export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise<any> {
export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
const {
path: acceptedPath,
body: acceptedBody,
query: acceptedQuery
path: acceptedPath
} = acceptedParams.knn_search

const userQuery = params?.querystring
@ -74,22 +61,12 @@ export default async function KnnSearchApi<TDocument = unknown> (this: That, par
}
}

params = params ?? {}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body' && key !== 'querystring') {
if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
// @ts-expect-error
querystring[key] = params[key]
} else {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
}
querystring[key] = params[key]
}
}
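Since the standalone kNN search endpoint is stubbed out here and its documentation points to the knn option of the search API, a minimal replacement sketch looks like this; the index name, field, and vector values are placeholders:

const hits = await client.search({
  index: 'my-vectors', // hypothetical index with a dense_vector field
  knn: {
    field: 'embedding',
    query_vector: [0.12, -0.34, 0.56],
    k: 10,
    num_candidates: 100
  }
})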
@ -2230,7 +2230,7 @@ export default class Ml {
}

/**
* Get data frame analytics jobs usage info.
* Get data frame analytics job stats.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation}
*/
async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
@ -2283,7 +2283,7 @@ export default class Ml {
}

/**
* Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
* Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation}
*/
async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
@ -2499,7 +2499,7 @@ export default class Ml {
}

/**
* Get anomaly detection jobs usage info.
* Get anomaly detection job stats.
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats | Elasticsearch API documentation}
*/
async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>

@ -42,7 +42,7 @@ const acceptedParams: Record<string, { path: string[], body: string[], query: st

/**
* Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation}
* @see {@link https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples | Elasticsearch API documentation}
*/
export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScriptsPainlessExecuteResponse<TResult>>
export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScriptsPainlessExecuteResponse<TResult>, unknown>>
|
||||
|
||||
@ -55,8 +55,7 @@ export default class Snapshot {
|
||||
'indices'
|
||||
],
|
||||
query: [
|
||||
'master_timeout',
|
||||
'timeout'
|
||||
'master_timeout'
|
||||
]
|
||||
},
|
||||
'snapshot.create': {
|
||||
|
||||
@ -48,7 +48,9 @@ export default class Synonyms {
|
||||
'rule_id'
|
||||
],
|
||||
body: [],
|
||||
query: []
|
||||
query: [
|
||||
'refresh'
|
||||
]
|
||||
},
|
||||
'synonyms.get_synonym': {
|
||||
path: [
|
||||
@ -83,7 +85,9 @@ export default class Synonyms {
|
||||
body: [
|
||||
'synonyms_set'
|
||||
],
|
||||
query: []
|
||||
query: [
|
||||
'refresh'
|
||||
]
|
||||
},
|
||||
'synonyms.put_synonym_rule': {
|
||||
path: [
|
||||
@ -93,7 +97,9 @@ export default class Synonyms {
|
||||
body: [
|
||||
'synonyms'
|
||||
],
|
||||
query: []
|
||||
query: [
|
||||
'refresh'
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
src/api/types.ts (804 changed lines — diff suppressed because it is too large)
@ -132,7 +132,7 @@ export interface ClientOptions {
|
||||
* @defaultValue null */
|
||||
agent?: HttpAgentOptions | UndiciAgentOptions | agentFn | false
|
||||
/** @property nodeFilter A custom function used by the connection pool to determine which nodes are qualified to receive a request
|
||||
* @defaultValue () => true */
|
||||
* @defaultValue A function that uses the Connection `roles` property to avoid master-only nodes */
|
||||
nodeFilter?: nodeFilterFn
|
||||
/** @property nodeSelector A custom function used by the connection pool to determine which node should receive the next request
|
||||
* @defaultValue A "round robin" function that loops sequentially through each node in the pool. */
|
||||
@ -203,10 +203,32 @@ export default class Client extends API {
|
||||
if ((opts.cloud != null || opts.serverMode === 'serverless') && opts[kChild] === undefined) {
|
||||
if (opts.cloud != null) {
|
||||
const { id } = opts.cloud
|
||||
if (typeof id !== 'string') {
|
||||
throw new errors.ConfigurationError('Cloud ID must be a string.')
|
||||
}
|
||||
|
||||
const parts = id.split(':')
|
||||
if (parts.length !== 2 || parts[1] === '') {
|
||||
throw new errors.ConfigurationError(
|
||||
'Cloud ID must be in the format "name:base64string".'
|
||||
)
|
||||
}
|
||||
|
||||
// the cloud id is `cluster-name:base64encodedurl`
|
||||
// the url is a string divided by two '$', the first is the cloud url
|
||||
// the second the elasticsearch instance, the third the kibana instance
|
||||
const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$')
|
||||
|
||||
let cloudUrls
|
||||
try {
|
||||
cloudUrls = Buffer.from(parts[1], 'base64').toString().split('$')
|
||||
} catch (err) {
|
||||
throw new errors.ConfigurationError('Cloud ID base64 decoding failed.')
|
||||
}
|
||||
if (cloudUrls.length < 2 || cloudUrls[0] === '' || cloudUrls[1] === '') {
|
||||
throw new errors.ConfigurationError(
|
||||
'Cloud ID base64 must contain at least two "$" separated parts: "<cloudUrl>$<esId>[$<kibanaId>]".'
|
||||
)
|
||||
}
|
||||
|
||||
opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}`
|
||||
}
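For illustration only (hypothetical values, not part of this diff): a minimal sketch of how a Cloud ID of the form `name:base64("<cloudUrl>$<esId>[$<kibanaId>]")` resolves to the node URL the client connects to.

// Sketch with made-up identifiers; the real decoding lives in the constructor above.
const cloudId = 'my-cluster:' + Buffer.from('us-east-1.example.cloud$abc123$def456').toString('base64')
const [, encoded] = cloudId.split(':')
const [cloudUrl, esId] = Buffer.from(encoded, 'base64').toString().split('$')
const node = `https://${esId}.${cloudUrl}`
// node === 'https://abc123.us-east-1.example.cloud'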
|
||||
|
||||
@ -11,7 +11,7 @@ import assert from 'node:assert'
|
||||
import * as timersPromises from 'node:timers/promises'
|
||||
import { Readable } from 'node:stream'
|
||||
import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport'
|
||||
import { Table, TypeMap, tableFromIPC, RecordBatchStreamReader } from 'apache-arrow/Arrow.node'
|
||||
import { Table, TypeMap, tableFromIPC, AsyncRecordBatchStreamReader } from 'apache-arrow/Arrow.node'
|
||||
import Client from './client'
|
||||
import * as T from './api/types'
|
||||
import { Id } from './api/types'
|
||||
@ -132,19 +132,10 @@ export interface EsqlColumn {
|
||||
type: string
|
||||
}
|
||||
|
||||
export type EsqlValue = any[]
|
||||
|
||||
export type EsqlRow = EsqlValue[]
|
||||
|
||||
export interface EsqlResponse {
|
||||
columns: EsqlColumn[]
|
||||
values: EsqlRow[]
|
||||
}
|
||||
|
||||
export interface EsqlHelper {
|
||||
toRecords: <TDocument>() => Promise<EsqlToRecords<TDocument>>
|
||||
toArrowTable: () => Promise<Table<TypeMap>>
|
||||
toArrowReader: () => Promise<RecordBatchStreamReader>
|
||||
toArrowReader: () => Promise<AsyncRecordBatchStreamReader>
|
||||
}
|
||||
|
||||
export interface EsqlToRecords<TDocument> {
|
||||
@ -963,7 +954,7 @@ export default class Helpers {
|
||||
esql (params: T.EsqlQueryRequest, reqOptions: TransportRequestOptions = {}): EsqlHelper {
|
||||
const client = this[kClient]
|
||||
|
||||
function toRecords<TDocument> (response: EsqlResponse): TDocument[] {
|
||||
function toRecords<TDocument> (response: T.EsqlEsqlResult): TDocument[] {
|
||||
const { columns, values } = response
|
||||
return values.map(row => {
|
||||
const doc: Partial<TDocument> = {}
|
||||
@ -990,8 +981,7 @@ export default class Helpers {
|
||||
|
||||
params.format = 'json'
|
||||
params.columnar = false
|
||||
// @ts-expect-error it's typed as ArrayBuffer but we know it will be JSON
|
||||
const response: EsqlResponse = await client.esql.query(params, reqOptions)
|
||||
const response = await client.esql.query(params, reqOptions)
|
||||
const records: TDocument[] = toRecords(response)
|
||||
const { columns } = response
|
||||
return { records, columns }
|
||||
@ -1005,11 +995,12 @@ export default class Helpers {
|
||||
|
||||
params.format = 'arrow'
|
||||
|
||||
const response = await client.esql.query(params, reqOptions)
|
||||
// @ts-expect-error the return type will be ArrayBuffer when the format is set to 'arrow'
|
||||
const response: ArrayBuffer = await client.esql.query(params, reqOptions)
|
||||
return tableFromIPC(response)
|
||||
},
|
||||
|
||||
async toArrowReader (): Promise<RecordBatchStreamReader> {
|
||||
async toArrowReader (): Promise<AsyncRecordBatchStreamReader> {
|
||||
if (metaHeader !== null) {
|
||||
reqOptions.headers = reqOptions.headers ?? {}
|
||||
reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa`
|
||||
@ -1018,8 +1009,9 @@ export default class Helpers {
|
||||
|
||||
params.format = 'arrow'
|
||||
|
||||
const response = await client.esql.query(params, reqOptions)
|
||||
return RecordBatchStreamReader.from(response)
|
||||
// @ts-expect-error response is a Readable when asStream is true
|
||||
const response: Readable = await client.esql.query(params, reqOptions)
|
||||
return await AsyncRecordBatchStreamReader.from(Readable.from(response))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -10,436 +10,63 @@ process.on('unhandledRejection', function (err) {
|
||||
process.exit(1)
|
||||
})
|
||||
|
||||
const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs')
|
||||
const { join, sep } = require('path')
|
||||
const yaml = require('js-yaml')
|
||||
const minimist = require('minimist')
|
||||
const ms = require('ms')
|
||||
const { Client } = require('../../index')
|
||||
const build = require('./test-runner')
|
||||
const { sleep } = require('./helper')
|
||||
const createJunitReporter = require('./reporter')
|
||||
const assert = require('node:assert')
|
||||
const url = require('node:url')
|
||||
const fs = require('node:fs')
|
||||
const path = require('node:path')
|
||||
const globby = require('globby')
|
||||
const semver = require('semver')
|
||||
const downloadArtifacts = require('../../scripts/download-artifacts')
|
||||
|
||||
const yamlFolder = downloadArtifacts.locations.freeTestFolder
|
||||
const xPackYamlFolder = downloadArtifacts.locations.xPackTestFolder
|
||||
const buildTests = require('./test-builder')
|
||||
|
||||
const MAX_API_TIME = 1000 * 90
|
||||
const MAX_FILE_TIME = 1000 * 30
|
||||
const MAX_TEST_TIME = 1000 * 3
|
||||
const yamlFolder = downloadArtifacts.locations.testYamlFolder
|
||||
|
||||
const options = minimist(process.argv.slice(2), {
|
||||
boolean: ['bail'],
|
||||
string: ['suite', 'test']
|
||||
})
|
||||
|
||||
const freeSkips = {
|
||||
// working on fixes for these
|
||||
'/free/aggregations/bucket_selector.yml': ['bad script'],
|
||||
'/free/aggregations/bucket_script.yml': ['bad script'],
|
||||
|
||||
// either the YAML test definition is wrong, or this fails because JSON.stringify is coercing "1.0" to "1"
|
||||
'/free/aggregations/percentiles_bucket.yml': ['*'],
|
||||
|
||||
// not supported yet
|
||||
'/free/cluster.desired_nodes/10_basic.yml': ['*'],
|
||||
|
||||
// Cannot find methods on `Internal` object
|
||||
'/free/cluster.desired_balance/10_basic.yml': ['*'],
|
||||
'/free/cluster.desired_nodes/20_dry_run.yml': ['*'],
|
||||
'/free/cluster.prevalidate_node_removal/10_basic.yml': ['*'],
|
||||
|
||||
// the v8 client never sends the scroll_id in querystring,
|
||||
// the way the test is structured causes a security exception
|
||||
'free/scroll/10_basic.yml': ['Body params override query string'],
|
||||
'free/scroll/11_clear.yml': [
|
||||
'Body params with array param override query string',
|
||||
'Body params with string param scroll id override query string'
|
||||
],
|
||||
'free/cat.allocation/10_basic.yml': ['*'],
|
||||
'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'],
|
||||
|
||||
'indices.stats/50_disk_usage.yml': ['Disk usage stats'],
|
||||
'indices.stats/60_field_usage.yml': ['Field usage stats'],
|
||||
|
||||
// skipping because we are booting ES with `discovery.type=single-node`
|
||||
// and this test will fail because of this configuration
|
||||
'nodes.stats/30_discovery.yml': ['*'],
|
||||
|
||||
// the expected error is returning a 503,
|
||||
// which triggers a retry and the node to be marked as dead
|
||||
'search.aggregation/240_max_buckets.yml': ['*'],
|
||||
|
||||
// long values and json do not play nicely together
|
||||
'search.aggregation/40_range.yml': ['Min and max long range bounds'],
|
||||
|
||||
// the yaml runner assumes that null means "does not exist",
|
||||
// while null is a valid json value, so the check will fail
|
||||
'search/320_disallow_queries.yml': ['Test disallow expensive queries'],
|
||||
'free/tsdb/90_unsupported_operations.yml': ['noop update']
|
||||
}
|
||||
|
||||
const platinumDenyList = {
|
||||
'api_key/10_basic.yml': ['Test get api key'],
|
||||
'api_key/20_query.yml': ['*'],
|
||||
'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'],
|
||||
'analytics/histogram.yml': ['Histogram requires values in increasing order'],
|
||||
|
||||
// object keys must be strings, and `0.0.toString()` is `0`
|
||||
'ml/evaluate_data_frame.yml': [
|
||||
'Test binary_soft_classifition precision',
|
||||
'Test binary_soft_classifition recall',
|
||||
'Test binary_soft_classifition confusion_matrix'
|
||||
],
|
||||
|
||||
// The cleanup fails with a index not found when retrieving the jobs
|
||||
'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'],
|
||||
'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'],
|
||||
|
||||
// start should be a string
|
||||
'ml/jobs_get_result_overall_buckets.yml': ['Test overall buckets given epoch start and end params'],
|
||||
|
||||
// this can't happen with the client
|
||||
'ml/start_data_frame_analytics.yml': ['Test start with inconsistent body/param ids'],
|
||||
'ml/stop_data_frame_analytics.yml': ['Test stop with inconsistent body/param ids'],
|
||||
'ml/preview_datafeed.yml': ['*'],
|
||||
|
||||
// Investigate why is failing
|
||||
'ml/inference_crud.yml': ['*'],
|
||||
'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'],
|
||||
'ml/filter_crud.yml': ['*'],
|
||||
|
||||
// investigate why this is failing
|
||||
'monitoring/bulk/10_basic.yml': ['*'],
|
||||
'monitoring/bulk/20_privileges.yml': ['*'],
|
||||
'license/20_put_license.yml': ['*'],
|
||||
'snapshot/10_basic.yml': ['*'],
|
||||
'snapshot/20_operator_privileges_disabled.yml': ['*'],
|
||||
|
||||
// the body is correct, but the regex is failing
|
||||
'sql/sql.yml': ['Getting textual representation'],
|
||||
'searchable_snapshots/10_usage.yml': ['*'],
|
||||
'service_accounts/10_basic.yml': ['*'],
|
||||
|
||||
// we are setting two certificates in the docker config
|
||||
'ssl/10_basic.yml': ['*'],
|
||||
'token/10_basic.yml': ['*'],
|
||||
'token/11_invalidation.yml': ['*'],
|
||||
|
||||
// very likely, the index template has not been loaded yet.
|
||||
// we should run an indices.existsTemplate, but the name of the
|
||||
// template may vary during time.
|
||||
'transforms_crud.yml': [
|
||||
'Test basic transform crud',
|
||||
'Test transform with query and array of indices in source',
|
||||
'Test PUT continuous transform',
|
||||
'Test PUT continuous transform without delay set'
|
||||
],
|
||||
'transforms_force_delete.yml': [
|
||||
'Test force deleting a running transform'
|
||||
],
|
||||
'transforms_cat_apis.yml': ['*'],
|
||||
'transforms_start_stop.yml': ['*'],
|
||||
'transforms_stats.yml': ['*'],
|
||||
'transforms_stats_continuous.yml': ['*'],
|
||||
'transforms_update.yml': ['*'],
|
||||
|
||||
// js does not support ulongs
|
||||
'unsigned_long/10_basic.yml': ['*'],
|
||||
'unsigned_long/20_null_value.yml': ['*'],
|
||||
'unsigned_long/30_multi_fields.yml': ['*'],
|
||||
'unsigned_long/40_different_numeric.yml': ['*'],
|
||||
'unsigned_long/50_script_values.yml': ['*'],
|
||||
|
||||
// the v8 client flattens the body into the parent object
|
||||
'platinum/users/10_basic.yml': ['Test put user with different username in body'],
|
||||
|
||||
// docker issue?
|
||||
'watcher/execute_watch/60_http_input.yml': ['*'],
|
||||
|
||||
// the checks are correct, but for some reason the test is failing on js side
|
||||
// I bet it is because of the backslashes in the regex
|
||||
'watcher/execute_watch/70_invalid.yml': ['*'],
|
||||
'watcher/put_watch/10_basic.yml': ['*'],
|
||||
'xpack/15_basic.yml': ['*'],
|
||||
|
||||
// test that are failing that needs to be investigated
|
||||
// the error cause can either be in the yaml test or in the specification
|
||||
|
||||
// start should be a string in the yaml test
|
||||
'platinum/ml/delete_job_force.yml': ['Test force delete an open job that is referred by a started datafeed'],
|
||||
'platinum/ml/evaluate_data_frame.yml': ['*'],
|
||||
'platinum/ml/get_datafeed_stats.yml': ['*'],
|
||||
|
||||
// start should be a string in the yaml test
|
||||
'platinum/ml/start_stop_datafeed.yml': ['*']
|
||||
}
|
||||
|
||||
function runner (opts = {}) {
|
||||
const options = { node: opts.node }
|
||||
if (opts.isXPack) {
|
||||
options.tls = {
|
||||
ca: readFileSync(join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'),
|
||||
rejectUnauthorized: false
|
||||
const getAllFiles = async dir => {
|
||||
const files = await globby(dir, {
|
||||
expandDirectories: {
|
||||
extensions: ['yml', 'yaml']
|
||||
}
|
||||
}
|
||||
const client = new Client(options)
|
||||
log('Loading yaml suite')
|
||||
start({ client, isXPack: opts.isXPack })
|
||||
.catch(err => {
|
||||
if (err.name === 'ResponseError') {
|
||||
console.error(err)
|
||||
console.log(JSON.stringify(err.meta, null, 2))
|
||||
} else {
|
||||
console.error(err)
|
||||
}
|
||||
process.exit(1)
|
||||
})
|
||||
})
|
||||
return files.sort()
|
||||
}
|
||||
|
||||
async function waitCluster (client, times = 0) {
|
||||
try {
|
||||
await client.cluster.health({ wait_for_status: 'green', timeout: '50s' })
|
||||
} catch (err) {
|
||||
if (++times < 10) {
|
||||
await sleep(5000)
|
||||
return waitCluster(client, times)
|
||||
}
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
async function start ({ client, isXPack }) {
|
||||
log('Waiting for Elasticsearch')
|
||||
await waitCluster(client)
|
||||
|
||||
const body = await client.info()
|
||||
const { number: version, build_hash: hash } = body.version
|
||||
|
||||
log(`Downloading artifacts for hash ${hash}...`)
|
||||
await downloadArtifacts({ hash, version })
|
||||
|
||||
log(`Testing ${isXPack ? 'Platinum' : 'Free'} api...`)
|
||||
const junit = createJunitReporter()
|
||||
const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'Platinum' : 'Free'} api`)
|
||||
|
||||
const stats = {
|
||||
total: 0,
|
||||
skip: 0,
|
||||
pass: 0,
|
||||
assertions: 0
|
||||
}
|
||||
const folders = getAllFiles(isXPack ? xPackYamlFolder : yamlFolder)
|
||||
.filter(t => !/(README|TODO)/g.test(t))
|
||||
// we cluster the array based on the folder names,
|
||||
// to provide a better test log output
|
||||
.reduce((arr, file) => {
|
||||
const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/'))
|
||||
let inserted = false
|
||||
for (let i = 0; i < arr.length; i++) {
|
||||
if (arr[i][0].includes(path)) {
|
||||
inserted = true
|
||||
arr[i].push(file)
|
||||
break
|
||||
}
|
||||
}
|
||||
if (!inserted) arr.push([file])
|
||||
return arr
|
||||
}, [])
|
||||
|
||||
const totalTime = now()
|
||||
for (const folder of folders) {
|
||||
// pretty name
|
||||
const apiName = folder[0].slice(
|
||||
folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19,
|
||||
folder[0].lastIndexOf(sep)
|
||||
)
|
||||
|
||||
log('Testing ' + apiName.slice(1))
|
||||
const apiTime = now()
|
||||
|
||||
for (const file of folder) {
|
||||
const testRunner = build({
|
||||
client,
|
||||
version,
|
||||
isXPack: file.includes('platinum')
|
||||
})
|
||||
const fileTime = now()
|
||||
const data = readFileSync(file, 'utf8')
|
||||
// get the test yaml (as object); some files have multiple yaml documents inside,
|
||||
// every document is separated by '---', so we split on the separator
|
||||
// and then we remove the empty strings, finally we parse them
|
||||
const tests = data
|
||||
.split('\n---\n')
|
||||
.map(s => s.trim())
|
||||
// empty strings
|
||||
.filter(Boolean)
|
||||
.map(parse)
|
||||
// null values
|
||||
.filter(Boolean)
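Illustrative only (made-up file contents): how a multi-document YAML string splits on the `---` separator before each document is parsed, mirroring the chain above.

import * as yaml from 'js-yaml'

// Hypothetical file with a shared setup document and a single test document.
const data = 'setup:\n  - do:\n      indices.create:\n        index: test\n---\n"simple search":\n  - do:\n      search:\n        index: test\n'

const docs = data
  .split('\n---\n')
  .map(s => s.trim())
  .filter(Boolean) // drop empty chunks (for example a trailing separator)
  .map(s => yaml.load(s, { schema: yaml.CORE_SCHEMA }))
  .filter(Boolean) // drop chunks that parse to null

// docs[0] holds the shared `setup` actions, docs[1] the "simple search" test.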
|
||||
|
||||
// get setup and teardown if present
|
||||
let setupTest = null
|
||||
let teardownTest = null
|
||||
for (const test of tests) {
|
||||
if (test.setup) setupTest = test.setup
|
||||
if (test.teardown) teardownTest = test.teardown
|
||||
}
|
||||
|
||||
const cleanPath = file.slice(file.lastIndexOf(apiName))
|
||||
|
||||
// skip if --suite CLI arg doesn't match
|
||||
if (options.suite && !cleanPath.endsWith(options.suite)) continue
|
||||
|
||||
log(' ' + cleanPath)
|
||||
const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath)
|
||||
|
||||
for (const test of tests) {
|
||||
const testTime = now()
|
||||
const name = Object.keys(test)[0]
|
||||
|
||||
// skip setups, teardowns and anything that doesn't match --test flag when present
|
||||
if (name === 'setup' || name === 'teardown') continue
|
||||
if (options.test && !name.endsWith(options.test)) continue
|
||||
|
||||
const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}: ${cleanPath}`)
|
||||
|
||||
stats.total += 1
|
||||
if (shouldSkip(isXPack, file, name)) {
|
||||
stats.skip += 1
|
||||
junitTestCase.skip('This test is in the skip list of the client')
|
||||
junitTestCase.end()
|
||||
continue
|
||||
}
|
||||
log(' - ' + name)
|
||||
try {
|
||||
await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase)
|
||||
stats.pass += 1
|
||||
} catch (err) {
|
||||
junitTestCase.failure(err)
|
||||
junitTestCase.end()
|
||||
junitTestSuite.end()
|
||||
junitTestSuites.end()
|
||||
generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free')
|
||||
err.meta = JSON.stringify(err.meta ?? {}, null, 2)
|
||||
console.error(err)
|
||||
|
||||
if (options.bail) {
|
||||
process.exit(1)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
const totalTestTime = now() - testTime
|
||||
junitTestCase.end()
|
||||
if (totalTestTime > MAX_TEST_TIME) {
|
||||
log(' took too long: ' + ms(totalTestTime))
|
||||
} else {
|
||||
log(' took: ' + ms(totalTestTime))
|
||||
}
|
||||
}
|
||||
junitTestSuite.end()
|
||||
const totalFileTime = now() - fileTime
|
||||
if (totalFileTime > MAX_FILE_TIME) {
|
||||
log(` ${cleanPath} took too long: ` + ms(totalFileTime))
|
||||
} else {
|
||||
log(` ${cleanPath} took: ` + ms(totalFileTime))
|
||||
}
|
||||
}
|
||||
const totalApiTime = now() - apiTime
|
||||
if (totalApiTime > MAX_API_TIME) {
|
||||
log(`${apiName} took too long: ` + ms(totalApiTime))
|
||||
} else {
|
||||
log(`${apiName} took: ` + ms(totalApiTime))
|
||||
}
|
||||
}
|
||||
junitTestSuites.end()
|
||||
generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free')
|
||||
log(`Total testing time: ${ms(now() - totalTime)}`)
|
||||
log(`Test stats:
|
||||
- Total: ${stats.total}
|
||||
- Skip: ${stats.skip}
|
||||
- Pass: ${stats.pass}
|
||||
- Fail: ${stats.total - (stats.pass + stats.skip)}
|
||||
- Assertions: ${stats.assertions}
|
||||
`)
|
||||
}
|
||||
|
||||
function log (text) {
|
||||
process.stdout.write(text + '\n')
|
||||
}
|
||||
|
||||
function now () {
|
||||
const ts = process.hrtime()
|
||||
return (ts[0] * 1e3) + (ts[1] / 1e6)
|
||||
}
|
||||
|
||||
function parse (data) {
|
||||
let doc
|
||||
try {
|
||||
doc = yaml.load(data, { schema: yaml.CORE_SCHEMA })
|
||||
} catch (err) {
|
||||
console.error(err)
|
||||
return
|
||||
}
|
||||
return doc
|
||||
}
|
||||
|
||||
function generateJunitXmlReport (junit, suite) {
|
||||
writeFileSync(
|
||||
join(__dirname, '..', '..', `${suite}-report-junit.xml`),
|
||||
junit.prettyPrint()
|
||||
)
|
||||
async function doTestBuilder (version, clientOptions) {
|
||||
await downloadArtifacts(undefined, version)
|
||||
const files = await getAllFiles(yamlFolder)
|
||||
await buildTests(files, clientOptions)
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
const scheme = process.env.TEST_SUITE === 'platinum' ? 'https' : 'http'
|
||||
const node = process.env.TEST_ES_SERVER || `${scheme}://elastic:changeme@localhost:9200`
|
||||
const opts = {
|
||||
node,
|
||||
isXPack: process.env.TEST_SUITE !== 'free'
|
||||
const node = process.env.TEST_ES_SERVER
|
||||
const apiKey = process.env.ES_API_SECRET_KEY
|
||||
const password = process.env.ELASTIC_PASSWORD
|
||||
let version = process.env.STACK_VERSION
|
||||
|
||||
assert(node != null, 'Environment variable missing: TEST_ES_SERVER')
|
||||
assert(apiKey != null || password != null, 'Environment variable missing: ES_API_SECRET_KEY or ELASTIC_PASSWORD')
|
||||
assert(version != null, 'Environment variable missing: STACK_VERSION')
|
||||
|
||||
version = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version)
|
||||
|
||||
const clientOptions = { node }
|
||||
if (apiKey != null) {
|
||||
clientOptions.auth = { apiKey }
|
||||
} else {
|
||||
clientOptions.auth = { username: 'elastic', password }
|
||||
}
|
||||
runner(opts)
|
||||
}
|
||||
|
||||
const shouldSkip = (isXPack, file, name) => {
|
||||
if (options.suite || options.test) return false
|
||||
|
||||
let list = Object.keys(freeSkips)
|
||||
for (let i = 0; i < list.length; i++) {
|
||||
const freeTest = freeSkips[list[i]]
|
||||
for (let j = 0; j < freeTest.length; j++) {
|
||||
if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) {
|
||||
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
|
||||
log(`Skipping test ${testName} because it is denylisted in the free test suite`)
|
||||
return true
|
||||
}
|
||||
const nodeUrl = new url.URL(node)
|
||||
if (nodeUrl.protocol === 'https:') {
|
||||
clientOptions.tls = {
|
||||
ca: fs.readFileSync(path.join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'),
|
||||
rejectUnauthorized: false
|
||||
}
|
||||
}
|
||||
|
||||
if (file.includes('x-pack') || isXPack) {
|
||||
list = Object.keys(platinumDenyList)
|
||||
for (let i = 0; i < list.length; i++) {
|
||||
const platTest = platinumDenyList[list[i]]
|
||||
for (let j = 0; j < platTest.length; j++) {
|
||||
if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) {
|
||||
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
|
||||
log(`Skipping test ${testName} because it is denylisted in the platinum test suite`)
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
doTestBuilder(version, clientOptions)
|
||||
.then(() => process.exit(0))
|
||||
.catch(err => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
||||
}
|
||||
|
||||
const getAllFiles = dir =>
|
||||
readdirSync(dir).reduce((files, file) => {
|
||||
const name = join(dir, file)
|
||||
const isDirectory = statSync(name).isDirectory()
|
||||
return isDirectory ? [...files, ...getAllFiles(name)] : [...files, name]
|
||||
}, [])
|
||||
|
||||
module.exports = runner
|
||||
|
||||
@ -1,115 +0,0 @@
|
||||
/*
|
||||
* Copyright Elasticsearch B.V. and contributors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const assert = require('node:assert')
|
||||
const { create } = require('xmlbuilder2')
|
||||
|
||||
function createJunitReporter () {
|
||||
const report = {}
|
||||
|
||||
return { testsuites, prettyPrint }
|
||||
|
||||
function prettyPrint () {
|
||||
return create(report).end({ prettyPrint: true })
|
||||
}
|
||||
|
||||
function testsuites (name) {
|
||||
assert(name, 'The testsuites name is required')
|
||||
assert(report.testsuites === undefined, 'Cannot set more than one testsuites block')
|
||||
const startTime = Date.now()
|
||||
|
||||
report.testsuites = {
|
||||
'@id': new Date().toISOString(),
|
||||
'@name': name
|
||||
}
|
||||
|
||||
const testsuiteList = []
|
||||
|
||||
return {
|
||||
testsuite: createTestSuite(testsuiteList),
|
||||
end () {
|
||||
report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000)
|
||||
report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => {
|
||||
acc += val['@tests']
|
||||
return acc
|
||||
}, 0)
|
||||
report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => {
|
||||
acc += val['@failures']
|
||||
return acc
|
||||
}, 0)
|
||||
report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => {
|
||||
acc += val['@skipped']
|
||||
return acc
|
||||
}, 0)
|
||||
if (testsuiteList.length) {
|
||||
report.testsuites.testsuite = testsuiteList
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createTestSuite (testsuiteList) {
|
||||
return function testsuite (name) {
|
||||
assert(name, 'The testsuite name is required')
|
||||
const startTime = Date.now()
|
||||
const suite = {
|
||||
'@id': new Date().toISOString(),
|
||||
'@name': name
|
||||
}
|
||||
const testcaseList = []
|
||||
testsuiteList.push(suite)
|
||||
return {
|
||||
testcase: createTestCase(testcaseList),
|
||||
end () {
|
||||
suite['@time'] = Math.round((Date.now() - startTime) / 1000)
|
||||
suite['@tests'] = testcaseList.length
|
||||
suite['@failures'] = testcaseList.filter(t => t.failure).length
|
||||
suite['@skipped'] = testcaseList.filter(t => t.skipped).length
|
||||
if (testcaseList.length) {
|
||||
suite.testcase = testcaseList
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createTestCase (testcaseList) {
|
||||
return function testcase (name, file) {
|
||||
assert(name, 'The testcase name is required')
|
||||
const startTime = Date.now()
|
||||
const tcase = {
|
||||
'@id': new Date().toISOString(),
|
||||
'@name': name
|
||||
}
|
||||
if (file) tcase['@file'] = file
|
||||
testcaseList.push(tcase)
|
||||
return {
|
||||
failure (error) {
|
||||
assert(error, 'The failure error object is required')
|
||||
tcase.failure = {
|
||||
'#': error.stack,
|
||||
'@message': error.message,
|
||||
'@type': error.code
|
||||
}
|
||||
},
|
||||
skip (reason) {
|
||||
if (typeof reason !== 'string') {
|
||||
reason = JSON.stringify(reason, null, 2)
|
||||
}
|
||||
tcase.skipped = {
|
||||
'#': reason
|
||||
}
|
||||
},
|
||||
end () {
|
||||
tcase['@time'] = Math.round((Date.now() - startTime) / 1000)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = createJunitReporter
|
||||
test/integration/test-builder.js (new file, 482 lines)
@ -0,0 +1,482 @@
|
||||
/*
|
||||
* Copyright Elasticsearch B.V. and contributors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const { join, sep } = require('node:path')
|
||||
const { readFileSync, writeFileSync, promises } = require('node:fs')
|
||||
const yaml = require('js-yaml')
|
||||
const { rimraf } = require('rimraf')
|
||||
const { mkdir } = promises
|
||||
|
||||
const generatedTestsPath = join(__dirname, '..', '..', 'generated-tests')
|
||||
|
||||
const stackSkips = [
|
||||
// test definition bug: response is empty string
|
||||
'cat/fielddata.yml',
|
||||
// test definition bug: response is empty string
|
||||
'cluster/delete_voting_config_exclusions.yml',
|
||||
// test definition bug: response is empty string
|
||||
'cluster/voting_config_exclusions.yml',
|
||||
// client bug: ILM request takes a "body" param, but "body" is a special keyword in the JS client
|
||||
'ilm/10_basic.yml',
|
||||
// health report is... not healthy
|
||||
'health_report.yml',
|
||||
// TODO: `contains` action only supports checking for primitives inside arrays or strings inside strings, not referenced values like objects inside arrays
|
||||
'entsearch/10_basic.yml',
|
||||
// test definition bug: error message does not match
|
||||
'entsearch/30_sync_jobs_stack.yml',
|
||||
// no handler found for uri [/knn_test/_knn_search]
|
||||
'knn_search.yml',
|
||||
// TODO: fix license on ES startup - "Operation failed: Current license is basic."
|
||||
'license/10_stack.yml',
|
||||
// response.body should be truthy. found: ""
|
||||
'logstash/10_basic.yml',
|
||||
// test definition bug? security_exception: unable to authenticate user [x_pack_rest_user] for REST request [/_ml/trained_models/test_model/definition/0]
|
||||
'machine_learning/clear_tm_deployment_cache.yml',
|
||||
// client bug: 0.99995 does not equal 0.5
|
||||
'machine_learning/data_frame_evaluate.yml',
|
||||
// test definition bug? regex has whitespace, maybe needs to be removed
|
||||
'machine_learning/explain_data_frame_analytics.yml',
|
||||
// client bug: 4 != 227
|
||||
'machine_learning/preview_datafeed.yml',
|
||||
// test definition bug: error message does not match
|
||||
'machine_learning/revert_model_snapshot.yml',
|
||||
// test definition bug: error message does not match
|
||||
'machine_learning/update_model_snapshot.yml',
|
||||
// version_conflict_engine_exception
|
||||
'machine_learning/jobs_crud.yml',
|
||||
// test definition bug: error message does not match
|
||||
'machine_learning/model_snapshots.yml',
|
||||
// test definition bug: error message does not match
|
||||
'query_rules/30_test.yml',
|
||||
// client bug: 0 != 0.1
|
||||
'script/10_basic.yml',
|
||||
// client bug: request takes a "body" param, but "body" is a special keyword in the JS client
|
||||
'searchable_snapshots/10_basic.yml',
|
||||
// test builder bug: does `match` action need to support "array contains value"?
|
||||
'security/10_api_key_basic.yml',
|
||||
// test definition bug: error message does not match
|
||||
'security/140_user.yml',
|
||||
// test definition bug: error message does not match
|
||||
'security/30_privileges_stack.yml',
|
||||
// unknown issue: $profile.enabled path doesn't exist in response
|
||||
'security/130_user_profile.yml',
|
||||
// test definition bug: error message does not match
|
||||
'security/change_password.yml',
|
||||
// test builder bug: media_type_header_exception
|
||||
'simulate/ingest.yml',
|
||||
// client bug: request takes a "body" param, but "body" is a special keyword in the JS client
|
||||
'snapshot/10_basic.yml',
|
||||
// test definition bug: illegal_argument_exception
|
||||
'sql/10_basic.yml',
|
||||
// test definition bug: illegal_argument_exception
|
||||
'text_structure/10_basic.yml',
|
||||
// test definition bug: illegal_argument_exception
|
||||
'transform/10_basic.yml',
|
||||
]
|
||||
|
||||
const serverlessSkips = [
|
||||
// TODO: sql.getAsync does not set a content-type header but ES expects one
|
||||
// transport only sets a content-type if the body is not empty
|
||||
'sql/10_basic.yml',
|
||||
// TODO: bulk call in setup fails due to "malformed action/metadata line"
|
||||
// bulk body is being sent as a Buffer, unsure if related.
|
||||
'transform/10_basic.yml',
|
||||
// TODO: scripts_painless_execute expects {"result":"0.1"}, gets {"result":"0"}
|
||||
// body sent as Buffer, unsure if related
|
||||
'script/10_basic.yml',
|
||||
// TODO: expects {"outlier_detection.auc_roc.value":0.99995}, gets {"outlier_detection.auc_roc.value":0.5}
|
||||
// remove if/when https://github.com/elastic/elasticsearch-clients-tests/issues/37 is resolved
|
||||
'machine_learning/data_frame_evaluate.yml',
|
||||
// TODO: Cannot perform requested action because job [job-crud-test-apis] is not open
|
||||
'machine_learning/jobs_crud.yml',
|
||||
// TODO: test runner needs to support ignoring 410 errors
|
||||
'enrich/10_basic.yml',
|
||||
// TODO: parameter `enabled` is not allowed in source
|
||||
// Same underlying problem as https://github.com/elastic/elasticsearch-clients-tests/issues/55
|
||||
'cluster/component_templates.yml',
|
||||
// TODO: expecting `ct_field` field mapping to be returned, but instead only finds `field`
|
||||
'indices/simulate_template.yml',
|
||||
'indices/simulate_index_template.yml',
|
||||
// TODO: test currently times out
|
||||
'inference/10_basic.yml',
|
||||
// TODO: Fix: "Trained model deployment [test_model] is not allocated to any nodes"
|
||||
'machine_learning/20_trained_model_serverless.yml',
|
||||
// TODO: query_rules api not available yet
|
||||
'query_rules/10_query_rules.yml',
|
||||
'query_rules/20_rulesets.yml',
|
||||
'query_rules/30_test.yml',
|
||||
// TODO: security.putRole API not available
|
||||
'security/50_roles_serverless.yml',
|
||||
// TODO: expected undefined to equal 'some_table'
|
||||
'entsearch/50_connector_updates.yml',
|
||||
// TODO: resource_not_found_exception
|
||||
'tasks_serverless.yml',
|
||||
]
|
||||
|
||||
function parse (data) {
|
||||
let doc
|
||||
try {
|
||||
doc = yaml.load(data, { schema: yaml.CORE_SCHEMA })
|
||||
} catch (err) {
|
||||
console.error(err)
|
||||
return
|
||||
}
|
||||
return doc
|
||||
}
|
||||
|
||||
async function build (yamlFiles, clientOptions) {
|
||||
await rimraf(generatedTestsPath)
|
||||
await mkdir(generatedTestsPath, { recursive: true })
|
||||
|
||||
for (const file of yamlFiles) {
|
||||
const apiName = file.split(`${sep}tests${sep}`)[1]
|
||||
const data = readFileSync(file, 'utf8')
|
||||
|
||||
const tests = data
|
||||
.split('\n---\n')
|
||||
.map(s => s.trim())
|
||||
// empty strings
|
||||
.filter(Boolean)
|
||||
.map(parse)
|
||||
// null values
|
||||
.filter(Boolean)
|
||||
|
||||
let code = "import { test } from 'tap'\n"
|
||||
code += "import { Client } from '@elastic/elasticsearch'\n\n"
|
||||
|
||||
const requires = tests.find(test => test.requires != null)
|
||||
let skip = new Set()
|
||||
if (requires != null) {
|
||||
const { serverless = true, stack = true } = requires.requires
|
||||
if (!serverless) skip.add('process.env.TEST_ES_SERVERLESS === "1"')
|
||||
if (!stack) skip.add('process.env.TEST_ES_STACK === "1"')
|
||||
}
|
||||
|
||||
if (stackSkips.includes(apiName)) skip.add('process.env.TEST_ES_STACK === "1"')
|
||||
if (serverlessSkips.includes(apiName)) skip.add('process.env.TEST_ES_SERVERLESS === "1"')
|
||||
|
||||
if (skip.size > 0) {
|
||||
code += `test('${apiName}', { skip: ${Array.from(skip).join(' || ')} }, t => {\n`
|
||||
} else {
|
||||
code += `test('${apiName}', t => {\n`
|
||||
}
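For illustration (hypothetical output, not taken from this diff): for a YAML file named `indices/create.yml` that declares `requires: { serverless: false, stack: true }`, the generated test would start roughly like this, with the skip expression built from the conditions collected above.

import { test } from 'tap'
import { Client } from '@elastic/elasticsearch'

test('indices/create.yml', { skip: process.env.TEST_ES_SERVERLESS === "1" }, t => {
  // ...each t.test(...) block creates a Client and runs that YAML document's actions...
  t.end()
})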
|
||||
|
||||
for (const test of tests) {
|
||||
if (test.setup != null) {
|
||||
code += ' t.before(async () => {\n'
|
||||
code += indent(buildActions(test.setup), 4)
|
||||
code += ' })\n\n'
|
||||
}
|
||||
|
||||
if (test.teardown != null) {
|
||||
code += ' t.after(async () => {\n'
|
||||
code += indent(buildActions(test.teardown), 4)
|
||||
code += ' })\n\n'
|
||||
}
|
||||
|
||||
for (const key of Object.keys(test).filter(k => !['setup', 'teardown', 'requires'].includes(k))) {
|
||||
if (test[key].find(action => Object.keys(action)[0] === 'skip') != null) {
|
||||
code += ` t.test('${key}', { skip: true }, async t => {\n`
|
||||
} else {
|
||||
code += ` t.test('${key}', async t => {\n`
|
||||
}
|
||||
code += indent(buildActions(test[key]), 4)
|
||||
code += '\n t.end()\n'
|
||||
code += ' })\n'
|
||||
}
|
||||
// if (test.requires != null) requires = test.requires
|
||||
}
|
||||
|
||||
code += '\n t.end()\n'
|
||||
code += '})\n'
|
||||
|
||||
const testDir = join(generatedTestsPath, apiName.split(sep).slice(0, -1).join(sep))
|
||||
const testFile = join(testDir, apiName.split(sep).pop().replace(/\.ya?ml$/, '.mjs'))
|
||||
await mkdir(testDir, { recursive: true })
|
||||
writeFileSync(testFile, code, 'utf8')
|
||||
}
|
||||
|
||||
function buildActions (actions) {
|
||||
let code = `const client = new Client(${JSON.stringify(clientOptions, null, 2)})\n`
|
||||
code += 'let response\n\n'
|
||||
|
||||
const vars = new Set()
|
||||
|
||||
for (const action of actions) {
|
||||
const key = Object.keys(action)[0]
|
||||
switch (key) {
|
||||
case 'do':
|
||||
code += buildDo(action.do)
|
||||
break
|
||||
case 'set':
|
||||
const setResult = buildSet(action.set, vars)
|
||||
vars.add(setResult.varName)
|
||||
code += setResult.code
|
||||
break
|
||||
case 'transform_and_set':
|
||||
code += buildTransformAndSet(action.transform_and_set)
|
||||
break
|
||||
case 'match':
|
||||
code += buildMatch(action.match)
|
||||
break
|
||||
case 'lt':
|
||||
code += buildLt(action.lt)
|
||||
break
|
||||
case 'lte':
|
||||
code += buildLte(action.lte)
|
||||
break
|
||||
case 'gt':
|
||||
code += buildGt(action.gt)
|
||||
break
|
||||
case 'gte':
|
||||
code += buildGte(action.gte)
|
||||
break
|
||||
case 'length':
|
||||
code += buildLength(action.length)
|
||||
break
|
||||
case 'is_true':
|
||||
code += buildIsTrue(action.is_true)
|
||||
break
|
||||
case 'is_false':
|
||||
code += buildIsFalse(action.is_false)
|
||||
break
|
||||
case 'contains':
|
||||
code += buildContains(action.contains)
|
||||
break
|
||||
case 'exists':
|
||||
code += buildExists(action.exists)
|
||||
break
|
||||
case 'skip':
|
||||
break
|
||||
default:
|
||||
console.warn(`Action not supported: ${key}`)
|
||||
break
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
}
|
||||
|
||||
function buildDo (action) {
|
||||
let code = ''
|
||||
const keys = Object.keys(action)
|
||||
if (keys.includes('catch')) {
|
||||
code += 'try {\n'
|
||||
code += indent(buildRequest(action), 2)
|
||||
code += '} catch (err) {\n'
|
||||
code += ` t.match(err.toString(), ${buildValLiteral(action.catch)})\n`
|
||||
code += '}\n'
|
||||
} else {
|
||||
code += buildRequest(action)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
function buildRequest (action) {
|
||||
let code = ''
|
||||
|
||||
const options = { meta: true }
|
||||
|
||||
for (const key of Object.keys(action)) {
|
||||
if (key === 'catch') continue
|
||||
|
||||
if (key === 'headers') {
|
||||
options.headers = action.headers
|
||||
continue
|
||||
}
|
||||
|
||||
const params = action[key]
|
||||
if (params.ignore != null) {
|
||||
if (Array.isArray(params.ignore)) {
|
||||
options.ignore = params.ignore
|
||||
} else {
|
||||
options.ignore = [params.ignore]
|
||||
}
|
||||
}
|
||||
|
||||
code += `response = await client.${toCamelCase(key)}(${buildApiParams(action[key])}, ${JSON.stringify(options)})\n`
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
function buildSet (action, vars) {
|
||||
const key = Object.keys(action)[0]
|
||||
const varName = action[key]
|
||||
const lookup = buildLookup(key)
|
||||
|
||||
let code = ''
|
||||
if (vars.has(varName)) {
|
||||
code = `${varName} = ${lookup}\n`
|
||||
} else {
|
||||
code = `let ${varName} = ${lookup}\n`
|
||||
}
|
||||
return { code, varName }
|
||||
}
|
||||
|
||||
function buildTransformAndSet (action) {
|
||||
return `// TODO buildTransformAndSet: ${JSON.stringify(action)}\n`
|
||||
}
|
||||
|
||||
function buildMatch (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
let lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
return `t.match(${lookup}, ${val})\n`
|
||||
}
|
||||
|
||||
function buildLt (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
const lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
return `t.ok(${lookup} < ${val})\n`
|
||||
}
|
||||
|
||||
function buildLte (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
const lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
return `t.ok(${lookup} <= ${val})\n`
|
||||
}
|
||||
|
||||
function buildGt (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
const lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
return `t.ok(${lookup} > ${val})\n`
|
||||
}
|
||||
|
||||
function buildGte (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
const lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
return `t.ok(${lookup} >= ${val})\n`
|
||||
}
|
||||
|
||||
function buildLength (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
const lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
|
||||
let code = ''
|
||||
code += `if (typeof ${lookup} === 'object' && !Array.isArray(${lookup})) {\n`
|
||||
code += ` t.equal(Object.keys(${lookup}).length, ${val})\n`
|
||||
code += `} else {\n`
|
||||
code += ` t.equal(${lookup}.length, ${val})\n`
|
||||
code += `}\n`
|
||||
return code
|
||||
}
|
||||
|
||||
function buildIsTrue (action) {
|
||||
let lookup = `${buildLookup(action)}`
|
||||
let errMessage = `\`${action} should be truthy. found: '\$\{JSON.stringify(${lookup})\}'\``
|
||||
if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be truthy. found: '\$\{${lookup}\}'\``
|
||||
return `t.ok(${lookup} === "true" || (Boolean(${lookup}) && ${lookup} !== "false"), ${errMessage})\n`
|
||||
}
|
||||
|
||||
function buildIsFalse (action) {
|
||||
let lookup = `${buildLookup(action)}`
|
||||
let errMessage = `\`${action} should be falsy. found: '\$\{JSON.stringify(${lookup})\}'\``
|
||||
if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be falsy. found: '\$\{${lookup}\}'\``
|
||||
return `t.ok(${lookup} === "false" || !Boolean(${lookup}), ${errMessage})\n`
|
||||
}
|
||||
|
||||
function buildContains (action) {
|
||||
const key = Object.keys(action)[0]
|
||||
const lookup = buildLookup(key)
|
||||
const val = buildValLiteral(action[key])
|
||||
return `t.ok(${lookup}.includes(${val}), '${JSON.stringify(val)} not found in ${key}')\n`
|
||||
}
|
||||
|
||||
function buildExists (keyName) {
|
||||
const lookup = buildLookup(keyName)
|
||||
return `t.ok(${lookup} != null, \`Key "${keyName}" not found in response body: \$\{JSON.stringify(response.body, null, 2)\}\`)\n`
|
||||
}
|
||||
|
||||
function buildApiParams (params) {
|
||||
if (Object.keys(params).length === 0) {
|
||||
return 'undefined'
|
||||
} else {
|
||||
const out = {}
|
||||
Object.keys(params).filter(k => k !== 'ignore' && k !== 'headers').forEach(k => out[k] = params[k])
|
||||
return buildValLiteral(out)
|
||||
}
|
||||
}
|
||||
|
||||
function toCamelCase (name) {
|
||||
return name.replace(/_([a-z])/g, g => g[1].toUpperCase())
|
||||
}
|
||||
|
||||
function indent (str, spaces) {
|
||||
const tabs = ' '.repeat(spaces)
|
||||
return str.replace(/\s+$/, '').split('\n').map(l => `${tabs}${l}`).join('\n') + '\n'
|
||||
}
|
||||
|
||||
function buildLookup (path) {
|
||||
if (path === '$body') return '(typeof response.body === "string" ? response.body : JSON.stringify(response.body))'
|
||||
|
||||
const outPath = path.split('.').map(step => {
|
||||
if (parseInt(step, 10).toString() === step) {
|
||||
return `[${step}]`
|
||||
} else if (step.match(/^\$[a-zA-Z0-9_]+$/)) {
|
||||
const lookup = step.replace(/^\$/, '')
|
||||
if (lookup === 'body') return ''
|
||||
return `[${lookup}]`
|
||||
} else if (step === '') {
|
||||
return ''
|
||||
} else {
|
||||
return `['${step}']`
|
||||
}
|
||||
}).join('')
|
||||
return `response.body${outPath}`
|
||||
}
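A hypothetical usage example (not in the diff) of the helper above: numeric path steps become array indexes, `$`-prefixed steps reference generated variables, and `$body` maps to the whole response body.

import assert from 'node:assert'

// Assumes buildLookup is the function defined above.
assert.equal(
  buildLookup('hits.hits.0._source.name'),
  "response.body['hits']['hits'][0]['_source']['name']"
)
assert.equal(buildLookup('items.$idx.status'), "response.body['items'][idx]['status']")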
|
||||
|
||||
function buildValLiteral (val) {
|
||||
if (typeof val === 'string') val = val.trim()
|
||||
if (isRegExp(val)) {
|
||||
return JSON.stringify(val).replace(/^"/, '').replace(/"$/, '').replaceAll('\\\\', '\\')
|
||||
} else if (isVariable(val)) {
|
||||
if (val === '$body') return 'JSON.stringify(response.body)'
|
||||
return val.replace(/^\$/, '')
|
||||
} else if (isPlainObject(val)) {
|
||||
return JSON.stringify(cleanObject(val), null, 2).replace(/"\$([a-zA-Z0-9_]+)"/g, '$1')
|
||||
} else {
|
||||
return JSON.stringify(val)
|
||||
}
|
||||
}
|
||||
|
||||
function isRegExp (str) {
|
||||
return typeof str === 'string' && str.startsWith('/') && str.endsWith('/')
|
||||
}
|
||||
|
||||
function isVariable (str) {
|
||||
return typeof str === 'string' && str.match(/^\$[a-zA-Z0-9_]+$/) != null
|
||||
}
|
||||
|
||||
function cleanObject (obj) {
|
||||
Object.keys(obj).forEach(key => {
|
||||
let val = obj[key]
|
||||
if (typeof val === 'string' && val.trim().startsWith('{') && val.trim().endsWith('}')) {
|
||||
// attempt to parse as object
|
||||
try {
|
||||
val = JSON.parse(val)
|
||||
} catch {
|
||||
}
|
||||
} else if (isPlainObject(val)) {
|
||||
val = cleanObject(val)
|
||||
} else if (Array.isArray(val)) {
|
||||
val = val.map(item => isPlainObject(item) ? cleanObject(item) : item)
|
||||
}
|
||||
obj[key] = val
|
||||
})
|
||||
return obj
|
||||
}
|
||||
|
||||
function isPlainObject (obj) {
|
||||
return typeof obj === 'object' && !Array.isArray(obj) && obj != null
|
||||
}
|
||||
|
||||
module.exports = build
|
||||
File diff suppressed because it is too large
@ -64,6 +64,31 @@ test('Missing node(s)', t => {
|
||||
t.end()
|
||||
})
|
||||
|
||||
test('multi nodes with roles, using default node filter', async t => {
|
||||
const client = new Client({
|
||||
nodes: [
|
||||
{
|
||||
url: new URL('http://node1:9200'),
|
||||
roles: { master: true, data: false, ingest: false, ml: false }
|
||||
},
|
||||
{
|
||||
url: new URL('http://node2:9200'),
|
||||
roles: { master: true, data: true, ingest: false, ml: false }
|
||||
},
|
||||
]
|
||||
})
|
||||
const conn = client.connectionPool.getConnection({
|
||||
now: Date.now() + 1000 * 60 * 3,
|
||||
requestId: 1,
|
||||
name: 'elasticsearch-js',
|
||||
context: null
|
||||
})
|
||||
|
||||
t.equal(conn?.url.hostname, 'node2')
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
test('Custom headers', t => {
|
||||
const client = new Client({
|
||||
node: 'http://localhost:9200',
|
||||
@ -287,9 +312,25 @@ test('Elastic Cloud config', t => {
|
||||
t.equal(connection?.url.hostname, 'abcd.localhost')
|
||||
t.equal(connection?.url.protocol, 'https:')
|
||||
|
||||
t.test('Invalid Cloud ID will throw ConfigurationError', t => {
|
||||
t.throws(() => new Client({
|
||||
cloud : {
|
||||
id : 'invalidCloudIdThatIsNotBase64'
|
||||
},
|
||||
auth : {
|
||||
username: 'elastic',
|
||||
password: 'changeme'
|
||||
}
|
||||
|
||||
}), errors.ConfigurationError)
|
||||
t.end()
|
||||
})
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
|
||||
|
||||
test('Override default Elastic Cloud options', t => {
|
||||
const client = new Client({
|
||||
cloud: {
|
||||
|
||||
@ -121,11 +121,35 @@ test('ES|QL helper', t => {
|
||||
const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowTable()
|
||||
t.ok(result instanceof arrow.Table)
|
||||
|
||||
const testRecords = [
|
||||
[
|
||||
['amount', 4.900000095367432],
|
||||
['date', 1729532586965]
|
||||
],
|
||||
[
|
||||
['amount', 8.199999809265137],
|
||||
['date', 1729446186965],
|
||||
],
|
||||
[
|
||||
['amount', 15.5],
|
||||
['date', 1729359786965],
|
||||
],
|
||||
[
|
||||
['amount', 9.899999618530273],
|
||||
['date', 1729273386965],
|
||||
],
|
||||
[
|
||||
['amount', 13.899999618530273],
|
||||
['date', 1729186986965],
|
||||
]
|
||||
]
|
||||
|
||||
let count = 0
|
||||
const table = [...result]
|
||||
t.same(table[0], [
|
||||
["amount", 4.900000095367432],
|
||||
["date", 1729532586965],
|
||||
])
|
||||
for (const record of table) {
|
||||
t.same(record, testRecords[count])
|
||||
count++
|
||||
}
|
||||
t.end()
|
||||
})
|
||||
|
||||
@ -158,17 +182,28 @@ test('ES|QL helper', t => {
|
||||
t.end()
|
||||
})
|
||||
|
||||
test('toArrowReader', t => {
|
||||
t.test('Parses a binary response into an Arrow stream reader', async t => {
|
||||
const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
|
||||
test('toArrowReader', async t => {
|
||||
const testRecords = [
|
||||
{ amount: 4.900000095367432, },
|
||||
{ amount: 8.199999809265137, },
|
||||
{ amount: 15.5, },
|
||||
{ amount: 9.899999618530273, },
|
||||
{ amount: 13.899999618530273, },
|
||||
]
|
||||
|
||||
// build reusable Arrow table
|
||||
const table = arrow.tableFromJSON(testRecords)
|
||||
const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array()
|
||||
|
||||
t.test('Parses a binary response into an Arrow stream reader', async t => {
|
||||
const MockConnection = connection.buildMockConnection({
|
||||
onRequest (_params) {
|
||||
return {
|
||||
body: Buffer.from(binaryContent, 'base64'),
|
||||
body: Buffer.from(rawData),
|
||||
statusCode: 200,
|
||||
headers: {
|
||||
'content-type': 'application/vnd.elasticsearch+arrow+stream'
|
||||
'content-type': 'application/vnd.elasticsearch+arrow+stream',
|
||||
'transfer-encoding': 'chunked'
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -182,26 +217,28 @@ test('ES|QL helper', t => {
|
||||
const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
|
||||
t.ok(result.isStream())
|
||||
|
||||
const recordBatch = result.next().value
|
||||
t.same(recordBatch.get(0)?.toJSON(), {
|
||||
amount: 4.900000095367432,
|
||||
date: 1729532586965,
|
||||
})
|
||||
let count = 0
|
||||
for await (const recordBatch of result) {
|
||||
for (const record of recordBatch) {
|
||||
t.same(record.toJSON(), testRecords[count])
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => {
|
||||
const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
|
||||
|
||||
const MockConnection = connection.buildMockConnection({
|
||||
onRequest (params) {
|
||||
const header = params.headers?.['x-elastic-client-meta'] ?? ''
|
||||
t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`)
|
||||
return {
|
||||
body: Buffer.from(binaryContent, 'base64'),
|
||||
body: Buffer.from(rawData),
|
||||
statusCode: 200,
|
||||
headers: {
|
||||
'content-type': 'application/vnd.elasticsearch+arrow+stream'
|
||||
'content-type': 'application/vnd.elasticsearch+arrow+stream',
|
||||
'transfer-encoding': 'chunked'
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -240,10 +277,12 @@ test('ES|QL helper', t => {
|
||||
new arrow.RecordBatch(schema, batch3.data),
|
||||
])
|
||||
|
||||
const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array()
|
||||
|
||||
const MockConnection = connection.buildMockConnection({
|
||||
onRequest (_params) {
|
||||
return {
|
||||
body: Buffer.from(arrow.tableToIPC(table, "stream")),
|
||||
body: Buffer.from(rawData),
|
||||
statusCode: 200,
|
||||
headers: {
|
||||
'content-type': 'application/vnd.elasticsearch+arrow+stream'
|
||||
@ -261,7 +300,7 @@ test('ES|QL helper', t => {
|
||||
t.ok(result.isStream())
|
||||
|
||||
let counter = 0
|
||||
for (const batch of result) {
|
||||
for await (const batch of result) {
|
||||
for (const row of batch) {
|
||||
counter++
|
||||
const { id, val } = row.toJSON()