Compare commits


23 Commits

Author SHA1 Message Date
1ef1754623 Bump to 8.16.2 (#2489) 2024-11-21 10:38:16 -06:00
be2fe317f2 [Backport 8.16] Ignore tap artifacts (#2492)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-11-21 10:22:26 -06:00
1d2d934b50 Bump to 8.16.1 (#2478) 2024-11-18 12:45:53 -06:00
d0f5ada03d [Backport 8.16] Fix ECMAScript import (#2477) 2024-11-18 12:27:10 -06:00
47de886973 Auto-generated code for 8.16 (#2470) 2024-11-18 11:27:50 -06:00
1e103baec1 Bump version to 8.16.0 (#2464) 2024-11-14 11:50:48 -06:00
7d5f622506 Backport ts-standard upgrade (#2462) 2024-11-13 11:15:54 -06:00
8377b58af3 [Backport 8.16] Address feedback and add clarity (#2452)
Co-authored-by: Marci W <333176+marciw@users.noreply.github.com>
2024-11-12 11:07:38 -06:00
c150efbd21 Auto-generated code for 8.16 (#2442) 2024-11-11 11:04:42 -06:00
e7663aabde [Backport 8.16] Add changelog for 8.15.2 (#2446)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-11-11 09:52:14 -06:00
c9615dc0ef [Backport 8.16] Add _id to the result of helpers.search (#2435)
Co-authored-by: Rami <72725910+ramikg@users.noreply.github.com>
2024-11-06 12:28:18 -06:00
bb5fb24d73 [Backport 8.16] Add streaming support to Arrow helper (#2430) 2024-11-04 16:20:06 -06:00
38358e20ab Auto-generated code for 8.16 (#2427) 2024-11-04 09:57:37 -06:00
9479d82644 Auto-generated code for 8.16 (#2410)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-28 15:11:56 -05:00
18df52feb4 [Backport 8.16] Skip flaky test (#2419)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-28 11:57:58 -05:00
f72f9e9a5a [Backport 8.16] Don't generate coverage during standard unit test run (#2406)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-24 12:07:48 -05:00
c4151ceb35 [Backport 8.16] Upgrade tap to latest (#2401)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-24 11:38:29 -05:00
f3aedc7ad0 Manual backport of #2375 (#2397) 2024-10-23 08:47:50 -05:00
586c42161d [Backport 8.16] Respect disablePrototypePoisoningProtection option (#2395)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-22 15:03:23 -05:00
9947b0e365 [Backport 8.16] Add doc about timeout best practices (#2393)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-22 15:01:15 -05:00
52b7264b45 [Backport 8.16] Update changelog for 8.15.1 (#2392)
Co-authored-by: Josh Mock <joshua.mock@elastic.co>
2024-10-22 15:00:57 -05:00
fceebae8ae Auto-generated code for 8.x (#2372) 2024-10-14 11:18:55 -05:00
e45ed28c05 Auto-generated code for 8.x (#2369) 2024-09-30 13:41:23 -05:00
263 changed files with 31033 additions and 35993 deletions


@ -25,7 +25,7 @@ steps:
provider: "gcp"
image: family/core-ubuntu-2204
plugins:
- junit-annotate#v2.6.0:
- junit-annotate#v2.4.1:
artifacts: "junit-output/junit-*.xml"
job-uuid-file-pattern: "junit-(.*).xml"
fail-build-on-error: true

.github/make.sh vendored

@ -65,7 +65,7 @@ codegen)
if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then
# fall back to branch name or `main` if no VERSION is set
branch_name=$(git rev-parse --abbrev-ref HEAD)
if [[ "$branch_name" =~ ^[0-9]+\.([0-9]+|x) ]]; then
if [[ "$branch_name" =~ ^[0-9]+\.[0-9]+ ]]; then
echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using branch name: \`$branch_name\`\033[0m"
VERSION="$branch_name"
else
@ -150,7 +150,7 @@ if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x}
-u "$(id -u):$(id -g)" \
--volume "$repo:/usr/src/elasticsearch-js" \
--volume /usr/src/elasticsearch-js/node_modules \
--volume "$(realpath "$repo/../elastic-client-generator-js"):/usr/src/elastic-client-generator-js" \
--volume "$(realpath $repo/../elastic-client-generator-js):/usr/src/elastic-client-generator-js" \
--env "WORKFLOW=$WORKFLOW" \
--name make-elasticsearch-js \
--rm \
@ -159,14 +159,6 @@ if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x}
node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}"
else
echo -e "\033[34;1mINFO: Running in CI mode"
# determine branch to clone
GENERATOR_BRANCH="main"
if [[ "$VERSION" == 8.* ]]; then
GENERATOR_BRANCH="8.x"
fi
echo -e "\033[34;1mINFO: Generator branch: $GENERATOR_BRANCH"
docker run \
--volume "$repo:/usr/src/elasticsearch-js" \
--volume /usr/src/elasticsearch-js/node_modules \
@ -176,7 +168,7 @@ else
--rm \
$product \
/bin/bash -c "cd /usr/src && \
git clone --branch $GENERATOR_BRANCH https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \
git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \
mkdir -p /usr/src/elastic-client-generator-js/output && \
cd /usr/src/elasticsearch-js && \
node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}"

.github/stale.yml vendored Normal file

@ -0,0 +1,26 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 15
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
- "discussion"
- "feature request"
- "bug"
- "todo"
- "good first issue"
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: |
We understand that this might be important for you, but this issue has been automatically marked as stale because it has not had recent activity either from our end or yours.
It will be closed if no further activity occurs, please write a comment if you would like to keep this going.
Note: in the past months we have built a new client, that has just landed in master. If you want to open an issue or a pr for the legacy client, you should do that in https://github.com/elastic/elasticsearch-js-legacy
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false


@ -1,19 +0,0 @@
name: docs-build
on:
push:
branches:
- main
pull_request_target: ~
merge_group: ~
jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-build.yml@main
with:
path-pattern: docs/**
permissions:
deployments: write
id-token: write
contents: read
pull-requests: read


@ -1,14 +0,0 @@
name: docs-cleanup
on:
pull_request_target:
types:
- closed
jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main
permissions:
contents: none
id-token: write
deployments: write


@ -11,10 +11,10 @@ jobs:
outputs:
src-only: "${{ steps.changes.outputs.src-only }}"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: dorny/paths-filter/@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
- uses: dorny/paths-filter/@v3.0.2
id: changes
with:
filters: |
@ -36,12 +36,12 @@ jobs:
os: [ubuntu-latest, windows-latest, macOS-latest]
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
@ -66,12 +66,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Use Node.js
uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4
uses: actions/setup-node@v4
with:
node-version: 22.x
@ -96,12 +96,12 @@ jobs:
os: [ubuntu-latest, windows-latest, macOS-latest]
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Use Bun
uses: oven-sh/setup-bun@4bc047ad259df6fc24a6c9b0f9a0cb08cf17fbe5 # v2
uses: oven-sh/setup-bun@v2
- name: Install
run: |
@ -118,3 +118,13 @@ jobs:
- name: ECMAScript module test
run: |
bun run test:esm
auto-approve:
name: Auto-approve
needs: [test, license]
runs-on: ubuntu-latest
permissions:
pull-requests: write
if: github.actor == 'elasticmachine'
steps:
- uses: hmarr/auto-approve-action@v4


@ -12,49 +12,27 @@ jobs:
contents: write
id-token: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
with:
persist-credentials: false
ref: ${{ github.event.inputs.branch }}
- uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4
- uses: actions/setup-node@v4
with:
node-version: "22.x"
registry-url: "https://registry.npmjs.org"
- run: npm install -g npm
- run: npm install
- run: npm test
- name: npm publish
run: |
version=$(jq -r .version package.json)
tag_meta=$(echo "$version" | cut -s -d '-' -f2)
if [[ -z "$tag_meta" ]]; then
npm publish --provenance --access public
else
tag=$(echo "$tag_meta" | cut -d '.' -f1)
npm publish --provenance --access public --tag "$tag"
fi
- run: npm publish --provenance --access public
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Publish version on GitHub
run: |
- run: |
version=$(jq -r .version package.json)
tag_meta=$(echo "$version" | cut -s -d '-' -f2)
if [[ -z "$tag_meta" ]]; then
gh release create \
-n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)"
--target "$BRANCH_NAME" \
--title "v$version" \
"v$version"
else
tag_main=$(echo "$version" | cut -d '-' -f1)
gh release create \
-n "This is a $tag_main pre-release. Changes may not be stable." \
--latest=false \
--prerelease \
--target "$BRANCH_NAME" \
--title "v$version" \
"v$version"
fi
gh release create \
-n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)" \
--target "$BRANCH_NAME" \
-t "v$version" \
"v$version"
env:
BRANCH_NAME: ${{ github.event.inputs.branch }}
GH_TOKEN: ${{ github.token }}

.github/workflows/serverless-patch.sh vendored Executable file

@ -0,0 +1,43 @@
#!/usr/bin/env bash
set -exuo pipefail
merge_commit_sha=$(jq -r '.pull_request.merge_commit_sha' "$GITHUB_EVENT_PATH")
pull_request_id=$(jq -r '.pull_request.number' "$GITHUB_EVENT_PATH")
pr_shortcode="elastic/elasticsearch-js#$pull_request_id"
# generate patch file
cd "$GITHUB_WORKSPACE/stack"
git format-patch -1 --stdout "$merge_commit_sha" > /tmp/patch.diff
# set committer info
git config --global user.email "elasticmachine@users.noreply.github.com"
git config --global user.name "Elastic Machine"
# apply patch file
cd "$GITHUB_WORKSPACE/serverless"
git am -C1 --reject /tmp/patch.diff || git am --quit
# generate PR body comment
comment="Patch applied from $pr_shortcode"
# enumerate rejected patches in PR comment
has_rejects='false'
for f in ./**/*.rej; do
has_rejects='true'
comment="$comment
## Rejected patch \`$f\` must be resolved:
\`\`\`diff
$(cat "$f")
\`\`\`
"
done
# delete .rej files
rm -fv ./**/*.rej
# send data to output parameters
echo "$comment" > /tmp/pr_body
echo "PR_DRAFT=$has_rejects" >> "$GITHUB_OUTPUT"

.github/workflows/serverless-patch.yml vendored Normal file

@ -0,0 +1,53 @@
---
name: Apply PR changes to serverless
on:
pull_request_target:
types:
- closed
- labeled
jobs:
apply-patch:
name: Apply patch
runs-on: ubuntu-latest
# Only react to merged PRs for security reasons.
# See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target.
if: >
github.event.pull_request.merged
&& (
(
github.event.action == 'closed'
&& contains(github.event.pull_request.labels.*.name, 'apply-to-serverless')
)
||
(
github.event.action == 'labeled'
&& github.event.label.name == 'apply-to-serverless'
)
)
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
repository: elastic/elasticsearch-js
ref: main
path: stack
fetch-depth: 0
- uses: actions/checkout@v4
with:
persist-credentials: false
repository: elastic/elasticsearch-serverless-js
ref: main
path: serverless
- name: Apply patch from stack to serverless
id: apply-patch
run: $GITHUB_WORKSPACE/stack/.github/workflows/serverless-patch.sh
- uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.GH_TOKEN }}
path: serverless
title: "Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}"
commit-message: "Apply patch from elastic/elasticsearch-js#${{ github.event.pull_request.number }}"
body-path: /tmp/pr_body
draft: "${{ steps.apply-patch.outputs.PR_DRAFT }}"
add-paths: ":!*.rej"


@ -1,21 +1,21 @@
---
name: "Close stale issues and PRs"
name: 'Close stale issues and PRs'
on:
schedule:
- cron: "30 1 * * *"
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
- uses: actions/stale@v8
with:
stale-issue-label: stale
stale-pr-label: stale
days-before-stale: 90
days-before-close: 14
exempt-issue-labels: "good first issue,tracking"
exempt-issue-labels: 'good first issue'
close-issue-label: closed-stale
close-pr-label: closed-stale
stale-issue-message: "This issue is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days."
stale-pr-message: "This pull request is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days."
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days.'
stale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days.'


@ -28,9 +28,6 @@ spec:
spec:
repository: elastic/elasticsearch-js
pipeline_file: .buildkite/pipeline.yml
env:
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true"
SLACK_NOTIFICATIONS_CHANNEL: "#devtools-notify-javascript"
teams:
devtools-team:
access_level: MANAGE_BUILD_AND_READ
@ -45,12 +42,6 @@ spec:
main:
branch: "main"
cronline: "@daily"
8_x:
branch: "8.x"
cronline: "@daily"
8_17:
branch: "8.17"
cronline: "@daily"
8_18:
branch: "8.18"
8_14:
branch: "8.14"
cronline: "@daily"


@ -13,6 +13,7 @@ const client = new Client({
cloud: { id: '<cloud-id>' },
auth: { apiKey: 'base64EncodedKey' },
maxRetries: 5,
requestTimeout: 60000,
sniffOnStart: true
})
----
@ -81,7 +82,7 @@ _Default:_ `3`
|`requestTimeout`
|`number` - Max request timeout in milliseconds for each request. +
_Default:_ No value
_Default:_ `30000`
|`pingTimeout`
|`number` - Max ping request timeout in milliseconds for each request. +


@ -1,77 +1,6 @@
[[changelog-client]]
== Release notes
[discrete]
=== 9.0.0
[discrete]
==== Breaking changes
[discrete]
===== Drop support for deprecated `body` parameter
In 8.0, the top-level `body` parameter that was available on all API functions <<remove-body-key,was deprecated>>. In 9.0 this property is completely removed.
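For illustration only (not part of the original changelog text), a minimal sketch of the migration, assuming a typical `search` call against an index named `my-index`:
[source, js]
----
// 8.x and earlier (deprecated): request fields nested under `body`
const legacy = await client.search({
  index: 'my-index',
  body: { query: { match_all: {} } }
})

// 9.0: pass the same fields at the top level instead
const current = await client.search({
  index: 'my-index',
  query: { match_all: {} }
})
----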
[discrete]
===== Remove the default 30-second timeout on all requests sent to Elasticsearch
Setting HTTP timeouts on Elasticsearch requests goes against Elastic's recommendations. See <<timeout-best-practices>> for more information.
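As a hedged sketch (an illustration, not part of the original changelog text): a client that still wants the old behavior can opt back in by setting `requestTimeout` explicitly when constructing the client, mirroring the removed 30-second default:
[source, js]
----
const { Client } = require('@elastic/elasticsearch')

// Re-enable a request timeout explicitly; 30000 ms mirrors the removed default
const client = new Client({
  node: 'http://localhost:9200', // placeholder node URL
  requestTimeout: 30000
})
----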
[discrete]
=== 8.17.1
[discrete]
==== Fixes
[discrete]
===== Improved support for Elasticsearch `v8.17`
Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.
[discrete]
===== Report correct transport connection type in telemetry
The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. https://github.com/elastic/elasticsearch-js/issues/2324[#2324]
[discrete]
=== 8.17.0
[discrete]
==== Features
[discrete]
===== Support for Elasticsearch `v8.17`
You can find all the API changes
https://www.elastic.co/guide/en/elasticsearch/reference/8.17/release-notes-8.17.0.html[here].
[discrete]
=== 8.16.4
[discrete]
==== Fixes
[discrete]
===== Improved support for Elasticsearch `v8.16`
Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.
[discrete]
===== Report correct transport connection type in telemetry
The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. https://github.com/elastic/elasticsearch-js/issues/2324[#2324]
[discrete]
=== 8.16.3
[discrete]
==== Fixes
[discrete]
===== Improved support for Elasticsearch `v8.16`
Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.
[discrete]
=== 8.16.2
@ -716,7 +645,6 @@ ac.abort()
----
[discrete]
[[remove-body-key]]
===== Remove the body key from the request
*Breaking: Yes* | *Migration effort: Small*


@ -1,22 +1,22 @@
[[child]]
=== Creating a child client
There are some use cases where you may need multiple instances of the client.
You can easily do that by calling `new Client()` as many times as you need, but
you will lose all the benefits of using one single client, such as the long
living connections and the connection pool handling. To avoid this problem, the
client offers a `child` API, which returns a new client instance that shares the
There are some use cases where you may need multiple instances of the client.
You can easily do that by calling `new Client()` as many times as you need, but
you will lose all the benefits of using one single client, such as the long
living connections and the connection pool handling. To avoid this problem, the
client offers a `child` API, which returns a new client instance that shares the
connection pool with the parent client.
NOTE: The event emitter is shared between the parent and the child(ren). If you
extend the parent client, the child client will have the same extensions, while
NOTE: The event emitter is shared between the parent and the child(ren). If you
extend the parent client, the child client will have the same extensions, while
if the child client adds an extension, the parent client will not be extended.
You can pass to the `child` every client option you would pass to a normal
client, but the connection pool specific options (`ssl`, `agent`, `pingTimeout`,
You can pass to the `child` every client option you would pass to a normal
client, but the connection pool specific options (`ssl`, `agent`, `pingTimeout`,
`Connection`, and `resurrectStrategy`).
CAUTION: If you call `close` in any of the parent/child clients, every client
CAUTION: If you call `close` in any of the parent/child clients, every client
will be closed.
[source,js]
@ -28,8 +28,9 @@ const client = new Client({
})
const child = client.child({
headers: { 'x-foo': 'bar' },
requestTimeout: 1000
})
client.info().then(console.log, console.log)
child.info().then(console.log, console.log)
----
----


@ -414,8 +414,8 @@ The supported request specific options are:
_Default:_ `null`
|`requestTimeout`
|`number | string | null` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#_http_client_configuration[Elasticsearch best practices for HTML clients] for more info. +
_Default:_ No timeout
|`number | string` - Max request timeout for the request in milliseconds, it overrides the client default. +
_Default:_ `30000`
|`retryOnTimeout`
|`boolean` - Retry requests that have timed out.
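To make the options above concrete, a small sketch (assuming an existing `client` instance; the values are illustrative): request-specific options are passed as a second argument to an API call and override the client-level defaults for that call only.
[source, js]
----
const response = await client.search(
  { index: 'my-index', query: { match_all: {} } },
  // per-request transport options, overriding the client defaults
  { requestTimeout: 5000, retryOnTimeout: true, maxRetries: 2 }
)
console.log(response)
----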


@ -1,11 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.getDataStream({
name: "my-data-stream",
filter_path: "data_streams.indices.index_name",
});
console.log(response);
----


@ -1,46 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "retrievers_example",
retriever: {
rrf: {
retrievers: [
{
standard: {
query: {
range: {
year: {
gt: 2023,
},
},
},
},
},
{
standard: {
query: {
term: {
topic: "elastic",
},
},
},
},
],
rank_window_size: 10,
rank_constant: 1,
},
},
_source: false,
aggs: {
topics: {
terms: {
field: "topic",
},
},
},
});
console.log(response);
----


@ -1,18 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "rerank",
inference_id: "my-rerank-model",
inference_config: {
service: "cohere",
service_settings: {
model_id: "rerank-english-v3.0",
api_key: "{{COHERE_API_KEY}}",
},
},
});
console.log(response);
----


@ -1,42 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-rank-vectors-bit",
mappings: {
properties: {
my_vector: {
type: "rank_vectors",
element_type: "bit",
},
},
},
});
console.log(response);
const response1 = await client.bulk({
index: "my-rank-vectors-bit",
refresh: "true",
operations: [
{
index: {
_id: "1",
},
},
{
my_vector: [127, -127, 0, 1, 42],
},
{
index: {
_id: "2",
},
},
{
my_vector: "8100012a7f",
},
],
});
console.log(response1);
----


@ -1,49 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "retrievers_example_nested",
retriever: {
rrf: {
retrievers: [
{
standard: {
query: {
nested: {
path: "nested_field",
inner_hits: {
name: "nested_vector",
_source: false,
fields: ["nested_field.paragraph_id"],
},
query: {
knn: {
field: "nested_field.nested_vector",
query_vector: [1, 0, 0.5],
k: 10,
},
},
},
},
},
},
{
standard: {
query: {
term: {
topic: "ai",
},
},
},
},
],
rank_window_size: 10,
rank_constant: 1,
},
},
_source: ["topic"],
});
console.log(response);
----


@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_query/async",
querystring: {
format: "json",
},
const response = await client.esql.asyncQuery({
format: "json",
body: {
query:
"\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",


@ -1,57 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "retrievers_example",
retriever: {
rrf: {
retrievers: [
{
standard: {
query: {
term: {
topic: "elastic",
},
},
},
},
{
rrf: {
retrievers: [
{
standard: {
query: {
query_string: {
query:
"(information retrieval) OR (artificial intelligence)",
default_field: "text",
},
},
},
},
{
knn: {
field: "vector",
query_vector: [0.23, 0.67, 0.89],
k: 3,
num_candidates: 5,
},
},
],
rank_window_size: 10,
rank_constant: 1,
},
},
],
rank_window_size: 10,
rank_constant: 1,
},
},
_source: false,
size: 1,
explain: true,
});
console.log(response);
----


@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_application/search_application/my-app/_render_query",
const response = await client.searchApplication.renderQuery({
name: "my-app",
body: {
params: {
query_string: "my first query",


@ -0,0 +1,15 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.cluster.putSettings({
persistent: {
"cluster.routing.allocation.disk.watermark.low": "100gb",
"cluster.routing.allocation.disk.watermark.high": "50gb",
"cluster.routing.allocation.disk.watermark.flood_stage": "10gb",
"cluster.info.update.interval": "1m",
},
});
console.log(response);
----


@ -1,20 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
model: "gpt-4o",
messages: [
{
role: "user",
content: "What is Elastic?",
},
],
},
});
console.log(response);
----


@ -1,11 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.addBlock({
index: ".ml-anomalies-custom-example",
block: "read_only",
});
console.log(response);
----


@ -6,15 +6,14 @@
const response = await client.search({
index: "test-index",
query: {
match: {
my_semantic_field: "Which country is Paris in?",
},
},
highlight: {
fields: {
my_semantic_field: {
number_of_fragments: 2,
order: "score",
nested: {
path: "inference_field.inference.chunks",
query: {
sparse_vector: {
field: "inference_field.inference.chunks.embeddings",
inference_id: "my-inference-id",
query: "mountain lake",
},
},
},
},


@ -10,7 +10,7 @@ const response = await client.ingest.putPipeline({
{
attachment: {
field: "data",
remove_binary: true,
remove_binary: false,
},
},
],


@ -11,7 +11,7 @@ const response = await client.searchApplication.put({
script: {
lang: "mustache",
source:
'\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ',
'\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ',
params: {
query: "",
_es_filters: {},


@ -8,6 +8,11 @@ const response = await client.search({
query: {
bool: {
must: [
{
term: {
"category.keyword": "Main Course",
},
},
{
term: {
tags: "vegetarian",
@ -22,11 +27,6 @@ const response = await client.search({
},
],
should: [
{
term: {
category: "Main Course",
},
},
{
multi_match: {
query: "curry spicy",


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_ingest/_simulate",
const response = await client.simulate.ingest({
body: {
docs: [
{


@ -9,6 +9,7 @@ const response = await client.indices.create({
properties: {
inference_field: {
type: "semantic_text",
inference_id: "my-elser-endpoint",
},
},
},


@ -1,19 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.security.queryRole({
query: {
bool: {
must_not: {
term: {
"metadata._reserved": true,
},
},
},
},
sort: ["name"],
});
console.log(response);
----


@ -14,7 +14,6 @@ const response = await client.indices.putSettings({
"index.search.slowlog.threshold.fetch.info": "800ms",
"index.search.slowlog.threshold.fetch.debug": "500ms",
"index.search.slowlog.threshold.fetch.trace": "200ms",
"index.search.slowlog.include.user": true,
},
});
console.log(response);


@ -1,67 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-rank-vectors-bit",
mappings: {
properties: {
my_vector: {
type: "rank_vectors",
element_type: "bit",
},
},
},
});
console.log(response);
const response1 = await client.bulk({
index: "my-rank-vectors-bit",
refresh: "true",
operations: [
{
index: {
_id: "1",
},
},
{
my_vector: [127, -127, 0, 1, 42],
},
{
index: {
_id: "2",
},
},
{
my_vector: "8100012a7f",
},
],
});
console.log(response1);
const response2 = await client.search({
index: "my-rank-vectors-bit",
query: {
script_score: {
query: {
match_all: {},
},
script: {
source: "maxSimDotProduct(params.query_vector, 'my_vector')",
params: {
query_vector: [
[
0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21,
0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0,
0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2, 0.92, 0.95, 0.64, 0.19,
0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84,
],
],
},
},
},
},
});
console.log(response2);
----


@ -0,0 +1,28 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.esql.query({
format: "txt",
query:
"\n FROM library\n | SORT page_count DESC\n | KEEP name, author\n | LOOKUP era ON author\n | LIMIT 5\n ",
tables: {
era: {
author: {
keyword: [
"Frank Herbert",
"Peter F. Hamilton",
"Vernor Vinge",
"Alastair Reynolds",
"James S.A. Corey",
],
},
era: {
keyword: ["The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"],
},
},
},
});
console.log(response);
----


@ -1,11 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.addBlock({
index: ".ml-anomalies-custom-example",
block: "write",
});
console.log(response);
----


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/logout",
const response = await client.security.oidcLogout({
body: {
token:
"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",


@ -1,26 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "my-rank-vectors-float",
query: {
script_score: {
query: {
match_all: {},
},
script: {
source: "maxSimDotProduct(params.query_vector, 'my_vector')",
params: {
query_vector: [
[0.5, 10, 6],
[-0.5, 10, 10],
],
},
},
},
},
});
console.log(response);
----


@ -1,35 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.putPipeline({
id: "attachment",
description: "Extract attachment information including original binary",
processors: [
{
attachment: {
field: "data",
remove_binary: false,
},
},
],
});
console.log(response);
const response1 = await client.index({
index: "my-index-000001",
id: "my_id",
pipeline: "attachment",
document: {
data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=",
},
});
console.log(response1);
const response2 = await client.get({
index: "my-index-000001",
id: "my_id",
});
console.log(response2);
----


@ -1,24 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "test-index",
query: {
match: {
my_field: "Which country is Paris in?",
},
},
highlight: {
fields: {
my_field: {
type: "semantic",
number_of_fragments: 2,
order: "score",
},
},
},
});
console.log(response);
----


@ -3,12 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE&#x3D;",
querystring: {
wait_for_completion_timeout: "30s",
},
const response = await client.esql.asyncQueryGet({
id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
wait_for_completion_timeout: "30s",
body: null,
});
console.log(response);
----


@ -5,8 +5,11 @@
----
const response = await client.cluster.putSettings({
persistent: {
"cluster.routing.allocation.disk.watermark.low": "90%",
"cluster.routing.allocation.disk.watermark.high": "95%",
"cluster.indices.close.enable": false,
"indices.recovery.max_bytes_per_sec": "50mb",
},
transient: {
"*": null,
},
});
console.log(response);


@ -4,10 +4,9 @@
[source, js]
----
const response = await client.indices.putSettings({
index: "*",
index: "my-index-000001",
settings: {
"index.indexing.slowlog.include.user": true,
"index.indexing.slowlog.threshold.index.warn": "30s",
},
});
console.log(response);


@ -1,23 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "test-index",
mappings: {
properties: {
source_field: {
type: "text",
fields: {
infer_field: {
type: "semantic_text",
inference_id: ".elser-2-elasticsearch",
},
},
},
},
},
});
console.log(response);
----


@ -45,7 +45,7 @@ console.log(response);
const response1 = await client.indices.putIndexTemplate({
name: 2,
index_patterns: ["k9s*"],
index_patterns: ["k8s*"],
composed_of: ["destination_template"],
data_stream: {},
});


@ -1,23 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "rerank",
inference_id: "my-elastic-rerank",
inference_config: {
service: "elasticsearch",
service_settings: {
model_id: ".rerank-v1",
num_threads: 1,
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 1,
max_number_of_allocations: 4,
},
},
},
});
console.log(response);
----


@ -1,28 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "my-index-*",
query: {
bool: {
must: [
{
match: {
"user.id": "kimchy",
},
},
],
must_not: [
{
terms: {
_index: ["my-index-01"],
},
},
],
},
},
});
console.log(response);
----


@ -3,8 +3,8 @@
[source, js]
----
const response = await client.migration.deprecations({
index: ".ml-anomalies-*",
const response = await client.indices.unfreeze({
index: "my-index-000001",
});
console.log(response);
----


@ -1,31 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ilm.putLifecycle({
name: "my_policy",
policy: {
phases: {
hot: {
actions: {
rollover: {
max_primary_shard_size: "50gb",
},
searchable_snapshot: {
snapshot_repository: "backing_repo",
replicate_for: "14d",
},
},
},
delete: {
min_age: "28d",
actions: {
delete: {},
},
},
},
},
});
console.log(response);
----


@ -4,11 +4,9 @@
[source, js]
----
const response = await client.indices.putSettings({
index: ".reindexed-v9-ml-anomalies-custom-example",
index: "my-index-000001",
settings: {
index: {
number_of_replicas: 0,
},
"index.search.slowlog.include.user": true,
},
});
console.log(response);


@ -6,7 +6,6 @@
const response = await client.indices.resolveCluster({
name: "not-present,clust*:my-index*,oldcluster:*",
ignore_unavailable: "false",
timeout: "5s",
});
console.log(response);
----


@ -10,7 +10,7 @@ const response = await client.ingest.putPipeline({
{
attachment: {
field: "data",
remove_binary: true,
remove_binary: false,
},
},
],


@ -1,70 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "movies",
size: 10,
retriever: {
rescorer: {
rescore: {
window_size: 50,
query: {
rescore_query: {
script_score: {
query: {
match_all: {},
},
script: {
source:
"cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0",
params: {
queryVector: [-0.5, 90, -10, 14.8, -156],
},
},
},
},
},
},
retriever: {
rrf: {
rank_window_size: 100,
retrievers: [
{
standard: {
query: {
sparse_vector: {
field: "plot_embedding",
inference_id: "my-elser-model",
query: "films that explore psychological depths",
},
},
},
},
{
standard: {
query: {
multi_match: {
query: "crime",
fields: ["plot", "title"],
},
},
},
},
{
knn: {
field: "vector",
query_vector: [10, 22, 77],
k: 10,
num_candidates: 10,
},
},
],
},
},
},
},
});
console.log(response);
----


@ -1,23 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "my-index",
settings: {
index: {
number_of_shards: 3,
"blocks.write": true,
},
},
mappings: {
properties: {
field1: {
type: "text",
},
},
},
});
console.log(response);
----


@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.bulk({
index: "test-index",
operations: [
{
update: {
_id: "1",
},
},
{
doc: {
infer_field: "updated inference field",
source_field: "updated source field",
},
},
],
});
console.log(response);
----


@ -1,19 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: ".ml-anomalies-custom-example",
size: 0,
aggs: {
job_ids: {
terms: {
field: "job_id",
size: 100,
},
},
},
});
console.log(response);
----


@ -1,61 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "retrievers_example",
retriever: {
linear: {
retrievers: [
{
retriever: {
standard: {
query: {
function_score: {
query: {
term: {
topic: "ai",
},
},
functions: [
{
script_score: {
script: {
source: "doc['timestamp'].value.millis",
},
},
},
],
boost_mode: "replace",
},
},
sort: {
timestamp: {
order: "asc",
},
},
},
},
weight: 2,
normalizer: "minmax",
},
{
retriever: {
knn: {
field: "vector",
query_vector: [0.23, 0.67, 0.89],
k: 3,
num_candidates: 5,
},
},
weight: 1.5,
},
],
rank_window_size: 10,
},
},
_source: false,
});
console.log(response);
----


@ -1,16 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.updateAliases({
actions: [
{
remove_index: {
index: "my-index-2099.05.06-000001",
},
},
],
});
console.log(response);
----


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_query/async",
const response = await client.esql.asyncQuery({
body: {
query:
"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ",


@ -1,18 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "kibana_sample_data_ecommerce",
size: 0,
aggs: {
order_stats: {
stats: {
field: "taxful_total_price",
},
},
},
});
console.log(response);
----


@ -3,9 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM&#x3D;",
const response = await client.esql.asyncQueryGet({
id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=",
body: null,
});
console.log(response);
----


@ -6,11 +6,15 @@
const response = await client.update({
index: "test",
id: 1,
doc: {
product_price: 100,
script: {
source: "ctx._source.counter += params.count",
lang: "painless",
params: {
count: 4,
},
},
upsert: {
product_price: 50,
counter: 1,
},
});
console.log(response);


@ -1,47 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
messages: [
{
role: "user",
content: [
{
type: "text",
text: "What's the price of a scarf?",
},
],
},
],
tools: [
{
type: "function",
function: {
name: "get_current_price",
description: "Get the current price of a item",
parameters: {
type: "object",
properties: {
item: {
id: "123",
},
},
},
},
},
],
tool_choice: {
type: "function",
function: {
name: "get_current_price",
},
},
},
});
console.log(response);
----


@ -3,18 +3,15 @@
[source, js]
----
const response = await client.search({
index: "image-index",
const response = await client.knnSearch({
index: "my-index",
knn: {
field: "image-vector",
query_vector: [-5, 9, -12],
field: "image_vector",
query_vector: [0.3, 0.1, 1.2],
k: 10,
num_candidates: 100,
rescore_vector: {
oversample: 2,
},
},
fields: ["title", "file-type"],
_source: ["name", "file_type"],
});
console.log(response);
----


@ -3,9 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/completion/openai-completion/_stream",
const response = await client.inference.streamInference({
task_type: "completion",
inference_id: "openai-completion",
body: {
input: "What is Elastic?",
},


@ -3,8 +3,8 @@
[source, js]
----
const response = await client.indices.getAlias({
index: ".ml-anomalies-custom-example",
const response = await client.cluster.getSettings({
flat_settings: "true",
});
console.log(response);
----


@ -1,17 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "elser-model-eis",
inference_config: {
service: "elastic",
service_settings: {
model_name: "elser",
},
},
});
console.log(response);
----


@ -1,18 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.simulate({
id: "query_helper_pipeline",
docs: [
{
_source: {
content:
"artificial intelligence in medicine articles published in the last 12 months",
},
},
],
});
console.log(response);
----


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/oidc/prepare",
const response = await client.security.oidcPrepareAuthentication({
body: {
realm: "oidc1",
state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO",


@ -1,16 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "jinaai-index",
query: {
semantic: {
field: "content",
query: "who inspired taking care of the sea?",
},
},
});
console.log(response);
----


@ -4,7 +4,7 @@
[source, js]
----
const response = await client.indices.create({
index: "my-index-000003",
index: "my-index-000002",
mappings: {
properties: {
inference_field: {


@ -1,10 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.getSettings({
index: ".reindexed-v9-ml-anomalies-custom-example",
});
console.log(response);
----


@ -4,12 +4,16 @@
[source, js]
----
const response = await client.indices.create({
index: "jinaai-index",
index: "semantic-embeddings",
mappings: {
properties: {
content: {
semantic_text: {
type: "semantic_text",
inference_id: "jinaai-embeddings",
inference_id: "my-elser-endpoint",
},
content: {
type: "text",
copy_to: "semantic_text",
},
},
},


@ -11,7 +11,7 @@ const response = await client.ingest.putPipeline({
attachment: {
field: "data",
properties: ["content", "title"],
remove_binary: true,
remove_binary: false,
},
},
],


@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_query/async",
querystring: {
format: "json",
},
const response = await client.esql.asyncQuery({
format: "json",
body: {
query:
"\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",


@ -1,15 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.putSettings({
index: "*",
settings: {
"index.search.slowlog.include.user": true,
"index.search.slowlog.threshold.fetch.warn": "30s",
"index.search.slowlog.threshold.query.warn": "30s",
},
});
console.log(response);
----


@ -11,7 +11,7 @@ const response = await client.searchApplication.put({
script: {
lang: "mustache",
source:
'\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ',
'\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ',
params: {
query: "",
_es_filters: {},


@ -1,16 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.reindex({
wait_for_completion: "false",
source: {
index: ".ml-anomalies-custom-example",
},
dest: {
index: ".reindexed-v9-ml-anomalies-custom-example",
},
});
console.log(response);
----


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/api_key/_bulk_update",
const response = await client.security.bulkUpdateApiKeys({
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
},


@ -1,24 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "my-index-000001",
query: {
prefix: {
full_name: {
value: "ki",
},
},
},
highlight: {
fields: {
full_name: {
matched_fields: ["full_name._index_prefix"],
},
},
},
});
console.log(response);
----


@ -1,33 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "kibana_sample_data_ecommerce",
size: 0,
aggs: {
daily_sales: {
date_histogram: {
field: "order_date",
calendar_interval: "day",
},
aggs: {
daily_revenue: {
sum: {
field: "taxful_total_price",
},
},
smoothed_revenue: {
moving_fn: {
buckets_path: "daily_revenue",
window: 3,
script: "MovingFunctions.unweightedAvg(values)",
},
},
},
},
},
});
console.log(response);
----


@ -0,0 +1,26 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "test-index",
query: {
nested: {
path: "inference_field.inference.chunks",
query: {
knn: {
field: "inference_field.inference.chunks.embeddings",
query_vector_builder: {
text_embedding: {
model_id: "my_inference_id",
model_text: "mountain lake",
},
},
},
},
},
},
});
console.log(response);
----


@ -1,44 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "retrievers_example",
retriever: {
rrf: {
retrievers: [
{
knn: {
field: "vector",
query_vector: [0.23, 0.67, 0.89],
k: 3,
num_candidates: 5,
},
},
{
text_similarity_reranker: {
retriever: {
standard: {
query: {
term: {
topic: "ai",
},
},
},
},
field: "text",
inference_id: "my-rerank-model",
inference_text:
"Can I use generative AI to identify user intent and improve search relevance?",
},
},
],
rank_window_size: 10,
rank_constant: 1,
},
},
_source: false,
});
console.log(response);
----


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_text_structure/find_message_structure",
const response = await client.textStructure.findMessageStructure({
body: {
messages: [
"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128",


@ -1,46 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "retrievers_example",
retriever: {
text_similarity_reranker: {
retriever: {
rrf: {
retrievers: [
{
standard: {
query: {
query_string: {
query:
"(information retrieval) OR (artificial intelligence)",
default_field: "text",
},
},
},
},
{
knn: {
field: "vector",
query_vector: [0.23, 0.67, 0.89],
k: 3,
num_candidates: 5,
},
},
],
rank_window_size: 10,
rank_constant: 1,
},
},
field: "text",
inference_id: "my-rerank-model",
inference_text:
"What are the state of the art applications of AI in information retrieval?",
},
},
_source: false,
});
console.log(response);
----


@ -1,35 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
query: {
intervals: {
my_text: {
all_of: {
ordered: false,
max_gaps: 1,
intervals: [
{
match: {
query: "my favorite food",
max_gaps: 0,
ordered: true,
},
},
{
match: {
query: "cold porridge",
max_gaps: 4,
ordered: true,
},
},
],
},
},
},
},
});
console.log(response);
----


@ -12,7 +12,7 @@ const response = await client.inference.put({
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 1,
max_number_of_allocations: 4,
max_number_of_allocations: 10,
},
num_threads: 1,
model_id: ".elser_model_2",


@ -1,11 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "DELETE",
path: "/_ingest/ip_location/database/my-database-id",
});
console.log(response);
----


@ -1,37 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "kibana_sample_data_ecommerce",
size: 0,
aggs: {
daily_sales: {
date_histogram: {
field: "order_date",
calendar_interval: "day",
format: "yyyy-MM-dd",
},
aggs: {
revenue: {
sum: {
field: "taxful_total_price",
},
},
unique_customers: {
cardinality: {
field: "customer_id",
},
},
avg_basket_size: {
avg: {
field: "total_quantity",
},
},
},
},
},
});
console.log(response);
----


@ -3,9 +3,7 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_security/api_key/_bulk_update",
const response = await client.security.bulkUpdateApiKeys({
body: {
ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
role_descriptors: {


@ -1,34 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
messages: [
{
role: "assistant",
content: "Let's find out what the weather is",
tool_calls: [
{
id: "call_KcAjWtAww20AihPHphUh46Gd",
type: "function",
function: {
name: "get_current_weather",
arguments: '{"location":"Boston, MA"}',
},
},
],
},
{
role: "tool",
content: "The weather is cold",
tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
},
],
},
});
console.log(response);
----


@ -11,8 +11,6 @@ const response = await client.indices.putSettings({
"index.indexing.slowlog.threshold.index.debug": "2s",
"index.indexing.slowlog.threshold.index.trace": "500ms",
"index.indexing.slowlog.source": "1000",
"index.indexing.slowlog.reformat": true,
"index.indexing.slowlog.include.user": true,
},
});
console.log(response);


@ -7,14 +7,14 @@ const response = await client.indices.create({
index: "test-index",
mappings: {
properties: {
infer_field: {
type: "semantic_text",
inference_id: "my-elser-endpoint",
},
source_field: {
type: "text",
copy_to: "infer_field",
},
infer_field: {
type: "semantic_text",
inference_id: ".elser-2-elasticsearch",
},
},
},
});


@ -1,12 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.esql.query({
query:
'\nFROM library\n| EVAL year = DATE_EXTRACT("year", release_date)\n| WHERE page_count > ? AND match(author, ?, {"minimum_should_match": ?})\n| LIMIT 5\n',
params: [300, "Frank Herbert", 2],
});
console.log(response);
----


@ -3,8 +3,8 @@
[source, js]
----
const response = await client.indices.rollover({
alias: "datastream",
const response = await client.security.queryRole({
sort: ["name"],
});
console.log(response);
----


@ -1,39 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "kibana_sample_data_ecommerce",
size: 0,
aggs: {
categories: {
terms: {
field: "category.keyword",
size: 5,
order: {
total_revenue: "desc",
},
},
aggs: {
total_revenue: {
sum: {
field: "taxful_total_price",
},
},
avg_order_value: {
avg: {
field: "taxful_total_price",
},
},
total_items: {
sum: {
field: "total_quantity",
},
},
},
},
},
});
console.log(response);
----


@ -4,18 +4,17 @@
[source, js]
----
const response = await client.inference.put({
task_type: "rerank",
inference_id: "my-elastic-rerank",
task_type: "sparse_embedding",
inference_id: "my-elser-endpoint",
inference_config: {
service: "elasticsearch",
service: "elser",
service_settings: {
model_id: ".rerank-v1",
num_threads: 1,
adaptive_allocations: {
enabled: true,
min_number_of_allocations: 1,
min_number_of_allocations: 3,
max_number_of_allocations: 10,
},
num_threads: 1,
},
},
});


@ -1,17 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_ingest/ip_location/database/my-database-1",
body: {
name: "GeoIP2-Domain",
maxmind: {
account_id: "1234567",
},
},
});
console.log(response);
----


@ -1,42 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.bulk({
index: "jinaai-index",
operations: [
{
index: {
_index: "jinaai-index",
_id: "1",
},
},
{
content:
"Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades.",
},
{
index: {
_index: "jinaai-index",
_id: "2",
},
},
{
content:
"She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. ",
},
{
index: {
_index: "jinaai-index",
_id: "3",
},
},
{
content:
"Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists.",
},
],
});
console.log(response);
----

Some files were not shown because too many files have changed in this diff.