Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 120845a662 | |
| | a00307827a | |
| | 25e8e84fe5 | |
| | 279c29d8f9 | |
| | 07f7ffea76 | |
| | 3d3c72dc40 | |
| | 3627a4c56b | |
| | fecda564da | |
| | f9a5a18a71 | |
@@ -1,6 +1,22 @@
[[changelog-client]]
== Release notes

[discrete]
=== 8.16.4

[discrete]
==== Fixes

[discrete]
===== Improved support for Elasticsearch `v8.16`

Updated TypeScript types based on fixes and improvements to the Elasticsearch specification.

[discrete]
===== Report correct transport connection type in telemetry

The client's telemetry reporting mechanism was incorrectly reporting all traffic as using `HttpConnection` when the default is `UndiciConnection`. https://github.com/elastic/elasticsearch-js/issues/2324[#2324]

[discrete]
=== 8.16.3
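Context for the telemetry fix above (not part of this diff): the connection class is chosen when the client is constructed, with `UndiciConnection` as the default and `HttpConnection` as an explicit opt-in. A minimal sketch, assuming the standard `@elastic/elasticsearch` exports; the node URL is a placeholder.

[source, js]
----
// Hypothetical illustration, not part of this changeset.
const { Client, HttpConnection } = require("@elastic/elasticsearch");

// Default transport connection class is UndiciConnection,
// which telemetry should now report correctly.
const defaultClient = new Client({ node: "http://localhost:9200" });

// Explicit opt-in to the legacy HTTP connection class.
const httpClient = new Client({
  node: "http://localhost:9200",
  Connection: HttpConnection,
});
----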
docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc (Normal file, 11 lines)
@@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.getDataStream({
  name: "my-data-stream",
  filter_path: "data_streams.indices.index_name",
});
console.log(response);
----

docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc (Normal file, 10 lines)
@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.getMapping({
  index: "kibana_sample_data_ecommerce",
});
console.log(response);
----

@@ -11,6 +11,8 @@ const response = await client.indices.putSettings({
"index.indexing.slowlog.threshold.index.debug": "2s",
"index.indexing.slowlog.threshold.index.trace": "500ms",
"index.indexing.slowlog.source": "1000",
"index.indexing.slowlog.reformat": true,
"index.indexing.slowlog.include.user": true,
},
});
console.log(response);

docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc (Normal file, 42 lines)
@@ -0,0 +1,42 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "my-rank-vectors-bit",
  mappings: {
    properties: {
      my_vector: {
        type: "rank_vectors",
        element_type: "bit",
      },
    },
  },
});
console.log(response);

const response1 = await client.bulk({
  index: "my-rank-vectors-bit",
  refresh: "true",
  operations: [
    {
      index: {
        _id: "1",
      },
    },
    {
      my_vector: [127, -127, 0, 1, 42],
    },
    {
      index: {
        _id: "2",
      },
    },
    {
      my_vector: "8100012a7f",
    },
  ],
});
console.log(response1);
----

docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc (Normal file, 15 lines)
@@ -0,0 +1,15 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.putSettings({
  index: ".reindexed-v9-ml-anomalies-custom-example",
  settings: {
    index: {
      number_of_replicas: "<original_number_of_replicas>",
    },
  },
});
console.log(response);
----

docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc (Normal file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_inference/chat_completion/openai-completion/_stream",
  body: {
    model: "gpt-4o",
    messages: [
      {
        role: "user",
        content: "What is Elastic?",
      },
    ],
  },
});
console.log(response);
----
docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc (Normal file, 11 lines)
@@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.addBlock({
  index: ".ml-anomalies-custom-example",
  block: "read_only",
});
console.log(response);
----

@@ -3,11 +3,13 @@
[source, js]
----
const response = await client.inference.inference({
const response = await client.inference.put({
task_type: "my-inference-endpoint",
inference_id: "_update",
service_settings: {
api_key: "<API_KEY>",
inference_config: {
service_settings: {
api_key: "<API_KEY>",
},
},
});
console.log(response);

docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc (Normal file, 19 lines)
@@ -0,0 +1,19 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.security.queryRole({
  query: {
    bool: {
      must_not: {
        term: {
          "metadata._reserved": true,
        },
      },
    },
  },
  sort: ["name"],
});
console.log(response);
----
docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc (Normal file, 67 lines)
@@ -0,0 +1,67 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "my-rank-vectors-bit",
  mappings: {
    properties: {
      my_vector: {
        type: "rank_vectors",
        element_type: "bit",
      },
    },
  },
});
console.log(response);

const response1 = await client.bulk({
  index: "my-rank-vectors-bit",
  refresh: "true",
  operations: [
    {
      index: {
        _id: "1",
      },
    },
    {
      my_vector: [127, -127, 0, 1, 42],
    },
    {
      index: {
        _id: "2",
      },
    },
    {
      my_vector: "8100012a7f",
    },
  ],
});
console.log(response1);

const response2 = await client.search({
  index: "my-rank-vectors-bit",
  query: {
    script_score: {
      query: {
        match_all: {},
      },
      script: {
        source: "maxSimDotProduct(params.query_vector, 'my_vector')",
        params: {
          query_vector: [
            [
              0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21,
              0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0,
              0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2, 0.92, 0.95, 0.64, 0.19,
              0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84,
            ],
          ],
        },
      },
    },
  },
});
console.log(response2);
----

docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc (Normal file, 11 lines)
@@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.addBlock({
  index: ".ml-anomalies-custom-example",
  block: "write",
});
console.log(response);
----

docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc (Normal file, 26 lines)
@@ -0,0 +1,26 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "my-rank-vectors-float",
  query: {
    script_score: {
      query: {
        match_all: {},
      },
      script: {
        source: "maxSimDotProduct(params.query_vector, 'my_vector')",
        params: {
          query_vector: [
            [0.5, 10, 6],
            [-0.5, 10, 10],
          ],
        },
      },
    },
  },
});
console.log(response);
----

@@ -6,13 +6,13 @@
const response = await client.indices.create({
index: "test-index",
query: {
semantic: {
field: "my_semantic_field",
match: {
my_field: "Which country is Paris in?",
},
},
highlight: {
fields: {
my_semantic_field: {
my_field: {
type: "semantic",
number_of_fragments: 2,
order: "score",
docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc (Normal file, 23 lines)
@@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "test-index",
  mappings: {
    properties: {
      source_field: {
        type: "text",
        fields: {
          infer_field: {
            type: "semantic_text",
            inference_id: ".elser-2-elasticsearch",
          },
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc (Normal file, 28 lines)
@@ -0,0 +1,28 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "my-index-*",
  query: {
    bool: {
      must: [
        {
          match: {
            "user.id": "kimchy",
          },
        },
      ],
      must_not: [
        {
          terms: {
            _index: ["my-index-01"],
          },
        },
      ],
    },
  },
});
console.log(response);
----

docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc (Normal file, 31 lines)
@@ -0,0 +1,31 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.ilm.putLifecycle({
  name: "my_policy",
  policy: {
    phases: {
      hot: {
        actions: {
          rollover: {
            max_primary_shard_size: "50gb",
          },
          searchable_snapshot: {
            snapshot_repository: "backing_repo",
            replicate_for: "14d",
          },
        },
      },
      delete: {
        min_age: "28d",
        actions: {
          delete: {},
        },
      },
    },
  },
});
console.log(response);
----

@@ -14,6 +14,7 @@ const response = await client.indices.putSettings({
"index.search.slowlog.threshold.fetch.info": "800ms",
"index.search.slowlog.threshold.fetch.debug": "500ms",
"index.search.slowlog.threshold.fetch.trace": "200ms",
"index.search.slowlog.include.user": true,
},
});
console.log(response);
docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc (Normal file, 70 lines)
@@ -0,0 +1,70 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "movies",
  size: 10,
  retriever: {
    rescorer: {
      rescore: {
        window_size: 50,
        query: {
          rescore_query: {
            script_score: {
              query: {
                match_all: {},
              },
              script: {
                source:
                  "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0",
                params: {
                  queryVector: [-0.5, 90, -10, 14.8, -156],
                },
              },
            },
          },
        },
      },
      retriever: {
        rrf: {
          rank_window_size: 100,
          retrievers: [
            {
              standard: {
                query: {
                  sparse_vector: {
                    field: "plot_embedding",
                    inference_id: "my-elser-model",
                    query: "films that explore psychological depths",
                  },
                },
              },
            },
            {
              standard: {
                query: {
                  multi_match: {
                    query: "crime",
                    fields: ["plot", "title"],
                  },
                },
              },
            },
            {
              knn: {
                field: "vector",
                query_vector: [10, 22, 77],
                k: 10,
                num_candidates: 10,
              },
            },
          ],
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc (Normal file, 23 lines)
@@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "my-index",
  settings: {
    index: {
      number_of_shards: 3,
      "blocks.write": true,
    },
  },
  mappings: {
    properties: {
      field1: {
        type: "text",
      },
    },
  },
});
console.log(response);
----

@@ -1,23 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.bulk({
  index: "test-index",
  operations: [
    {
      update: {
        _id: "1",
      },
    },
    {
      doc: {
        infer_field: "updated inference field",
        source_field: "updated source field",
      },
    },
  ],
});
console.log(response);
----
docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc (Normal file, 19 lines)
@@ -0,0 +1,19 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: ".ml-anomalies-custom-example",
  size: 0,
  aggs: {
    job_ids: {
      terms: {
        field: "job_id",
        size: 100,
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc (Normal file, 61 lines)
@@ -0,0 +1,61 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    linear: {
      retrievers: [
        {
          retriever: {
            standard: {
              query: {
                function_score: {
                  query: {
                    term: {
                      topic: "ai",
                    },
                  },
                  functions: [
                    {
                      script_score: {
                        script: {
                          source: "doc['timestamp'].value.millis",
                        },
                      },
                    },
                  ],
                  boost_mode: "replace",
                },
              },
              sort: {
                timestamp: {
                  order: "asc",
                },
              },
            },
          },
          weight: 2,
          normalizer: "minmax",
        },
        {
          retriever: {
            knn: {
              field: "vector",
              query_vector: [0.23, 0.67, 0.89],
              k: 3,
              num_candidates: 5,
            },
          },
          weight: 1.5,
        },
      ],
      rank_window_size: 10,
    },
  },
  _source: false,
});
console.log(response);
----

docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc (Normal file, 16 lines)
@@ -0,0 +1,16 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.updateAliases({
  actions: [
    {
      remove_index: {
        index: "my-index-2099.05.06-000001",
      },
    },
  ],
});
console.log(response);
----

docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc (Normal file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    order_stats: {
      stats: {
        field: "taxful_total_price",
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc (Normal file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "jinaai-index",
  mappings: {
    properties: {
      content: {
        type: "semantic_text",
        inference_id: "jinaai-embeddings",
      },
    },
  },
});
console.log(response);
----
docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc (Normal file, 47 lines)
@@ -0,0 +1,47 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_inference/chat_completion/openai-completion/_stream",
  body: {
    messages: [
      {
        role: "user",
        content: [
          {
            type: "text",
            text: "What's the price of a scarf?",
          },
        ],
      },
    ],
    tools: [
      {
        type: "function",
        function: {
          name: "get_current_price",
          description: "Get the current price of a item",
          parameters: {
            type: "object",
            properties: {
              item: {
                id: "123",
              },
            },
          },
        },
      },
    ],
    tool_choice: {
      type: "function",
      function: {
        name: "get_current_price",
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc (Normal file, 17 lines)
@@ -0,0 +1,17 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "sparse_embedding",
  inference_id: "elser-model-eis",
  inference_config: {
    service: "elastic",
    service_settings: {
      model_name: "elser",
    },
  },
});
console.log(response);
----

docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc (Normal file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "image-index",
  knn: {
    field: "image-vector",
    query_vector: [-5, 9, -12],
    k: 10,
    num_candidates: 100,
    rescore_vector: {
      oversample: 2,
    },
  },
  fields: ["title", "file-type"],
});
console.log(response);
----

@@ -5,7 +5,7 @@
----
const response = await client.cluster.putSettings({
persistent: {
"cluster.routing.allocation.disk.watermark.low": "30gb",
"migrate.data_stream_reindex_max_request_per_second": 10000,
},
});
console.log(response);

docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc (Normal file, 16 lines)
@@ -0,0 +1,16 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "jinaai-index",
  query: {
    semantic: {
      field: "content",
      query: "who inspired taking care of the sea?",
    },
  },
});
console.log(response);
----

docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc (Normal file, 10 lines)
@@ -0,0 +1,10 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.getSettings({
  index: ".reindexed-v9-ml-anomalies-custom-example",
});
console.log(response);
----

@@ -4,9 +4,11 @@
[source, js]
----
const response = await client.indices.putSettings({
index: "my-index-000001",
index: "*",
settings: {
"index.search.slowlog.include.user": true,
"index.search.slowlog.threshold.fetch.warn": "30s",
"index.search.slowlog.threshold.query.warn": "30s",
},
});
console.log(response);
@@ -6,14 +6,15 @@
const response = await client.search({
index: "test-index",
query: {
nested: {
path: "inference_field.inference.chunks",
query: {
sparse_vector: {
field: "inference_field.inference.chunks.embeddings",
inference_id: "my-inference-id",
query: "mountain lake",
},
match: {
my_semantic_field: "Which country is Paris in?",
},
},
highlight: {
fields: {
my_semantic_field: {
number_of_fragments: 2,
order: "score",
},
},
},

docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc (Normal file, 16 lines)
@@ -0,0 +1,16 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.reindex({
  wait_for_completion: "false",
  source: {
    index: ".ml-anomalies-custom-example",
  },
  dest: {
    index: ".reindexed-v9-ml-anomalies-custom-example",
  },
});
console.log(response);
----

@@ -12,6 +12,13 @@ const response = await client.search({
fields: ["my_field", "my_field._2gram", "my_field._3gram"],
},
},
highlight: {
fields: {
my_field: {
matched_fields: ["my_field._index_prefix"],
},
},
},
});
console.log(response);
----

docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc (Normal file, 24 lines)
@@ -0,0 +1,24 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "my-index-000001",
  query: {
    prefix: {
      full_name: {
        value: "ki",
      },
    },
  },
  highlight: {
    fields: {
      full_name: {
        matched_fields: ["full_name._index_prefix"],
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc (Normal file, 33 lines)
@@ -0,0 +1,33 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    daily_sales: {
      date_histogram: {
        field: "order_date",
        calendar_interval: "day",
      },
      aggs: {
        daily_revenue: {
          sum: {
            field: "taxful_total_price",
          },
        },
        smoothed_revenue: {
          moving_fn: {
            buckets_path: "daily_revenue",
            window: 3,
            script: "MovingFunctions.unweightedAvg(values)",
          },
        },
      },
    },
  },
});
console.log(response);
----

@@ -1,26 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "test-index",
  query: {
    nested: {
      path: "inference_field.inference.chunks",
      query: {
        knn: {
          field: "inference_field.inference.chunks.embeddings",
          query_vector_builder: {
            text_embedding: {
              model_id: "my_inference_id",
              model_text: "mountain lake",
            },
          },
        },
      },
    },
  },
});
console.log(response);
----

@@ -5,10 +5,8 @@
----
const response = await client.cluster.putSettings({
persistent: {
"cluster.routing.allocation.disk.watermark.low": "100gb",
"cluster.routing.allocation.disk.watermark.high": "50gb",
"cluster.routing.allocation.disk.watermark.flood_stage": "10gb",
"cluster.info.update.interval": "1m",
"cluster.routing.allocation.disk.watermark.low": "90%",
"cluster.routing.allocation.disk.watermark.high": "95%",
},
});
console.log(response);
docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc (Normal file, 35 lines)
@@ -0,0 +1,35 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  query: {
    intervals: {
      my_text: {
        all_of: {
          ordered: false,
          max_gaps: 1,
          intervals: [
            {
              match: {
                query: "my favorite food",
                max_gaps: 0,
                ordered: true,
              },
            },
            {
              match: {
                query: "cold porridge",
                max_gaps: 4,
                ordered: true,
              },
            },
          ],
        },
      },
    },
  },
});
console.log(response);
----

@@ -7,14 +7,14 @@ const response = await client.indices.create({
index: "test-index",
mappings: {
properties: {
infer_field: {
type: "semantic_text",
inference_id: ".elser-2-elasticsearch",
},
source_field: {
type: "text",
copy_to: "infer_field",
},
infer_field: {
type: "semantic_text",
inference_id: ".elser-2-elasticsearch",
},
},
},
});

docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc (Normal file, 37 lines)
@@ -0,0 +1,37 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    daily_sales: {
      date_histogram: {
        field: "order_date",
        calendar_interval: "day",
        format: "yyyy-MM-dd",
      },
      aggs: {
        revenue: {
          sum: {
            field: "taxful_total_price",
          },
        },
        unique_customers: {
          cardinality: {
            field: "customer_id",
          },
        },
        avg_basket_size: {
          avg: {
            field: "total_quantity",
          },
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc (Normal file, 34 lines)
@@ -0,0 +1,34 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_inference/chat_completion/openai-completion/_stream",
  body: {
    messages: [
      {
        role: "assistant",
        content: "Let's find out what the weather is",
        tool_calls: [
          {
            id: "call_KcAjWtAww20AihPHphUh46Gd",
            type: "function",
            function: {
              name: "get_current_weather",
              arguments: '{"location":"Boston, MA"}',
            },
          },
        ],
      },
      {
        role: "tool",
        content: "The weather is cold",
        tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
      },
    ],
  },
});
console.log(response);
----

@@ -4,9 +4,11 @@
[source, js]
----
const response = await client.indices.putSettings({
index: "my-index-000001",
index: ".reindexed-v9-ml-anomalies-custom-example",
settings: {
"index.blocks.read_only_allow_delete": null,
index: {
number_of_replicas: 0,
},
},
});
console.log(response);
docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc (Normal file, 12 lines)
@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.esql.query({
  query:
    '\nFROM library\n| EVAL year = DATE_EXTRACT("year", release_date)\n| WHERE page_count > ? AND match(author, ?, {"minimum_should_match": ?})\n| LIMIT 5\n',
  params: [300, "Frank Herbert", 2],
});
console.log(response);
----

@@ -3,8 +3,8 @@

[source, js]
----
const response = await client.security.queryRole({
sort: ["name"],
const response = await client.indices.getAlias({
index: ".ml-anomalies-custom-example",
});
console.log(response);
----

docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc (Normal file, 39 lines)
@@ -0,0 +1,39 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    categories: {
      terms: {
        field: "category.keyword",
        size: 5,
        order: {
          total_revenue: "desc",
        },
      },
      aggs: {
        total_revenue: {
          sum: {
            field: "taxful_total_price",
          },
        },
        avg_order_value: {
          avg: {
            field: "taxful_total_price",
          },
        },
        total_items: {
          sum: {
            field: "total_quantity",
          },
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc (Normal file, 42 lines)
@@ -0,0 +1,42 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.bulk({
  index: "jinaai-index",
  operations: [
    {
      index: {
        _index: "jinaai-index",
        _id: "1",
      },
    },
    {
      content:
        "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades.",
    },
    {
      index: {
        _index: "jinaai-index",
        _id: "2",
      },
    },
    {
      content:
        "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. ",
    },
    {
      index: {
        _index: "jinaai-index",
        _id: "3",
      },
    },
    {
      content:
        "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists.",
    },
  ],
});
console.log(response);
----

@@ -5,6 +5,9 @@
----
const response = await client.indices.create({
index: "retrievers_example_nested",
settings: {
number_of_shards: 1,
},
mappings: {
properties: {
nested_field: {

@@ -18,6 +21,9 @@ const response = await client.indices.create({
dims: 3,
similarity: "l2_norm",
index: true,
index_options: {
type: "flat",
},
},
},
},

docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc (Normal file, 30 lines)
@@ -0,0 +1,30 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "my-rank-vectors-byte",
  mappings: {
    properties: {
      my_vector: {
        type: "rank_vectors",
        element_type: "byte",
      },
    },
  },
});
console.log(response);

const response1 = await client.index({
  index: "my-rank-vectors-byte",
  id: 1,
  document: {
    my_vector: [
      [1, 2, 3],
      [4, 5, 6],
    ],
  },
});
console.log(response1);
----
docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc (Normal file, 12 lines)
@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.cat.indices({
  index: ".ml-anomalies-custom-example",
  v: "true",
  h: "index,store.size",
});
console.log(response);
----

docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc (Normal file, 12 lines)
@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.get({
  index: ".migrated-ds-my-data-stream-2025.01.23-000001",
  human: "true",
  filter_path: "*.settings.index.version.created_string",
});
console.log(response);
----

docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc (Normal file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    avg_order_value: {
      avg: {
        field: "taxful_total_price",
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc (Normal file, 21 lines)
@@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    daily_orders: {
      date_histogram: {
        field: "order_date",
        calendar_interval: "day",
        format: "yyyy-MM-dd",
        min_doc_count: 0,
      },
    },
  },
});
console.log(response);
----

@@ -6,6 +6,7 @@
const response = await client.indices.resolveCluster({
name: "not-present,clust*:my-index*,oldcluster:*",
ignore_unavailable: "false",
timeout: "5s",
});
console.log(response);
----

docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc (Normal file, 12 lines)
@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.getSettings({
  index: "_all",
  expand_wildcards: "all",
  filter_path: "*.settings.index.*.slowlog",
});
console.log(response);
----

@@ -6,15 +6,11 @@
const response = await client.update({
index: "test",
id: 1,
script: {
source: "ctx._source.counter += params.count",
lang: "painless",
params: {
count: 4,
},
doc: {
product_price: 100,
},
upsert: {
counter: 1,
product_price: 50,
},
});
console.log(response);

docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc (Normal file, 22 lines)
@@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    sales_by_category: {
      terms: {
        field: "category.keyword",
        size: 5,
        order: {
          _count: "desc",
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc (Normal file, 31 lines)
@@ -0,0 +1,31 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  size: 0,
  aggs: {
    daily_sales: {
      date_histogram: {
        field: "order_date",
        calendar_interval: "day",
      },
      aggs: {
        revenue: {
          sum: {
            field: "taxful_total_price",
          },
        },
        cumulative_revenue: {
          cumulative_sum: {
            buckets_path: "revenue",
          },
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc (Normal file, 22 lines)
@@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "rerank",
  inference_id: "jinaai-rerank",
  inference_config: {
    service: "jinaai",
    service_settings: {
      api_key: "<api_key>",
      model_id: "jina-reranker-v2-base-multilingual",
    },
    task_settings: {
      top_n: 10,
      return_documents: true,
    },
  },
});
console.log(response);
----
docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc (Normal file, 11 lines)
@@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.esql.query({
  query:
    '\nFROM library\n| WHERE match(author, "Frank Herbert", {"minimum_should_match": 2, "operator": "AND"})\n| LIMIT 5\n',
});
console.log(response);
----

docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc (Normal file, 35 lines)
@@ -0,0 +1,35 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  query: {
    intervals: {
      my_text: {
        all_of: {
          ordered: true,
          max_gaps: 1,
          intervals: [
            {
              match: {
                query: "my favorite food",
                max_gaps: 0,
                ordered: true,
              },
            },
            {
              match: {
                query: "cold porridge",
                max_gaps: 4,
                ordered: true,
              },
            },
          ],
        },
      },
    },
  },
});
console.log(response);
----

docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc (Normal file, 11 lines)
@@ -0,0 +1,11 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.cluster.state({
  metric: "metadata",
  filter_path: "metadata.indices.*.system",
});
console.log(response);
----

docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc (Normal file, 28 lines)
@@ -0,0 +1,28 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "jinaai-index",
  retriever: {
    text_similarity_reranker: {
      retriever: {
        standard: {
          query: {
            semantic: {
              field: "content",
              query: "who inspired taking care of the sea?",
            },
          },
        },
      },
      field: "content",
      rank_window_size: 100,
      inference_id: "jinaai-rerank",
      inference_text: "who inspired taking care of the sea?",
    },
  },
});
console.log(response);
----

docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc (Normal file, 44 lines)
@@ -0,0 +1,44 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    linear: {
      retrievers: [
        {
          retriever: {
            standard: {
              query: {
                query_string: {
                  query: "(information retrieval) OR (artificial intelligence)",
                  default_field: "text",
                },
              },
            },
          },
          weight: 2,
          normalizer: "minmax",
        },
        {
          retriever: {
            knn: {
              field: "vector",
              query_vector: [0.23, 0.67, 0.89],
              k: 3,
              num_candidates: 5,
            },
          },
          weight: 1.5,
          normalizer: "minmax",
        },
      ],
      rank_window_size: 10,
    },
  },
  _source: false,
});
console.log(response);
----

docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc (Normal file, 17 lines)
@@ -0,0 +1,17 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "chat_completion",
  inference_id: "chat-completion-endpoint",
  inference_config: {
    service: "elastic",
    service_settings: {
      model_id: "model-1",
    },
  },
});
console.log(response);
----
docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc (Normal file, 57 lines)
@@ -0,0 +1,57 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.updateAliases({
  actions: [
    {
      add: {
        index: ".reindexed-v9-ml-anomalies-custom-example",
        alias: ".ml-anomalies-example1",
        filter: {
          term: {
            job_id: {
              value: "example1",
            },
          },
        },
        is_hidden: true,
      },
    },
    {
      add: {
        index: ".reindexed-v9-ml-anomalies-custom-example",
        alias: ".ml-anomalies-example2",
        filter: {
          term: {
            job_id: {
              value: "example2",
            },
          },
        },
        is_hidden: true,
      },
    },
    {
      remove: {
        index: ".ml-anomalies-custom-example",
        aliases: ".ml-anomalies-*",
      },
    },
    {
      remove_index: {
        index: ".ml-anomalies-custom-example",
      },
    },
    {
      add: {
        index: ".reindexed-v9-ml-anomalies-custom-example",
        alias: ".ml-anomalies-custom-example",
        is_hidden: true,
      },
    },
  ],
});
console.log(response);
----

@@ -7,7 +7,7 @@ const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "elser_embeddings",
inference_config: {
service: "elser",
service: "elasticsearch",
service_settings: {
num_allocations: 1,
num_threads: 1,

docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc (Normal file, 29 lines)
@@ -0,0 +1,29 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "my-rank-vectors-float",
  mappings: {
    properties: {
      my_vector: {
        type: "rank_vectors",
      },
    },
  },
});
console.log(response);

const response1 = await client.index({
  index: "my-rank-vectors-float",
  id: 1,
  document: {
    my_vector: [
      [0.5, 10, 6],
      [-0.5, 10, 10],
    ],
  },
});
console.log(response1);
----

@@ -3,8 +3,8 @@

[source, js]
----
const response = await client.indices.unfreeze({
index: "my-index-000001",
const response = await client.migration.deprecations({
index: ".ml-anomalies-*",
});
console.log(response);
----

@@ -4,9 +4,10 @@
[source, js]
----
const response = await client.indices.putSettings({
index: "my-index-000001",
index: "*",
settings: {
"index.indexing.slowlog.include.user": true,
"index.indexing.slowlog.threshold.index.warn": "30s",
},
});
console.log(response);

@@ -30,6 +30,13 @@ const response = await client.search({
],
},
},
highlight: {
fields: {
semantic_text: {
number_of_fragments: 2,
},
},
},
});
console.log(response);
----

@@ -5,6 +5,9 @@
----
const response = await client.indices.create({
index: "retrievers_example",
settings: {
number_of_shards: 1,
},
mappings: {
properties: {
vector: {

@@ -12,6 +15,9 @@ const response = await client.indices.create({
dims: 3,
similarity: "l2_norm",
index: true,
index_options: {
type: "flat",
},
},
text: {
type: "text",

@@ -22,6 +28,9 @@ const response = await client.indices.create({
topic: {
type: "keyword",
},
timestamp: {
type: "date",
},
},
},
});

@@ -35,6 +44,7 @@ const response1 = await client.index({
text: "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.",
year: 2024,
topic: ["llm", "ai", "information_retrieval"],
timestamp: "2021-01-01T12:10:30",
},
});
console.log(response1);

@@ -47,6 +57,7 @@ const response2 = await client.index({
text: "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.",
year: 2023,
topic: ["ai", "medicine"],
timestamp: "2022-01-01T12:10:30",
},
});
console.log(response2);

@@ -59,6 +70,7 @@ const response3 = await client.index({
text: "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.",
year: 2024,
topic: ["ai", "security"],
timestamp: "2023-01-01T12:10:30",
},
});
console.log(response3);

@@ -71,6 +83,7 @@ const response4 = await client.index({
text: "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.",
year: 2023,
topic: ["ai", "elastic", "assistant"],
timestamp: "2024-01-01T12:10:30",
},
});
console.log(response4);

@@ -83,6 +96,7 @@ const response5 = await client.index({
text: "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.",
year: 2024,
topic: ["documentation", "observability", "elastic"],
timestamp: "2025-01-01T12:10:30",
},
});
console.log(response5);
docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc (Normal file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "jinaai-embeddings",
  inference_config: {
    service: "jinaai",
    service_settings: {
      model_id: "jina-embeddings-v3",
      api_key: "<api_key>",
    },
  },
});
console.log(response);
----

File diff suppressed because it is too large.

@@ -1,7 +1,7 @@
{
"name": "@elastic/elasticsearch",
"version": "8.16.3",
"versionCanary": "8.16.3-canary.0",
"version": "8.16.4",
"versionCanary": "8.16.4-canary.0",
"description": "The official Elasticsearch client for Node.js",
"main": "./index.js",
"types": "index.d.ts",

@@ -45,7 +45,7 @@ export default class Cluster {
}

/**
* Provides explanations for shard allocations in the cluster.
* Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-allocation-explain.html | Elasticsearch API documentation}
*/
async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterAllocationExplainResponse>
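For readers skimming this diff, a minimal usage sketch of the API whose docstring changes above; it is not part of this changeset, and the index and shard values are placeholders.

[source, js]
----
// Hypothetical usage sketch; index, shard, and primary are placeholder inputs.
const response = await client.cluster.allocationExplain({
  index: "my-index-000001",
  shard: 0,
  primary: false,
});
console.log(response);
----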
@ -119,7 +119,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears cluster voting config exclusions.
|
||||
* Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/voting-config-exclusions.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
|
||||
@ -221,7 +221,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined.
|
||||
* Get cluster-wide settings. By default, it returns only settings that have been explicitly defined.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-get-settings.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterGetSettingsResponse>
|
||||
@ -251,7 +251,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status.
|
||||
* Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-health.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterHealthResponse>
|
||||
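The updated summary highlights waiting for a target health level; a hedged sketch of that usage, with illustrative status and timeout values:

[source, js]
----
// Block until the cluster reaches at least yellow health, or give up after 30s
const response = await client.cluster.health({
  wait_for_status: "yellow",
  timeout: "30s",
});
console.log(response.status);
----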
@ -323,7 +323,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.
|
||||
* Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-pending.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPendingTasksResponse>
|
||||
@ -353,7 +353,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the cluster voting config exclusions by node ids or node names.
|
||||
* Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/voting-config-exclusions.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPostVotingConfigExclusionsResponse>
|
||||
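A sketch of the exclude-then-clear workflow the new description walks through; the node name is a placeholder:

[source, js]
----
// Exclude a departing master-eligible node from the voting configuration
await client.cluster.postVotingConfigExclusions({ node_names: "node-1" });

// Once the excluded node has stopped, clear the exclusion list again
await client.cluster.deleteVotingConfigExclusions();
----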
@ -427,7 +427,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the cluster settings.
|
||||
* Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-update-settings.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPutSettingsResponse>
|
||||
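A small sketch of the persistent-settings usage described above; the setting key and value are examples, not part of this change:

[source, js]
----
// Apply a persistent dynamic setting; assign null to it later to reset it
const response = await client.cluster.putSettings({
  persistent: {
    "cluster.routing.allocation.enable": "primaries",
  },
});
console.log(response);
----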
@ -469,7 +469,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* The cluster remote info API allows you to retrieve all of the configured remote cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias.
|
||||
* Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-remote-info.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRemoteInfoResponse>
|
||||
@ -499,7 +499,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Allows to manually change the allocation of individual shards in the cluster.
|
||||
* Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-reroute.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterRerouteResponse>
|
||||
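The `?retry_failed` parameter mentioned in the new description can be exercised roughly like this (a sketch, not taken from the diff):

[source, js]
----
// Retry allocation for shards that exhausted index.allocation.max_retries
const response = await client.cluster.reroute({ retry_failed: true });
console.log(response);
----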
@ -541,7 +541,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a comprehensive information about the state of the cluster.
|
||||
* Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-state.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStateResponse>
|
||||
@ -585,7 +585,7 @@ export default class Cluster {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns cluster statistics. It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
|
||||
* Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-stats.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterStatsResponse>
|
||||
|
||||
@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
|
||||
interface That { transport: Transport }
|
||||
|
||||
/**
|
||||
* Returns the health of the cluster.
|
||||
* Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/health-api.html | Elasticsearch API documentation}
|
||||
*/
|
||||
export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.HealthReportResponse>
|
||||
|
||||
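Assuming the health API is exposed on the client as `client.healthReport` (as the other APIs in this diff are exposed under their own names), the cheap polling pattern recommended above with verbose analysis disabled looks roughly like:

[source, js]
----
// Periodic poll without the expensive root-cause analysis
const response = await client.healthReport({ verbose: false });
console.log(response.status);
----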
@ -45,7 +45,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
|
||||
* Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-delete-lifecycle.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmDeleteLifecycleResponse>
|
||||
@ -77,7 +77,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures.
|
||||
* Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-explain-lifecycle.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmExplainLifecycleResponse>
|
||||
@ -109,7 +109,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a lifecycle policy.
|
||||
* Get lifecycle policies.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-get-lifecycle.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetLifecycleResponse>
|
||||
@ -149,7 +149,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the current index lifecycle management (ILM) status.
|
||||
* Get the ILM status. Get the current index lifecycle management status.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-get-status.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetStatusResponse>
|
||||
@ -179,7 +179,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers.
|
||||
* Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMigrateToDataTiersResponse>
|
||||
@ -221,7 +221,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Manually moves an index into the specified step and executes that step.
|
||||
* Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-move-to-step.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse>
|
||||
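A hedged sketch of the request shape the move-to-step description refers to; the index, phase, and step names are placeholders:

[source, js]
----
// Move an index that finished the "new" phase into the first step of "warm"
const response = await client.ilm.moveToStep({
  index: "my-index-000001",
  current_step: { phase: "new", action: "complete", name: "complete" },
  next_step: { phase: "warm" },
});
console.log(response);
----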
@ -265,7 +265,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented.
|
||||
* Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-put-lifecycle.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmPutLifecycleResponse>
|
||||
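For reference, creating or replacing a policy (which increments its version, as the new description notes) looks roughly like this; the policy body is illustrative:

[source, js]
----
// Create or replace a lifecycle policy; only the latest version is kept
const response = await client.ilm.putLifecycle({
  name: "my_policy",
  policy: {
    phases: {
      hot: { actions: { rollover: { max_age: "7d" } } },
      delete: { min_age: "30d", actions: { delete: {} } },
    },
  },
});
console.log(response);
----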
@ -309,7 +309,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the assigned lifecycle policy and stops managing the specified index
|
||||
* Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-remove-policy.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRemovePolicyResponse>
|
||||
@ -341,7 +341,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Retries executing the policy for an index that is in the ERROR step.
|
||||
* Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-retry-policy.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRetryResponse>
|
||||
@ -373,7 +373,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the index lifecycle management (ILM) plugin.
|
||||
* Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-start.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStartResponse>
|
||||
@ -403,7 +403,7 @@ export default class Ilm {
|
||||
}
|
||||
|
||||
/**
|
||||
* Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
|
||||
* Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/ilm-stop.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStopResponse>
|
||||
|
||||
@ -769,7 +769,7 @@ export default class Indices {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
|
||||
* Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/data-streams-explain-lifecycle.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExplainDataLifecycleResponse>
|
||||
|
||||
@ -77,22 +77,22 @@ export default class Ingest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes an ip location database configuration
|
||||
* Deletes an IP location database configuration.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/delete-ip-location-database-api.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
|
||||
async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
|
||||
async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
|
||||
async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
|
||||
async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeleteIpLocationDatabaseResponse>
|
||||
async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeleteIpLocationDatabaseResponse, unknown>>
|
||||
async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteIpLocationDatabaseResponse>
|
||||
async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const acceptedPath: string[] = ['id']
|
||||
const querystring: Record<string, any> = {}
|
||||
const body = undefined
|
||||
|
||||
params = params ?? {}
|
||||
for (const key in params) {
|
||||
if (acceptedPath.includes(key)) {
|
||||
continue
|
||||
} else if (key !== 'body') {
|
||||
// @ts-expect-error
|
||||
querystring[key] = params[key]
|
||||
}
|
||||
}
|
||||
@ -211,13 +211,13 @@ export default class Ingest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the specified ip location database configuration
|
||||
* Returns information about one or more IP location database configurations.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/get-ip-location-database-api.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
|
||||
async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
|
||||
async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
|
||||
async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
|
||||
async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetIpLocationDatabaseResponse>
|
||||
async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetIpLocationDatabaseResponse, unknown>>
|
||||
async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetIpLocationDatabaseResponse>
|
||||
async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const acceptedPath: string[] = ['id']
|
||||
const querystring: Record<string, any> = {}
|
||||
const body = undefined
|
||||
@ -227,6 +227,7 @@ export default class Ingest {
|
||||
if (acceptedPath.includes(key)) {
|
||||
continue
|
||||
} else if (key !== 'body') {
|
||||
// @ts-expect-error
|
||||
querystring[key] = params[key]
|
||||
}
|
||||
}
|
||||
@ -364,22 +365,27 @@ export default class Ingest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Puts the configuration for a ip location database to be downloaded
|
||||
* Returns information about one or more IP location database configurations.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/put-ip-location-database-api.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
|
||||
async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
|
||||
async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
|
||||
async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
|
||||
async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestPutIpLocationDatabaseResponse>
|
||||
async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestPutIpLocationDatabaseResponse, unknown>>
|
||||
async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestPutIpLocationDatabaseResponse>
|
||||
async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const acceptedPath: string[] = ['id']
|
||||
const acceptedBody: string[] = ['configuration']
|
||||
const querystring: Record<string, any> = {}
|
||||
const body = undefined
|
||||
// @ts-expect-error
|
||||
let body: any = params.body ?? undefined
|
||||
|
||||
params = params ?? {}
|
||||
for (const key in params) {
|
||||
if (acceptedPath.includes(key)) {
|
||||
if (acceptedBody.includes(key)) {
|
||||
// @ts-expect-error
|
||||
body = params[key]
|
||||
} else if (acceptedPath.includes(key)) {
|
||||
continue
|
||||
} else if (key !== 'body') {
|
||||
// @ts-expect-error
|
||||
querystring[key] = params[key]
|
||||
}
|
||||
}
|
||||
|
||||
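Grounded in the `IngestPutIpLocationDatabaseRequest` type introduced further down in this diff (an `id` plus an optional `configuration` carrying `name` and a provider such as `ipinfo`), a hedged usage sketch; the id and database name are placeholders:

[source, js]
----
// Register an IP location database configuration to be downloaded
const response = await client.ingest.putIpLocationDatabase({
  id: "my-ip-locations",
  configuration: {
    name: "standard_location",
    ipinfo: {},
  },
});
console.log(response);
----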
@ -1953,7 +1953,7 @@ export default class Ml {
|
||||
async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPutDataFrameAnalyticsResponse>
|
||||
async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const acceptedPath: string[] = ['id']
|
||||
const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source', 'headers', 'version']
|
||||
const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version']
|
||||
const querystring: Record<string, any> = {}
|
||||
// @ts-expect-error
|
||||
const userBody: any = params?.body
|
||||
@ -1997,7 +1997,7 @@ export default class Ml {
|
||||
async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPutDatafeedResponse>
|
||||
async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const acceptedPath: string[] = ['datafeed_id']
|
||||
const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers']
|
||||
const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers']
|
||||
const querystring: Record<string, any> = {}
|
||||
// @ts-expect-error
|
||||
const userBody: any = params?.body
|
||||
@ -2084,8 +2084,8 @@ export default class Ml {
|
||||
async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
|
||||
async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<T.MlPutJobResponse>
|
||||
async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
|
||||
const acceptedPath: string[] = ['job_id']
|
||||
const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
|
||||
const acceptedPath: string[] = []
|
||||
const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
|
||||
const querystring: Record<string, any> = {}
|
||||
// @ts-expect-error
|
||||
const userBody: any = params?.body
|
||||
|
||||
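The `putJob` change above moves `job_id` out of `acceptedPath` and into the accepted body keys; from the caller's side the request object is unchanged. A sketch with illustrative values:

[source, js]
----
// job_id is still set on the request object; the client now serializes it in the body
const response = await client.ml.putJob({
  job_id: "my-anomaly-job",
  analysis_config: {
    bucket_span: "15m",
    detectors: [{ function: "mean", field_name: "responsetime" }],
  },
  data_description: { time_field: "timestamp" },
});
console.log(response);
----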
@ -45,7 +45,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* You can use this API to clear the archived repositories metering information in the cluster.
|
||||
* Clear the archived repositories metering. Clear the archived repositories metering information in the cluster.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/clear-repositories-metering-archive-api.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
|
||||
@ -78,7 +78,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts.
|
||||
* Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/get-repositories-metering-api.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
|
||||
@ -110,7 +110,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads.
|
||||
* Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-nodes-hot-threads.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesHotThreadsResponse>
|
||||
@ -150,7 +150,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns cluster nodes information.
|
||||
* Get node information. By default, the API returns all attributes and core settings for cluster nodes.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-nodes-info.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesInfoResponse>
|
||||
@ -197,7 +197,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* Reloads the keystore on nodes in the cluster.
|
||||
* Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation}
|
||||
*/
|
||||
async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesReloadSecureSettingsResponse>
|
||||
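A sketch of the password-protected keystore case described above; the password value is a placeholder:

[source, js]
----
// Reload reloadable secure settings on every node of a password-protected keystore
const response = await client.nodes.reloadSecureSettings({
  secure_settings_password: "keystore-password",
});
console.log(response);
----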
@ -249,7 +249,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns cluster nodes statistics.
|
||||
* Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-nodes-stats.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesStatsResponse>
|
||||
@ -303,7 +303,7 @@ export default class Nodes {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns information on the usage of features.
|
||||
* Get feature usage information.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/cluster-nodes-usage.html | Elasticsearch API documentation}
|
||||
*/
|
||||
async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesUsageResponse>
|
||||
|
||||
@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
|
||||
interface That { transport: Transport }
|
||||
|
||||
/**
|
||||
* Ping the cluster. Returns whether the cluster is running.
|
||||
* Ping the cluster. Get information about whether the cluster is running.
|
||||
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.16/index.html | Elasticsearch API documentation}
|
||||
*/
|
||||
export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>
|
||||
|
||||
293
src/api/types.ts
@ -58,6 +58,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'
|
||||
|
||||
export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
|
||||
index?: IndexName
|
||||
list_executed_pipelines?: boolean
|
||||
pipeline?: string
|
||||
refresh?: Refresh
|
||||
routing?: Routing
|
||||
@ -67,6 +68,7 @@ export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> ex
|
||||
timeout?: Duration
|
||||
wait_for_active_shards?: WaitForActiveShards
|
||||
require_alias?: boolean
|
||||
require_data_stream?: boolean
|
||||
operations?: (BulkOperationContainer | BulkUpdateAction<TDocument, TPartialDocument> | TDocument)[]
|
||||
}
|
||||
|
||||
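The new `list_executed_pipelines` flag added to `BulkRequest` above can be exercised like this (a sketch; the index and document are placeholders):

[source, js]
----
// Ask the bulk API to report which ingest pipelines ran for each item
const response = await client.bulk({
  index: "my-index-000001",
  list_executed_pipelines: true,
  operations: [{ index: {} }, { message: "hello world" }],
});
console.log(response.items);
----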
@ -6892,7 +6894,7 @@ export type CatAllocationResponse = CatAllocationAllocationRecord[]
|
||||
|
||||
export interface CatComponentTemplatesComponentTemplate {
|
||||
name: string
|
||||
version: string
|
||||
version: string | null
|
||||
alias_count: string
|
||||
mapping_count: string
|
||||
settings_count: string
|
||||
@ -12499,7 +12501,15 @@ export type InferenceDenseByteVector = byte[]
|
||||
|
||||
export type InferenceDenseVector = float[]
|
||||
|
||||
export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
|
||||
max_chunk_size?: integer
|
||||
overlap?: integer
|
||||
sentence_overlap?: integer
|
||||
strategy?: string
|
||||
}
|
||||
|
||||
export interface InferenceInferenceEndpoint {
|
||||
chunking_settings?: InferenceInferenceChunkingSettings
|
||||
service: string
|
||||
service_settings: InferenceServiceSettings
|
||||
task_settings?: InferenceTaskSettings
|
||||
@ -12647,7 +12657,16 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
|
||||
|
||||
export interface IngestDatabaseConfiguration {
|
||||
name: Name
|
||||
maxmind: IngestMaxmind
|
||||
maxmind?: IngestMaxmind
|
||||
ipinfo?: IngestIpinfo
|
||||
}
|
||||
|
||||
export interface IngestDatabaseConfigurationFull {
|
||||
web?: IngestWeb
|
||||
local?: IngestLocal
|
||||
name: Name
|
||||
maxmind?: IngestMaxmind
|
||||
ipinfo?: IngestIpinfo
|
||||
}
|
||||
|
||||
export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
|
||||
@ -12799,6 +12818,9 @@ export interface IngestIpLocationProcessor extends IngestProcessorBase {
|
||||
download_database_on_pipeline_creation?: boolean
|
||||
}
|
||||
|
||||
export interface IngestIpinfo {
|
||||
}
|
||||
|
||||
export interface IngestJoinProcessor extends IngestProcessorBase {
|
||||
field: Field
|
||||
separator: string
|
||||
@ -12829,6 +12851,10 @@ export interface IngestKeyValueProcessor extends IngestProcessorBase {
|
||||
value_split: string
|
||||
}
|
||||
|
||||
export interface IngestLocal {
|
||||
type: string
|
||||
}
|
||||
|
||||
export interface IngestLowercaseProcessor extends IngestProcessorBase {
|
||||
field: Field
|
||||
ignore_missing?: boolean
|
||||
@ -13036,6 +13062,9 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase {
|
||||
|
||||
export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
|
||||
|
||||
export interface IngestWeb {
|
||||
}
|
||||
|
||||
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
|
||||
id: Ids
|
||||
master_timeout?: Duration
|
||||
@ -13044,6 +13073,14 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
|
||||
|
||||
export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase {
|
||||
id: Ids
|
||||
master_timeout?: Duration
|
||||
timeout?: Duration
|
||||
}
|
||||
|
||||
export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestDeletePipelineRequest extends RequestBase {
|
||||
id: Id
|
||||
master_timeout?: Duration
|
||||
@ -13094,6 +13131,23 @@ export interface IngestGetGeoipDatabaseResponse {
|
||||
databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
|
||||
}
|
||||
|
||||
export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata {
|
||||
id: Id
|
||||
version: VersionNumber
|
||||
modified_date_millis?: EpochTime<UnitMillis>
|
||||
modified_date?: EpochTime<UnitMillis>
|
||||
database: IngestDatabaseConfigurationFull
|
||||
}
|
||||
|
||||
export interface IngestGetIpLocationDatabaseRequest extends RequestBase {
|
||||
id?: Ids
|
||||
master_timeout?: Duration
|
||||
}
|
||||
|
||||
export interface IngestGetIpLocationDatabaseResponse {
|
||||
databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[]
|
||||
}
|
||||
|
||||
export interface IngestGetPipelineRequest extends RequestBase {
|
||||
id?: Id
|
||||
master_timeout?: Duration
|
||||
@ -13119,6 +13173,15 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase {
|
||||
|
||||
export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestPutIpLocationDatabaseRequest extends RequestBase {
|
||||
id: Id
|
||||
master_timeout?: Duration
|
||||
timeout?: Duration
|
||||
configuration?: IngestDatabaseConfiguration
|
||||
}
|
||||
|
||||
export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestPutPipelineRequest extends RequestBase {
|
||||
id: Id
|
||||
master_timeout?: Duration
|
||||
@ -13390,6 +13453,12 @@ export interface MigrationPostFeatureUpgradeResponse {
|
||||
features: MigrationPostFeatureUpgradeMigrationFeature[]
|
||||
}
|
||||
|
||||
export interface MlAdaptiveAllocationsSettings {
|
||||
enabled: boolean
|
||||
min_number_of_allocations?: integer
|
||||
max_number_of_allocations?: integer
|
||||
}
|
||||
|
||||
export interface MlAnalysisConfig {
|
||||
bucket_span?: Duration
|
||||
categorization_analyzer?: MlCategorizationAnalyzer
|
||||
@ -13420,7 +13489,7 @@ export interface MlAnalysisConfigRead {
|
||||
|
||||
export interface MlAnalysisLimits {
|
||||
categorization_examples_limit?: long
|
||||
model_memory_limit?: string
|
||||
model_memory_limit?: ByteSize
|
||||
}
|
||||
|
||||
export interface MlAnalysisMemoryLimit {
|
||||
@ -13455,20 +13524,21 @@ export interface MlAnomaly {
|
||||
}
|
||||
|
||||
export interface MlAnomalyCause {
|
||||
actual: double[]
|
||||
by_field_name: Name
|
||||
by_field_value: string
|
||||
correlated_by_field_value: string
|
||||
field_name: Field
|
||||
function: string
|
||||
function_description: string
|
||||
influencers: MlInfluence[]
|
||||
over_field_name: Name
|
||||
over_field_value: string
|
||||
partition_field_name: string
|
||||
partition_field_value: string
|
||||
actual?: double[]
|
||||
by_field_name?: Name
|
||||
by_field_value?: string
|
||||
correlated_by_field_value?: string
|
||||
field_name?: Field
|
||||
function?: string
|
||||
function_description?: string
|
||||
geo_results?: MlGeoResults
|
||||
influencers?: MlInfluence[]
|
||||
over_field_name?: Name
|
||||
over_field_value?: string
|
||||
partition_field_name?: string
|
||||
partition_field_value?: string
|
||||
probability: double
|
||||
typical: double[]
|
||||
typical?: double[]
|
||||
}
|
||||
|
||||
export interface MlAnomalyExplanation {
|
||||
@ -13569,6 +13639,14 @@ export interface MlClassificationInferenceOptions {
|
||||
top_classes_results_field?: string
|
||||
}
|
||||
|
||||
export interface MlCommonTokenizationConfig {
|
||||
do_lower_case?: boolean
|
||||
max_sequence_length?: integer
|
||||
span?: integer
|
||||
truncate?: MlTokenizationTruncate
|
||||
with_special_tokens?: boolean
|
||||
}
|
||||
|
||||
export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
|
||||
|
||||
export type MlCustomSettings = any
|
||||
@ -13658,15 +13736,16 @@ export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'
|
||||
export interface MlDatafeedStats {
|
||||
assignment_explanation?: string
|
||||
datafeed_id: Id
|
||||
node?: MlDiscoveryNode
|
||||
node?: MlDiscoveryNodeCompact
|
||||
state: MlDatafeedState
|
||||
timing_stats: MlDatafeedTimingStats
|
||||
timing_stats?: MlDatafeedTimingStats
|
||||
running_state?: MlDatafeedRunningState
|
||||
}
|
||||
|
||||
export interface MlDatafeedTimingStats {
|
||||
bucket_count: long
|
||||
exponential_average_search_time_per_hour_ms: DurationValue<UnitFloatMillis>
|
||||
exponential_average_calculation_context?: MlExponentialAverageCalculationContext
|
||||
job_id: Id
|
||||
search_count: long
|
||||
total_search_time_ms: DurationValue<UnitFloatMillis>
|
||||
@ -13858,6 +13937,7 @@ export interface MlDataframeAnalyticsSummary {
|
||||
model_memory_limit?: string
|
||||
source: MlDataframeAnalyticsSource
|
||||
version?: VersionString
|
||||
_meta?: Metadata
|
||||
}
|
||||
|
||||
export interface MlDataframeEvaluationClassification {
|
||||
@ -13963,21 +14043,48 @@ export interface MlDetectorRead {
|
||||
use_null?: boolean
|
||||
}
|
||||
|
||||
export interface MlDiscoveryNode {
|
||||
attributes: Record<string, string>
|
||||
export interface MlDetectorUpdate {
|
||||
detector_index: integer
|
||||
description?: string
|
||||
custom_rules?: MlDetectionRule[]
|
||||
}
|
||||
|
||||
export type MlDiscoveryNode = Partial<Record<Id, MlDiscoveryNodeContent>>
|
||||
|
||||
export interface MlDiscoveryNodeCompact {
|
||||
name: Name
|
||||
ephemeral_id: Id
|
||||
id: Id
|
||||
name: Name
|
||||
transport_address: TransportAddress
|
||||
attributes: Record<string, string>
|
||||
}
|
||||
|
||||
export interface MlDiscoveryNodeContent {
|
||||
name?: Name
|
||||
ephemeral_id: Id
|
||||
transport_address: TransportAddress
|
||||
external_id: string
|
||||
attributes: Record<string, string>
|
||||
roles: string[]
|
||||
version: VersionString
|
||||
min_index_version: integer
|
||||
max_index_version: integer
|
||||
}
|
||||
|
||||
export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
|
||||
|
||||
export interface MlExponentialAverageCalculationContext {
|
||||
incremental_metric_value_ms: DurationValue<UnitFloatMillis>
|
||||
latest_timestamp?: EpochTime<UnitMillis>
|
||||
previous_exponential_average_ms?: DurationValue<UnitFloatMillis>
|
||||
}
|
||||
|
||||
export interface MlFillMaskInferenceOptions {
|
||||
mask_token?: string
|
||||
num_top_classes?: integer
|
||||
tokenization?: MlTokenizationConfigContainer
|
||||
results_field?: string
|
||||
vocabulary: MlVocabulary
|
||||
}
|
||||
|
||||
export interface MlFillMaskInferenceUpdateOptions {
|
||||
@ -14000,8 +14107,8 @@ export interface MlFilterRef {
|
||||
export type MlFilterType = 'include' | 'exclude'
|
||||
|
||||
export interface MlGeoResults {
|
||||
actual_point: string
|
||||
typical_point: string
|
||||
actual_point?: string
|
||||
typical_point?: string
|
||||
}
|
||||
|
||||
export interface MlHyperparameter {
|
||||
@ -14165,7 +14272,7 @@ export interface MlJobStats {
|
||||
forecasts_stats: MlJobForecastStatistics
|
||||
job_id: string
|
||||
model_size_stats: MlModelSizeStats
|
||||
node?: MlDiscoveryNode
|
||||
node?: MlDiscoveryNodeCompact
|
||||
open_time?: DateTime
|
||||
state: MlJobState
|
||||
timing_stats: MlJobTimingStats
|
||||
@ -14185,6 +14292,23 @@ export interface MlJobTimingStats {
|
||||
|
||||
export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
|
||||
|
||||
export interface MlModelPackageConfig {
|
||||
create_time?: EpochTime<UnitMillis>
|
||||
description?: string
|
||||
inference_config?: Record<string, any>
|
||||
metadata?: Metadata
|
||||
minimum_version?: string
|
||||
model_repository?: string
|
||||
model_type?: string
|
||||
packaged_model_id: Id
|
||||
platform_architecture?: string
|
||||
prefix_strings?: MlTrainedModelPrefixStrings
|
||||
size?: ByteSize
|
||||
sha256?: string
|
||||
tags?: string[]
|
||||
vocabulary_file?: string
|
||||
}
|
||||
|
||||
export interface MlModelPlotConfig {
|
||||
annotations_enabled?: boolean
|
||||
enabled?: boolean
|
||||
@ -14199,6 +14323,7 @@ export interface MlModelSizeStats {
|
||||
model_bytes: ByteSize
|
||||
model_bytes_exceeded?: ByteSize
|
||||
model_bytes_memory_limit?: ByteSize
|
||||
output_memory_allocator_bytes?: ByteSize
|
||||
peak_model_bytes?: ByteSize
|
||||
assignment_memory_basis?: string
|
||||
result_type: string
|
||||
@ -14248,20 +14373,11 @@ export interface MlNerInferenceUpdateOptions {
|
||||
results_field?: string
|
||||
}
|
||||
|
||||
export interface MlNlpBertTokenizationConfig {
|
||||
do_lower_case?: boolean
|
||||
with_special_tokens?: boolean
|
||||
max_sequence_length?: integer
|
||||
truncate?: MlTokenizationTruncate
|
||||
span?: integer
|
||||
export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig {
|
||||
}
|
||||
|
||||
export interface MlNlpRobertaTokenizationConfig {
|
||||
export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig {
|
||||
add_prefix_space?: boolean
|
||||
with_special_tokens?: boolean
|
||||
max_sequence_length?: integer
|
||||
truncate?: MlTokenizationTruncate
|
||||
span?: integer
|
||||
}
|
||||
|
||||
export interface MlNlpTokenizationUpdateOptions {
|
||||
@ -14285,7 +14401,7 @@ export interface MlOverallBucket {
|
||||
overall_score: double
|
||||
result_type: string
|
||||
timestamp: EpochTime<UnitMillis>
|
||||
timestamp_string: DateTime
|
||||
timestamp_string?: DateTime
|
||||
}
|
||||
|
||||
export interface MlOverallBucketJob {
|
||||
@ -14373,6 +14489,7 @@ export interface MlTextEmbeddingInferenceOptions {
|
||||
embedding_size?: integer
|
||||
tokenization?: MlTokenizationConfigContainer
|
||||
results_field?: string
|
||||
vocabulary: MlVocabulary
|
||||
}
|
||||
|
||||
export interface MlTextEmbeddingInferenceUpdateOptions {
|
||||
@ -14383,6 +14500,7 @@ export interface MlTextEmbeddingInferenceUpdateOptions {
|
||||
export interface MlTextExpansionInferenceOptions {
|
||||
tokenization?: MlTokenizationConfigContainer
|
||||
results_field?: string
|
||||
vocabulary: MlVocabulary
|
||||
}
|
||||
|
||||
export interface MlTextExpansionInferenceUpdateOptions {
|
||||
@ -14397,6 +14515,7 @@ export interface MlTimingStats {
|
||||
|
||||
export interface MlTokenizationConfigContainer {
|
||||
bert?: MlNlpBertTokenizationConfig
|
||||
bert_ja?: MlNlpBertTokenizationConfig
|
||||
mpnet?: MlNlpBertTokenizationConfig
|
||||
roberta?: MlNlpRobertaTokenizationConfig
|
||||
}
|
||||
@ -14427,27 +14546,31 @@ export interface MlTotalFeatureImportanceStatistics {
|
||||
}
|
||||
|
||||
export interface MlTrainedModelAssignment {
|
||||
adaptive_allocations?: MlAdaptiveAllocationsSettings | null
|
||||
assignment_state: MlDeploymentAssignmentState
|
||||
max_assigned_allocations?: integer
|
||||
reason?: string
|
||||
routing_table: Record<string, MlTrainedModelAssignmentRoutingTable>
|
||||
start_time: DateTime
|
||||
task_parameters: MlTrainedModelAssignmentTaskParameters
|
||||
}
|
||||
|
||||
export interface MlTrainedModelAssignmentRoutingTable {
|
||||
reason: string
|
||||
reason?: string
|
||||
routing_state: MlRoutingState
|
||||
current_allocations: integer
|
||||
target_allocations: integer
|
||||
}
|
||||
|
||||
export interface MlTrainedModelAssignmentTaskParameters {
|
||||
model_bytes: integer
|
||||
model_bytes: ByteSize
|
||||
model_id: Id
|
||||
deployment_id: Id
|
||||
cache_size: ByteSize
|
||||
cache_size?: ByteSize
|
||||
number_of_allocations: integer
|
||||
priority: MlTrainingPriority
|
||||
per_deployment_memory_bytes: ByteSize
|
||||
per_allocation_memory_bytes: ByteSize
|
||||
queue_capacity: integer
|
||||
threads_per_allocation: integer
|
||||
}
|
||||
@ -14470,6 +14593,7 @@ export interface MlTrainedModelConfig {
|
||||
license_level?: string
|
||||
metadata?: MlTrainedModelConfigMetadata
|
||||
model_size_bytes?: ByteSize
|
||||
model_package?: MlModelPackageConfig
|
||||
location?: MlTrainedModelLocation
|
||||
prefix_strings?: MlTrainedModelPrefixStrings
|
||||
}
|
||||
@ -14492,36 +14616,45 @@ export interface MlTrainedModelDeploymentAllocationStatus {
|
||||
}
|
||||
|
||||
export interface MlTrainedModelDeploymentNodesStats {
|
||||
average_inference_time_ms: DurationValue<UnitFloatMillis>
|
||||
error_count: integer
|
||||
inference_count: integer
|
||||
last_access: long
|
||||
node: MlDiscoveryNode
|
||||
number_of_allocations: integer
|
||||
number_of_pending_requests: integer
|
||||
rejection_execution_count: integer
|
||||
average_inference_time_ms?: DurationValue<UnitFloatMillis>
|
||||
average_inference_time_ms_last_minute?: DurationValue<UnitFloatMillis>
|
||||
average_inference_time_ms_excluding_cache_hits?: DurationValue<UnitFloatMillis>
|
||||
error_count?: integer
|
||||
inference_count?: long
|
||||
inference_cache_hit_count?: long
|
||||
inference_cache_hit_count_last_minute?: long
|
||||
last_access?: EpochTime<UnitMillis>
|
||||
node?: MlDiscoveryNode
|
||||
number_of_allocations?: integer
|
||||
number_of_pending_requests?: integer
|
||||
peak_throughput_per_minute: long
|
||||
rejection_execution_count?: integer
|
||||
routing_state: MlTrainedModelAssignmentRoutingTable
|
||||
start_time: EpochTime<UnitMillis>
|
||||
threads_per_allocation: integer
|
||||
timeout_count: integer
|
||||
start_time?: EpochTime<UnitMillis>
|
||||
threads_per_allocation?: integer
|
||||
throughput_last_minute: integer
|
||||
timeout_count?: integer
|
||||
}
|
||||
|
||||
export interface MlTrainedModelDeploymentStats {
|
||||
allocation_status: MlTrainedModelDeploymentAllocationStatus
|
||||
adaptive_allocations?: MlAdaptiveAllocationsSettings
|
||||
allocation_status?: MlTrainedModelDeploymentAllocationStatus
|
||||
cache_size?: ByteSize
|
||||
deployment_id: Id
|
||||
error_count: integer
|
||||
inference_count: integer
|
||||
error_count?: integer
|
||||
inference_count?: integer
|
||||
model_id: Id
|
||||
nodes: MlTrainedModelDeploymentNodesStats[]
|
||||
number_of_allocations: integer
|
||||
queue_capacity: integer
|
||||
rejected_execution_count: integer
|
||||
reason: string
|
||||
number_of_allocations?: integer
|
||||
peak_throughput_per_minute: long
|
||||
priority: MlTrainingPriority
|
||||
queue_capacity?: integer
|
||||
rejected_execution_count?: integer
|
||||
reason?: string
|
||||
start_time: EpochTime<UnitMillis>
|
||||
state: MlDeploymentAssignmentState
|
||||
threads_per_allocation: integer
|
||||
timeout_count: integer
|
||||
state?: MlDeploymentAssignmentState
|
||||
threads_per_allocation?: integer
|
||||
timeout_count?: integer
|
||||
}
|
||||
|
||||
export interface MlTrainedModelEntities {
|
||||
@ -15155,6 +15288,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase {
|
||||
exclude_generated?: boolean
|
||||
from?: integer
|
||||
include?: MlInclude
|
||||
include_model_definition?: boolean
|
||||
size?: integer
|
||||
tags?: string | string[]
|
||||
}
|
||||
@ -15205,9 +15339,11 @@ export interface MlInfoDefaults {
|
||||
}
|
||||
|
||||
export interface MlInfoLimits {
|
||||
max_model_memory_limit?: string
|
||||
effective_max_model_memory_limit: string
|
||||
total_ml_memory: string
|
||||
max_single_ml_node_processors?: integer
|
||||
total_ml_processors?: integer
|
||||
max_model_memory_limit?: ByteSize
|
||||
effective_max_model_memory_limit?: ByteSize
|
||||
total_ml_memory: ByteSize
|
||||
}
|
||||
|
||||
export interface MlInfoNativeCode {
|
||||
@ -15252,21 +15388,24 @@ export interface MlPostDataRequest<TData = unknown> extends RequestBase {
|
||||
}
|
||||
|
||||
export interface MlPostDataResponse {
|
||||
bucket_count: long
|
||||
earliest_record_timestamp: long
|
||||
empty_bucket_count: long
|
||||
job_id: Id
|
||||
processed_record_count: long
|
||||
processed_field_count: long
|
||||
input_bytes: long
|
||||
input_field_count: long
|
||||
input_record_count: long
|
||||
invalid_date_count: long
|
||||
job_id: Id
|
||||
last_data_time: integer
|
||||
latest_record_timestamp: long
|
||||
missing_field_count: long
|
||||
out_of_order_timestamp_count: long
|
||||
processed_field_count: long
|
||||
processed_record_count: long
|
||||
empty_bucket_count: long
|
||||
sparse_bucket_count: long
|
||||
bucket_count: long
|
||||
earliest_record_timestamp?: EpochTime<UnitMillis>
|
||||
latest_record_timestamp?: EpochTime<UnitMillis>
|
||||
last_data_time?: EpochTime<UnitMillis>
|
||||
latest_empty_bucket_timestamp?: EpochTime<UnitMillis>
|
||||
latest_sparse_bucket_timestamp?: EpochTime<UnitMillis>
|
||||
input_record_count: long
|
||||
log_time?: EpochTime<UnitMillis>
|
||||
}
|
||||
|
||||
export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
|
||||
@ -15327,6 +15466,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
|
||||
description?: string
|
||||
dest: MlDataframeAnalyticsDestination
|
||||
max_num_threads?: integer
|
||||
_meta?: Metadata
|
||||
model_memory_limit?: string
|
||||
source: MlDataframeAnalyticsSource
|
||||
headers?: HttpHeaders
|
||||
@ -15343,6 +15483,7 @@ export interface MlPutDataFrameAnalyticsResponse {
|
||||
dest: MlDataframeAnalyticsDestination
|
||||
id: Id
|
||||
max_num_threads: integer
|
||||
_meta?: Metadata
|
||||
model_memory_limit: string
|
||||
source: MlDataframeAnalyticsSource
|
||||
version: VersionString
|
||||
@ -15355,6 +15496,8 @@ export interface MlPutDatafeedRequest extends RequestBase {
|
||||
ignore_throttled?: boolean
|
||||
ignore_unavailable?: boolean
|
||||
aggregations?: Record<string, AggregationsAggregationContainer>
|
||||
/** @alias aggregations */
|
||||
aggs?: Record<string, AggregationsAggregationContainer>
|
||||
chunking_config?: MlChunkingConfig
|
||||
delayed_data_check_config?: MlDelayedDataCheckConfig
|
||||
frequency?: Duration
|
||||
@ -15404,6 +15547,10 @@ export interface MlPutFilterResponse {
|
||||
|
||||
export interface MlPutJobRequest extends RequestBase {
|
||||
job_id: Id
|
||||
allow_no_indices?: boolean
|
||||
expand_wildcards?: ExpandWildcards
|
||||
ignore_throttled?: boolean
|
||||
ignore_unavailable?: boolean
|
||||
allow_lazy_open?: boolean
|
||||
analysis_config: MlAnalysisConfig
|
||||
analysis_limits?: MlAnalysisLimits
|
||||
@ -15754,7 +15901,7 @@ export interface MlUpdateJobRequest extends RequestBase {
|
||||
renormalization_window_days?: long
|
||||
results_retention_days?: long
|
||||
groups?: string[]
|
||||
detectors?: MlDetector[]
|
||||
detectors?: MlDetectorUpdate[]
|
||||
per_partition_categorization?: MlPerPartitionCategorization
|
||||
}
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete'
 
 export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
   index?: IndexName
+  list_executed_pipelines?: boolean
   pipeline?: string
   refresh?: Refresh
   routing?: Routing
@@ -67,6 +68,7 @@ export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> ex
   timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   require_alias?: boolean
+  require_data_stream?: boolean
   /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */
   body?: (BulkOperationContainer | BulkUpdateAction<TDocument, TPartialDocument> | TDocument)[]
 }
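The two flags added above (`list_executed_pipelines`, `require_data_stream`) can be exercised straight from `client.bulk`. A minimal, hedged sketch — the node URL, index name, and document are hypothetical; only the two flags come from this changeset:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const response = await client.bulk({
  index: 'my-data-stream',
  // ask Elasticsearch to report the ingest pipelines each action ran through
  list_executed_pipelines: true,
  // reject actions that would not write to a data stream
  require_data_stream: true,
  operations: [
    { create: {} },
    { '@timestamp': new Date().toISOString(), message: 'hello world' },
  ],
})
console.log(response.errors, response.items)
----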
@ -6972,7 +6974,7 @@ export type CatAllocationResponse = CatAllocationAllocationRecord[]
|
||||
|
||||
export interface CatComponentTemplatesComponentTemplate {
|
||||
name: string
|
||||
version: string
|
||||
version: string | null
|
||||
alias_count: string
|
||||
mapping_count: string
|
||||
settings_count: string
|
||||
@ -12723,7 +12725,15 @@ export type InferenceDenseByteVector = byte[]
|
||||
|
||||
export type InferenceDenseVector = float[]
|
||||
|
||||
export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
|
||||
max_chunk_size?: integer
|
||||
overlap?: integer
|
||||
sentence_overlap?: integer
|
||||
strategy?: string
|
||||
}
|
||||
|
||||
export interface InferenceInferenceEndpoint {
|
||||
chunking_settings?: InferenceInferenceChunkingSettings
|
||||
service: string
|
||||
service_settings: InferenceServiceSettings
|
||||
task_settings?: InferenceTaskSettings
|
||||
@ -12875,7 +12885,16 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
|
||||
|
||||
export interface IngestDatabaseConfiguration {
|
||||
name: Name
|
||||
maxmind: IngestMaxmind
|
||||
maxmind?: IngestMaxmind
|
||||
ipinfo?: IngestIpinfo
|
||||
}
|
||||
|
||||
export interface IngestDatabaseConfigurationFull {
|
||||
web?: IngestWeb
|
||||
local?: IngestLocal
|
||||
name: Name
|
||||
maxmind?: IngestMaxmind
|
||||
ipinfo?: IngestIpinfo
|
||||
}
|
||||
|
||||
export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
|
||||
@ -13027,6 +13046,9 @@ export interface IngestIpLocationProcessor extends IngestProcessorBase {
|
||||
download_database_on_pipeline_creation?: boolean
|
||||
}
|
||||
|
||||
export interface IngestIpinfo {
|
||||
}
|
||||
|
||||
export interface IngestJoinProcessor extends IngestProcessorBase {
|
||||
field: Field
|
||||
separator: string
|
||||
@ -13057,6 +13079,10 @@ export interface IngestKeyValueProcessor extends IngestProcessorBase {
|
||||
value_split: string
|
||||
}
|
||||
|
||||
export interface IngestLocal {
|
||||
type: string
|
||||
}
|
||||
|
||||
export interface IngestLowercaseProcessor extends IngestProcessorBase {
|
||||
field: Field
|
||||
ignore_missing?: boolean
|
||||
@ -13264,6 +13290,9 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase {
|
||||
|
||||
export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
|
||||
|
||||
export interface IngestWeb {
|
||||
}
|
||||
|
||||
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
|
||||
id: Ids
|
||||
master_timeout?: Duration
|
||||
@ -13272,6 +13301,14 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
|
||||
|
||||
export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase {
|
||||
id: Ids
|
||||
master_timeout?: Duration
|
||||
timeout?: Duration
|
||||
}
|
||||
|
||||
export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestDeletePipelineRequest extends RequestBase {
|
||||
id: Id
|
||||
master_timeout?: Duration
|
||||
@ -13322,6 +13359,23 @@ export interface IngestGetGeoipDatabaseResponse {
|
||||
databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
|
||||
}
|
||||
|
||||
export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata {
|
||||
id: Id
|
||||
version: VersionNumber
|
||||
modified_date_millis?: EpochTime<UnitMillis>
|
||||
modified_date?: EpochTime<UnitMillis>
|
||||
database: IngestDatabaseConfigurationFull
|
||||
}
|
||||
|
||||
export interface IngestGetIpLocationDatabaseRequest extends RequestBase {
|
||||
id?: Ids
|
||||
master_timeout?: Duration
|
||||
}
|
||||
|
||||
export interface IngestGetIpLocationDatabaseResponse {
|
||||
databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[]
|
||||
}
|
||||
|
||||
export interface IngestGetPipelineRequest extends RequestBase {
|
||||
id?: Id
|
||||
master_timeout?: Duration
|
||||
@ -13350,6 +13404,16 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase {
|
||||
|
||||
export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
export interface IngestPutIpLocationDatabaseRequest extends RequestBase {
|
||||
id: Id
|
||||
master_timeout?: Duration
|
||||
timeout?: Duration
|
||||
/** @deprecated The use of the 'body' key has been deprecated, use 'configuration' instead. */
|
||||
body?: IngestDatabaseConfiguration
|
||||
}
|
||||
|
||||
export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase
|
||||
|
||||
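A hedged sketch of how the new IP location database types could be used, assuming the matching `ingest.putIpLocationDatabase` helper is exposed by this client build and a configured `client` instance as in the doc examples; the database id and name are hypothetical:

[source,ts]
----
// `ipinfo: {}` is the new (empty) IngestIpinfo marker; `maxmind` is now optional
// on IngestDatabaseConfiguration.
const response = await client.ingest.putIpLocationDatabase({
  id: 'my-ip-location-db',
  configuration: {
    name: 'asn',
    ipinfo: {},
  },
})
// IngestPutIpLocationDatabaseResponse is an AcknowledgedResponseBase
console.log(response.acknowledged)
----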
export interface IngestPutPipelineRequest extends RequestBase {
|
||||
id: Id
|
||||
master_timeout?: Duration
|
||||
@ -13631,6 +13695,12 @@ export interface MigrationPostFeatureUpgradeResponse {
|
||||
features: MigrationPostFeatureUpgradeMigrationFeature[]
|
||||
}
|
||||
|
||||
export interface MlAdaptiveAllocationsSettings {
|
||||
enabled: boolean
|
||||
min_number_of_allocations?: integer
|
||||
max_number_of_allocations?: integer
|
||||
}
|
||||
|
||||
export interface MlAnalysisConfig {
|
||||
bucket_span?: Duration
|
||||
categorization_analyzer?: MlCategorizationAnalyzer
|
||||
@ -13661,7 +13731,7 @@ export interface MlAnalysisConfigRead {
|
||||
|
||||
export interface MlAnalysisLimits {
|
||||
categorization_examples_limit?: long
|
||||
model_memory_limit?: string
|
||||
model_memory_limit?: ByteSize
|
||||
}
|
||||
|
||||
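`model_memory_limit` is now typed as `ByteSize` (a byte-unit string or a plain number of bytes), so both spellings type-check when creating a job. A minimal hedged sketch with hypothetical job settings, reusing a configured `client`:

[source,ts]
----
await client.ml.putJob({
  job_id: 'my-anomaly-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime' }],
  },
  data_description: { time_field: '@timestamp' },
  // '11mb' and the equivalent 11534336 (bytes) both satisfy ByteSize now
  analysis_limits: { model_memory_limit: '11mb' },
})
----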
export interface MlAnalysisMemoryLimit {
|
||||
@ -13696,20 +13766,21 @@ export interface MlAnomaly {
|
||||
}
|
||||
|
||||
export interface MlAnomalyCause {
|
||||
actual: double[]
|
||||
by_field_name: Name
|
||||
by_field_value: string
|
||||
correlated_by_field_value: string
|
||||
field_name: Field
|
||||
function: string
|
||||
function_description: string
|
||||
influencers: MlInfluence[]
|
||||
over_field_name: Name
|
||||
over_field_value: string
|
||||
partition_field_name: string
|
||||
partition_field_value: string
|
||||
actual?: double[]
|
||||
by_field_name?: Name
|
||||
by_field_value?: string
|
||||
correlated_by_field_value?: string
|
||||
field_name?: Field
|
||||
function?: string
|
||||
function_description?: string
|
||||
geo_results?: MlGeoResults
|
||||
influencers?: MlInfluence[]
|
||||
over_field_name?: Name
|
||||
over_field_value?: string
|
||||
partition_field_name?: string
|
||||
partition_field_value?: string
|
||||
probability: double
|
||||
typical: double[]
|
||||
typical?: double[]
|
||||
}
|
||||
|
||||
export interface MlAnomalyExplanation {
|
||||
@ -13810,6 +13881,14 @@ export interface MlClassificationInferenceOptions {
|
||||
top_classes_results_field?: string
|
||||
}
|
||||
|
||||
export interface MlCommonTokenizationConfig {
|
||||
do_lower_case?: boolean
|
||||
max_sequence_length?: integer
|
||||
span?: integer
|
||||
truncate?: MlTokenizationTruncate
|
||||
with_special_tokens?: boolean
|
||||
}
|
||||
|
||||
export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
|
||||
|
||||
export type MlCustomSettings = any
|
||||
@ -13899,15 +13978,16 @@ export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'
|
||||
export interface MlDatafeedStats {
|
||||
assignment_explanation?: string
|
||||
datafeed_id: Id
|
||||
node?: MlDiscoveryNode
|
||||
node?: MlDiscoveryNodeCompact
|
||||
state: MlDatafeedState
|
||||
timing_stats: MlDatafeedTimingStats
|
||||
timing_stats?: MlDatafeedTimingStats
|
||||
running_state?: MlDatafeedRunningState
|
||||
}
|
||||
|
||||
export interface MlDatafeedTimingStats {
|
||||
bucket_count: long
|
||||
exponential_average_search_time_per_hour_ms: DurationValue<UnitFloatMillis>
|
||||
exponential_average_calculation_context?: MlExponentialAverageCalculationContext
|
||||
job_id: Id
|
||||
search_count: long
|
||||
total_search_time_ms: DurationValue<UnitFloatMillis>
|
||||
@ -14099,6 +14179,7 @@ export interface MlDataframeAnalyticsSummary {
|
||||
model_memory_limit?: string
|
||||
source: MlDataframeAnalyticsSource
|
||||
version?: VersionString
|
||||
_meta?: Metadata
|
||||
}
|
||||
|
||||
export interface MlDataframeEvaluationClassification {
|
||||
@ -14204,21 +14285,48 @@ export interface MlDetectorRead {
|
||||
use_null?: boolean
|
||||
}
|
||||
|
||||
export interface MlDiscoveryNode {
|
||||
attributes: Record<string, string>
|
||||
export interface MlDetectorUpdate {
|
||||
detector_index: integer
|
||||
description?: string
|
||||
custom_rules?: MlDetectionRule[]
|
||||
}
|
||||
|
||||
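`MlDetectorUpdate` is the new, narrower shape accepted when updating detectors on an existing job; only `detector_index` is required. A hedged sketch against `ml.updateJob`, with a hypothetical job id and custom rule, assuming a configured `client`:

[source,ts]
----
const response = await client.ml.updateJob({
  job_id: 'my-anomaly-job',
  detectors: [
    {
      // only detector_index is required on MlDetectorUpdate
      detector_index: 0,
      description: 'mean(responsetime), skipping implausible spikes',
      custom_rules: [
        {
          actions: ['skip_result'],
          conditions: [{ applies_to: 'actual', operator: 'gt', value: 10000 }],
        },
      ],
    },
  ],
})
console.log(response.job_id)
----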
export type MlDiscoveryNode = Partial<Record<Id, MlDiscoveryNodeContent>>
|
||||
|
||||
export interface MlDiscoveryNodeCompact {
|
||||
name: Name
|
||||
ephemeral_id: Id
|
||||
id: Id
|
||||
name: Name
|
||||
transport_address: TransportAddress
|
||||
attributes: Record<string, string>
|
||||
}
|
||||
|
||||
export interface MlDiscoveryNodeContent {
|
||||
name?: Name
|
||||
ephemeral_id: Id
|
||||
transport_address: TransportAddress
|
||||
external_id: string
|
||||
attributes: Record<string, string>
|
||||
roles: string[]
|
||||
version: VersionString
|
||||
min_index_version: integer
|
||||
max_index_version: integer
|
||||
}
|
||||
|
||||
export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
|
||||
|
||||
export interface MlExponentialAverageCalculationContext {
|
||||
incremental_metric_value_ms: DurationValue<UnitFloatMillis>
|
||||
latest_timestamp?: EpochTime<UnitMillis>
|
||||
previous_exponential_average_ms?: DurationValue<UnitFloatMillis>
|
||||
}
|
||||
|
||||
export interface MlFillMaskInferenceOptions {
|
||||
mask_token?: string
|
||||
num_top_classes?: integer
|
||||
tokenization?: MlTokenizationConfigContainer
|
||||
results_field?: string
|
||||
vocabulary: MlVocabulary
|
||||
}
|
||||
|
||||
export interface MlFillMaskInferenceUpdateOptions {
|
||||
@ -14241,8 +14349,8 @@ export interface MlFilterRef {
|
||||
export type MlFilterType = 'include' | 'exclude'
|
||||
|
||||
export interface MlGeoResults {
|
||||
actual_point: string
|
||||
typical_point: string
|
||||
actual_point?: string
|
||||
typical_point?: string
|
||||
}
|
||||
|
||||
export interface MlHyperparameter {
|
||||
@ -14406,7 +14514,7 @@ export interface MlJobStats {
|
||||
forecasts_stats: MlJobForecastStatistics
|
||||
job_id: string
|
||||
model_size_stats: MlModelSizeStats
|
||||
node?: MlDiscoveryNode
|
||||
node?: MlDiscoveryNodeCompact
|
||||
open_time?: DateTime
|
||||
state: MlJobState
|
||||
timing_stats: MlJobTimingStats
|
||||
@ -14426,6 +14534,23 @@ export interface MlJobTimingStats {
|
||||
|
||||
export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
|
||||
|
||||
export interface MlModelPackageConfig {
|
||||
create_time?: EpochTime<UnitMillis>
|
||||
description?: string
|
||||
inference_config?: Record<string, any>
|
||||
metadata?: Metadata
|
||||
minimum_version?: string
|
||||
model_repository?: string
|
||||
model_type?: string
|
||||
packaged_model_id: Id
|
||||
platform_architecture?: string
|
||||
prefix_strings?: MlTrainedModelPrefixStrings
|
||||
size?: ByteSize
|
||||
sha256?: string
|
||||
tags?: string[]
|
||||
vocabulary_file?: string
|
||||
}
|
||||
|
||||
export interface MlModelPlotConfig {
|
||||
annotations_enabled?: boolean
|
||||
enabled?: boolean
|
||||
@ -14440,6 +14565,7 @@ export interface MlModelSizeStats {
|
||||
model_bytes: ByteSize
|
||||
model_bytes_exceeded?: ByteSize
|
||||
model_bytes_memory_limit?: ByteSize
|
||||
output_memory_allocator_bytes?: ByteSize
|
||||
peak_model_bytes?: ByteSize
|
||||
assignment_memory_basis?: string
|
||||
result_type: string
|
||||
@ -14489,20 +14615,11 @@ export interface MlNerInferenceUpdateOptions {
|
||||
results_field?: string
|
||||
}
|
||||
|
||||
export interface MlNlpBertTokenizationConfig {
|
||||
do_lower_case?: boolean
|
||||
with_special_tokens?: boolean
|
||||
max_sequence_length?: integer
|
||||
truncate?: MlTokenizationTruncate
|
||||
span?: integer
|
||||
export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig {
|
||||
}
|
||||
|
||||
export interface MlNlpRobertaTokenizationConfig {
|
||||
export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig {
|
||||
add_prefix_space?: boolean
|
||||
with_special_tokens?: boolean
|
||||
max_sequence_length?: integer
|
||||
truncate?: MlTokenizationTruncate
|
||||
span?: integer
|
||||
}
|
||||
|
||||
export interface MlNlpTokenizationUpdateOptions {
|
||||
@ -14526,7 +14643,7 @@ export interface MlOverallBucket {
|
||||
overall_score: double
|
||||
result_type: string
|
||||
timestamp: EpochTime<UnitMillis>
|
||||
timestamp_string: DateTime
|
||||
timestamp_string?: DateTime
|
||||
}
|
||||
|
||||
export interface MlOverallBucketJob {
|
||||
@ -14614,6 +14731,7 @@ export interface MlTextEmbeddingInferenceOptions {
|
||||
embedding_size?: integer
|
||||
tokenization?: MlTokenizationConfigContainer
|
||||
results_field?: string
|
||||
vocabulary: MlVocabulary
|
||||
}
|
||||
|
||||
export interface MlTextEmbeddingInferenceUpdateOptions {
|
||||
@ -14624,6 +14742,7 @@ export interface MlTextEmbeddingInferenceUpdateOptions {
|
||||
export interface MlTextExpansionInferenceOptions {
|
||||
tokenization?: MlTokenizationConfigContainer
|
||||
results_field?: string
|
||||
vocabulary: MlVocabulary
|
||||
}
|
||||
|
||||
export interface MlTextExpansionInferenceUpdateOptions {
|
||||
@ -14638,6 +14757,7 @@ export interface MlTimingStats {
|
||||
|
||||
export interface MlTokenizationConfigContainer {
|
||||
bert?: MlNlpBertTokenizationConfig
|
||||
bert_ja?: MlNlpBertTokenizationConfig
|
||||
mpnet?: MlNlpBertTokenizationConfig
|
||||
roberta?: MlNlpRobertaTokenizationConfig
|
||||
}
|
||||
@ -14668,27 +14788,31 @@ export interface MlTotalFeatureImportanceStatistics {
|
||||
}
|
||||
|
||||
export interface MlTrainedModelAssignment {
|
||||
adaptive_allocations?: MlAdaptiveAllocationsSettings | null
|
||||
assignment_state: MlDeploymentAssignmentState
|
||||
max_assigned_allocations?: integer
|
||||
reason?: string
|
||||
routing_table: Record<string, MlTrainedModelAssignmentRoutingTable>
|
||||
start_time: DateTime
|
||||
task_parameters: MlTrainedModelAssignmentTaskParameters
|
||||
}
|
||||
|
||||
export interface MlTrainedModelAssignmentRoutingTable {
|
||||
reason: string
|
||||
reason?: string
|
||||
routing_state: MlRoutingState
|
||||
current_allocations: integer
|
||||
target_allocations: integer
|
||||
}
|
||||
|
||||
export interface MlTrainedModelAssignmentTaskParameters {
|
||||
model_bytes: integer
|
||||
model_bytes: ByteSize
|
||||
model_id: Id
|
||||
deployment_id: Id
|
||||
cache_size: ByteSize
|
||||
cache_size?: ByteSize
|
||||
number_of_allocations: integer
|
||||
priority: MlTrainingPriority
|
||||
per_deployment_memory_bytes: ByteSize
|
||||
per_allocation_memory_bytes: ByteSize
|
||||
queue_capacity: integer
|
||||
threads_per_allocation: integer
|
||||
}
|
||||
@ -14711,6 +14835,7 @@ export interface MlTrainedModelConfig {
|
||||
license_level?: string
|
||||
metadata?: MlTrainedModelConfigMetadata
|
||||
model_size_bytes?: ByteSize
|
||||
model_package?: MlModelPackageConfig
|
||||
location?: MlTrainedModelLocation
|
||||
prefix_strings?: MlTrainedModelPrefixStrings
|
||||
}
|
||||
@ -14733,36 +14858,45 @@ export interface MlTrainedModelDeploymentAllocationStatus {
|
||||
}
|
||||
|
||||
export interface MlTrainedModelDeploymentNodesStats {
|
||||
average_inference_time_ms: DurationValue<UnitFloatMillis>
|
||||
error_count: integer
|
||||
inference_count: integer
|
||||
last_access: long
|
||||
node: MlDiscoveryNode
|
||||
number_of_allocations: integer
|
||||
number_of_pending_requests: integer
|
||||
rejection_execution_count: integer
|
||||
average_inference_time_ms?: DurationValue<UnitFloatMillis>
|
||||
average_inference_time_ms_last_minute?: DurationValue<UnitFloatMillis>
|
||||
average_inference_time_ms_excluding_cache_hits?: DurationValue<UnitFloatMillis>
|
||||
error_count?: integer
|
||||
inference_count?: long
|
||||
inference_cache_hit_count?: long
|
||||
inference_cache_hit_count_last_minute?: long
|
||||
last_access?: EpochTime<UnitMillis>
|
||||
node?: MlDiscoveryNode
|
||||
number_of_allocations?: integer
|
||||
number_of_pending_requests?: integer
|
||||
peak_throughput_per_minute: long
|
||||
rejection_execution_count?: integer
|
||||
routing_state: MlTrainedModelAssignmentRoutingTable
|
||||
start_time: EpochTime<UnitMillis>
|
||||
threads_per_allocation: integer
|
||||
timeout_count: integer
|
||||
start_time?: EpochTime<UnitMillis>
|
||||
threads_per_allocation?: integer
|
||||
throughput_last_minute: integer
|
||||
timeout_count?: integer
|
||||
}
|
||||
|
||||
export interface MlTrainedModelDeploymentStats {
|
||||
allocation_status: MlTrainedModelDeploymentAllocationStatus
|
||||
adaptive_allocations?: MlAdaptiveAllocationsSettings
|
||||
allocation_status?: MlTrainedModelDeploymentAllocationStatus
|
||||
cache_size?: ByteSize
|
||||
deployment_id: Id
|
||||
error_count: integer
|
||||
inference_count: integer
|
||||
error_count?: integer
|
||||
inference_count?: integer
|
||||
model_id: Id
|
||||
nodes: MlTrainedModelDeploymentNodesStats[]
|
||||
number_of_allocations: integer
|
||||
queue_capacity: integer
|
||||
rejected_execution_count: integer
|
||||
reason: string
|
||||
number_of_allocations?: integer
|
||||
peak_throughput_per_minute: long
|
||||
priority: MlTrainingPriority
|
||||
queue_capacity?: integer
|
||||
rejected_execution_count?: integer
|
||||
reason?: string
|
||||
start_time: EpochTime<UnitMillis>
|
||||
state: MlDeploymentAssignmentState
|
||||
threads_per_allocation: integer
|
||||
timeout_count: integer
|
||||
state?: MlDeploymentAssignmentState
|
||||
threads_per_allocation?: integer
|
||||
timeout_count?: integer
|
||||
}
|
||||
|
||||
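`MlDiscoveryNode` is now a map keyed by node id (`Partial<Record<Id, MlDiscoveryNodeContent>>`) rather than a flat object, and most of the per-node deployment stats above became optional. A hedged sketch of reading them, with a hypothetical model id and a configured `client`:

[source,ts]
----
const stats = await client.ml.getTrainedModelsStats({ model_id: 'my-model' })
for (const modelStats of stats.trained_model_stats) {
  for (const nodeStats of modelStats.deployment_stats?.nodes ?? []) {
    // node is now a (usually one-entry) map of node id -> node details
    for (const [nodeId, node] of Object.entries(nodeStats.node ?? {})) {
      console.log(nodeId, node?.transport_address, nodeStats.inference_count ?? 0)
    }
  }
}
----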
export interface MlTrainedModelEntities {
|
||||
@ -15438,6 +15572,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase {
|
||||
exclude_generated?: boolean
|
||||
from?: integer
|
||||
include?: MlInclude
|
||||
include_model_definition?: boolean
|
||||
size?: integer
|
||||
tags?: string | string[]
|
||||
}
|
||||
@ -15491,9 +15626,11 @@ export interface MlInfoDefaults {
|
||||
}
|
||||
|
||||
export interface MlInfoLimits {
|
||||
max_model_memory_limit?: string
|
||||
effective_max_model_memory_limit: string
|
||||
total_ml_memory: string
|
||||
max_single_ml_node_processors?: integer
|
||||
total_ml_processors?: integer
|
||||
max_model_memory_limit?: ByteSize
|
||||
effective_max_model_memory_limit?: ByteSize
|
||||
total_ml_memory: ByteSize
|
||||
}
|
||||
|
||||
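The ML info limits are likewise `ByteSize` now, and two of them optional, so callers should not assume a string. A small hedged sketch, assuming a configured `client`:

[source,ts]
----
const info = await client.ml.info()
const { max_model_memory_limit, effective_max_model_memory_limit, total_ml_memory } = info.limits
// each value may be a byte-unit string such as '4gb' or a plain number of bytes;
// the first two may also be undefined
console.log(max_model_memory_limit, effective_max_model_memory_limit, total_ml_memory)
----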
export interface MlInfoNativeCode {
|
||||
@ -15545,21 +15682,24 @@ export interface MlPostDataRequest<TData = unknown> extends RequestBase {
|
||||
}
|
||||
|
||||
export interface MlPostDataResponse {
|
||||
bucket_count: long
|
||||
earliest_record_timestamp: long
|
||||
empty_bucket_count: long
|
||||
job_id: Id
|
||||
processed_record_count: long
|
||||
processed_field_count: long
|
||||
input_bytes: long
|
||||
input_field_count: long
|
||||
input_record_count: long
|
||||
invalid_date_count: long
|
||||
job_id: Id
|
||||
last_data_time: integer
|
||||
latest_record_timestamp: long
|
||||
missing_field_count: long
|
||||
out_of_order_timestamp_count: long
|
||||
processed_field_count: long
|
||||
processed_record_count: long
|
||||
empty_bucket_count: long
|
||||
sparse_bucket_count: long
|
||||
bucket_count: long
|
||||
earliest_record_timestamp?: EpochTime<UnitMillis>
|
||||
latest_record_timestamp?: EpochTime<UnitMillis>
|
||||
last_data_time?: EpochTime<UnitMillis>
|
||||
latest_empty_bucket_timestamp?: EpochTime<UnitMillis>
|
||||
latest_sparse_bucket_timestamp?: EpochTime<UnitMillis>
|
||||
input_record_count: long
|
||||
log_time?: EpochTime<UnitMillis>
|
||||
}
|
||||
|
||||
export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
|
||||
@ -15631,6 +15771,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
|
||||
description?: string
|
||||
dest: MlDataframeAnalyticsDestination
|
||||
max_num_threads?: integer
|
||||
_meta?: Metadata
|
||||
model_memory_limit?: string
|
||||
source: MlDataframeAnalyticsSource
|
||||
headers?: HttpHeaders
|
||||
@ -15648,6 +15789,7 @@ export interface MlPutDataFrameAnalyticsResponse {
|
||||
dest: MlDataframeAnalyticsDestination
|
||||
id: Id
|
||||
max_num_threads: integer
|
||||
_meta?: Metadata
|
||||
model_memory_limit: string
|
||||
source: MlDataframeAnalyticsSource
|
||||
version: VersionString
|
||||
@ -15662,6 +15804,8 @@ export interface MlPutDatafeedRequest extends RequestBase {
|
||||
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
|
||||
body?: {
|
||||
aggregations?: Record<string, AggregationsAggregationContainer>
|
||||
/** @alias aggregations */
|
||||
aggs?: Record<string, AggregationsAggregationContainer>
|
||||
chunking_config?: MlChunkingConfig
|
||||
delayed_data_check_config?: MlDelayedDataCheckConfig
|
||||
frequency?: Duration
|
||||
@ -15715,6 +15859,10 @@ export interface MlPutFilterResponse {
|
||||
|
||||
export interface MlPutJobRequest extends RequestBase {
|
||||
job_id: Id
|
||||
allow_no_indices?: boolean
|
||||
expand_wildcards?: ExpandWildcards
|
||||
ignore_throttled?: boolean
|
||||
ignore_unavailable?: boolean
|
||||
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
|
||||
body?: {
|
||||
allow_lazy_open?: boolean
|
||||
@ -15726,6 +15874,7 @@ export interface MlPutJobRequest extends RequestBase {
|
||||
data_description: MlDataDescription
|
||||
datafeed_config?: MlDatafeedConfig
|
||||
description?: string
|
||||
job_id?: Id
|
||||
groups?: string[]
|
||||
model_plot_config?: MlModelPlotConfig
|
||||
model_snapshot_retention_days?: long
|
||||
@ -16097,7 +16246,7 @@ export interface MlUpdateJobRequest extends RequestBase {
|
||||
renormalization_window_days?: long
|
||||
results_retention_days?: long
|
||||
groups?: string[]
|
||||
detectors?: MlDetector[]
|
||||
detectors?: MlDetectorUpdate[]
|
||||
per_partition_categorization?: MlPerPartitionCategorization
|
||||
}
|
||||
}
|
||||
|
||||
@@ -211,7 +211,14 @@ export default class Client extends API {
     }
 
     if (options.enableMetaHeader) {
-      options.headers['x-elastic-client-meta'] = `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`
+      let clientMeta = `es=${clientVersion},js=${nodeVersion},t=${transportVersion}`
+      if (options.Connection === UndiciConnection) {
+        clientMeta += `,un=${nodeVersion}`
+      } else {
+        // assumes HttpConnection
+        clientMeta += `,hc=${nodeVersion}`
+      }
+      options.headers['x-elastic-client-meta'] = clientMeta
     }
 
     this.name = options.name
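From an application's point of view, the connection class chosen at construction time now determines the final token of the `x-elastic-client-meta` header; the two new tests below assert the exact strings. A minimal sketch (the local node URL is hypothetical):

[source,ts]
----
import { Client, HttpConnection } from '@elastic/elasticsearch'

// default transport is UndiciConnection, so the header ends in `,un=<node version>`
const undiciClient = new Client({ node: 'http://localhost:9200' })

// opting into the Node.js http-based transport switches the suffix to `,hc=<node version>`
const httpClient = new Client({
  node: 'http://localhost:9200',
  Connection: HttpConnection,
})
----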
@@ -24,7 +24,7 @@ import FakeTimers from '@sinonjs/fake-timers'
 import { buildServer, connection } from '../utils'
 import { Client, errors } from '../..'
 import * as symbols from '@elastic/transport/lib/symbols'
-import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool } from '@elastic/transport'
+import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool, HttpConnection } from '@elastic/transport'
 
 let clientVersion: string = require('../../package.json').version // eslint-disable-line
 if (clientVersion.includes('-')) {
@@ -403,6 +403,44 @@ test('Meta header disabled', async t => {
   await client.transport.request({ method: 'GET', path: '/' })
 })
 
+test('Meta header indicates when UndiciConnection is used', async t => {
+  t.plan(1)
+
+  function handler (req: http.IncomingMessage, res: http.ServerResponse) {
+    t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},un=${nodeVersion}`)
+    res.end('ok')
+  }
+
+  const [{ port }, server] = await buildServer(handler)
+
+  const client = new Client({
+    node: `http://localhost:${port}`,
+    // Connection: UndiciConnection is the default
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+  server.stop()
+})
+
+test('Meta header indicates when HttpConnection is used', async t => {
+  t.plan(1)
+
+  function handler (req: http.IncomingMessage, res: http.ServerResponse) {
+    t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`)
+    res.end('ok')
+  }
+
+  const [{ port }, server] = await buildServer(handler)
+
+  const client = new Client({
+    node: `http://localhost:${port}`,
+    Connection: HttpConnection,
+  })
+
+  await client.transport.request({ method: 'GET', path: '/' })
+  server.stop()
+})
+
 test('caFingerprint', t => {
   const client = new Client({
     node: 'https://localhost:9200',