Auto-generated code for 8.17 (#2503)
docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc (new file, 46 lines)
@@ -0,0 +1,46 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    rrf: {
      retrievers: [
        {
          standard: {
            query: {
              range: {
                year: {
                  gt: 2023,
                },
              },
            },
          },
        },
        {
          standard: {
            query: {
              term: {
                topic: "elastic",
              },
            },
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  _source: false,
  aggs: {
    topics: {
      terms: {
        field: "topic",
      },
    },
  },
});
console.log(response);
----
docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc (new file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "rerank",
  inference_id: "my-rerank-model",
  inference_config: {
    service: "cohere",
    service_settings: {
      model_id: "rerank-english-v3.0",
      api_key: "{{COHERE_API_KEY}}",
    },
  },
});
console.log(response);
----
docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc (new file, 49 lines)
@@ -0,0 +1,49 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example_nested",
  retriever: {
    rrf: {
      retrievers: [
        {
          standard: {
            query: {
              nested: {
                path: "nested_field",
                inner_hits: {
                  name: "nested_vector",
                  _source: false,
                  fields: ["nested_field.paragraph_id"],
                },
                query: {
                  knn: {
                    field: "nested_field.nested_vector",
                    query_vector: [1, 0, 0.5],
                    k: 10,
                  },
                },
              },
            },
          },
        },
        {
          standard: {
            query: {
              term: {
                topic: "ai",
              },
            },
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  _source: ["topic"],
});
console.log(response);
----
docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc (new file, 57 lines)
@@ -0,0 +1,57 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    rrf: {
      retrievers: [
        {
          standard: {
            query: {
              term: {
                topic: "elastic",
              },
            },
          },
        },
        {
          rrf: {
            retrievers: [
              {
                standard: {
                  query: {
                    query_string: {
                      query:
                        "(information retrieval) OR (artificial intelligence)",
                      default_field: "text",
                    },
                  },
                },
              },
              {
                knn: {
                  field: "vector",
                  query_vector: [0.23, 0.67, 0.89],
                  k: 3,
                  num_candidates: 5,
                },
              },
            ],
            rank_window_size: 10,
            rank_constant: 1,
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  _source: false,
  size: 1,
  explain: true,
});
console.log(response);
----
@@ -16,7 +16,7 @@ const response = await client.search({
        },
      },
      field: "text",
      inference_id: "my-cohere-rerank-model",
      inference_id: "elastic-rerank",
      inference_text: "How often does the moon hide the sun?",
      rank_window_size: 100,
      min_score: 0.5,
@@ -3,8 +3,8 @@

[source, js]
----
const response = await client.cluster.getSettings({
  flat_settings: "true",
const response = await client.indices.rollover({
  alias: "datastream",
});
console.log(response);
----
@@ -1,28 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.esql.query({
  format: "txt",
  query:
    "\n FROM library\n | SORT page_count DESC\n | KEEP name, author\n | LOOKUP era ON author\n | LIMIT 5\n ",
  tables: {
    era: {
      author: {
        keyword: [
          "Frank Herbert",
          "Peter F. Hamilton",
          "Vernor Vinge",
          "Alastair Reynolds",
          "James S.A. Corey",
        ],
      },
      era: {
        keyword: ["The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"],
      },
    },
  },
});
console.log(response);
----
@@ -1,16 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.cluster.putSettings({
  persistent: {
    "cluster.indices.close.enable": false,
    "indices.recovery.max_bytes_per_sec": "50mb",
  },
  transient: {
    "*": null,
  },
});
console.log(response);
----
docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc (new file, 23 lines)
@@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "rerank",
  inference_id: "my-elastic-rerank",
  inference_config: {
    service: "elasticsearch",
    service_settings: {
      model_id: ".rerank-v1",
      num_threads: 1,
      adaptive_allocations: {
        enabled: true,
        min_number_of_allocations: 1,
        max_number_of_allocations: 4,
      },
    },
  },
});
console.log(response);
----
@@ -8,11 +8,6 @@ const response = await client.search({
    query: {
      bool: {
        must: [
          {
            term: {
              "category.keyword": "Main Course",
            },
          },
          {
            term: {
              tags: "vegetarian",
@@ -27,6 +22,11 @@ const response = await client.search({
          },
        ],
        should: [
          {
            term: {
              category: "Main Course",
            },
          },
          {
            multi_match: {
              query: "curry spicy",
@@ -9,7 +9,6 @@ const response = await client.indices.create({
    properties: {
      inference_field: {
        type: "semantic_text",
        inference_id: "my-elser-endpoint",
      },
    },
  },
@@ -45,7 +45,7 @@ console.log(response);

const response1 = await client.indices.putIndexTemplate({
  name: 2,
  index_patterns: ["k8s*"],
  index_patterns: ["k9s*"],
  composed_of: ["destination_template"],
  data_stream: {},
});
docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc (new file, 44 lines)
@@ -0,0 +1,44 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    rrf: {
      retrievers: [
        {
          knn: {
            field: "vector",
            query_vector: [0.23, 0.67, 0.89],
            k: 3,
            num_candidates: 5,
          },
        },
        {
          text_similarity_reranker: {
            retriever: {
              standard: {
                query: {
                  term: {
                    topic: "ai",
                  },
                },
              },
            },
            field: "text",
            inference_id: "my-rerank-model",
            inference_text:
              "Can I use generative AI to identify user intent and improve search relevance?",
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  _source: false,
});
console.log(response);
----
docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc (new file, 46 lines)
@@ -0,0 +1,46 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    text_similarity_reranker: {
      retriever: {
        rrf: {
          retrievers: [
            {
              standard: {
                query: {
                  query_string: {
                    query:
                      "(information retrieval) OR (artificial intelligence)",
                    default_field: "text",
                  },
                },
              },
            },
            {
              knn: {
                field: "vector",
                query_vector: [0.23, 0.67, 0.89],
                k: 3,
                num_candidates: 5,
              },
            },
          ],
          rank_window_size: 10,
          rank_constant: 1,
        },
      },
      field: "text",
      inference_id: "my-rerank-model",
      inference_text:
        "What are the state of the art applications of AI in information retrieval?",
    },
  },
  _source: false,
});
console.log(response);
----
docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc (new file, 23 lines)
@@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "rerank",
  inference_id: "my-elastic-rerank",
  inference_config: {
    service: "elasticsearch",
    service_settings: {
      model_id: ".rerank-v1",
      num_threads: 1,
      adaptive_allocations: {
        enabled: true,
        min_number_of_allocations: 1,
        max_number_of_allocations: 10,
      },
    },
  },
});
console.log(response);
----
@@ -4,7 +4,7 @@
[source, js]
----
const response = await client.indices.create({
  index: "my-index-000002",
  index: "my-index-000003",
  mappings: {
    properties: {
      inference_field: {
@@ -9,7 +9,6 @@ const response = await client.indices.create({
    properties: {
      content: {
        type: "semantic_text",
        inference_id: "my-elser-endpoint",
      },
    },
  },
docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc (new file, 13 lines)
@@ -0,0 +1,13 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.connector.put({
  connector_id: "my-{service-name-stub}-connector",
  index_name: "my-elasticsearch-index",
  name: "Content synced from {service-name}",
  service_type: "{service-name-stub}",
});
console.log(response);
----
docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc (new file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "my-index-000002",
  mappings: {
    properties: {
      inference_field: {
        type: "semantic_text",
        inference_id: "my-openai-endpoint",
      },
    },
  },
});
console.log(response);
----
docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc (new file, 37 lines)
@@ -0,0 +1,37 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    rrf: {
      retrievers: [
        {
          standard: {
            query: {
              query_string: {
                query: "(information retrieval) OR (artificial intelligence)",
                default_field: "text",
              },
            },
          },
        },
        {
          knn: {
            field: "vector",
            query_vector: [0.23, 0.67, 0.89],
            k: 3,
            num_candidates: 5,
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  _source: false,
});
console.log(response);
----
docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc (new file, 27 lines)
@@ -0,0 +1,27 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  retriever: {
    text_similarity_reranker: {
      retriever: {
        standard: {
          query: {
            match: {
              text: "How often does the moon hide the sun?",
            },
          },
        },
      },
      field: "text",
      inference_id: "my-elastic-rerank",
      inference_text: "How often does the moon hide the sun?",
      rank_window_size: 100,
      min_score: 0.5,
    },
  },
});
console.log(response);
----
docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc (new file, 37 lines)
@@ -0,0 +1,37 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    text_similarity_reranker: {
      retriever: {
        text_similarity_reranker: {
          retriever: {
            knn: {
              field: "vector",
              query_vector: [0.23, 0.67, 0.89],
              k: 3,
              num_candidates: 5,
            },
          },
          rank_window_size: 100,
          field: "text",
          inference_id: "my-rerank-model",
          inference_text:
            "What are the state of the art applications of AI in information retrieval?",
        },
      },
      rank_window_size: 10,
      field: "text",
      inference_id: "my-other-more-expensive-rerank-model",
      inference_text:
        "Applications of Large Language Models in technology and their impact on user satisfaction",
    },
  },
  _source: false,
});
console.log(response);
----
@@ -12,7 +12,7 @@ const response = await client.inference.put({
      adaptive_allocations: {
        enabled: true,
        min_number_of_allocations: 1,
        max_number_of_allocations: 10,
        max_number_of_allocations: 4,
      },
      num_threads: 1,
      model_id: ".elser_model_2",
docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc (new file, 14 lines)
@@ -0,0 +1,14 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.connector.put({
  connector_id: "my-{service-name-stub}-connector",
  index_name: "my-elasticsearch-index",
  name: "Content synced from {service-name}",
  service_type: "{service-name-stub}",
  is_native: true,
});
console.log(response);
----
docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc (new file, 45 lines)
@@ -0,0 +1,45 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    rrf: {
      retrievers: [
        {
          standard: {
            query: {
              query_string: {
                query: "(information retrieval) OR (artificial intelligence)",
                default_field: "text",
              },
            },
          },
        },
        {
          knn: {
            field: "vector",
            query_vector: [0.23, 0.67, 0.89],
            k: 3,
            num_candidates: 5,
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  highlight: {
    fields: {
      text: {
        fragment_size: 150,
        number_of_fragments: 3,
      },
    },
  },
  _source: false,
});
console.log(response);
----
docs/doc_examples/bee3fda7bb07086243424b62e5b16ca7.asciidoc (new file, 83 lines)
@@ -0,0 +1,83 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "retrievers_example_nested",
  mappings: {
    properties: {
      nested_field: {
        type: "nested",
        properties: {
          paragraph_id: {
            type: "keyword",
          },
          nested_vector: {
            type: "dense_vector",
            dims: 3,
            similarity: "l2_norm",
            index: true,
          },
        },
      },
      topic: {
        type: "keyword",
      },
    },
  },
});
console.log(response);

const response1 = await client.index({
  index: "retrievers_example_nested",
  id: 1,
  document: {
    nested_field: [
      {
        paragraph_id: "1a",
        nested_vector: [-1.12, -0.59, 0.78],
      },
      {
        paragraph_id: "1b",
        nested_vector: [-0.12, 1.56, 0.42],
      },
      {
        paragraph_id: "1c",
        nested_vector: [1, -1, 0],
      },
    ],
    topic: ["ai"],
  },
});
console.log(response1);

const response2 = await client.index({
  index: "retrievers_example_nested",
  id: 2,
  document: {
    nested_field: [
      {
        paragraph_id: "2a",
        nested_vector: [0.23, 1.24, 0.65],
      },
    ],
    topic: ["information_retrieval"],
  },
});
console.log(response2);

const response3 = await client.index({
  index: "retrievers_example_nested",
  id: 3,
  document: {
    topic: ["ai"],
  },
});
console.log(response3);

const response4 = await client.indices.refresh({
  index: "retrievers_example_nested",
});
console.log(response4);
----
@@ -1,11 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.cluster.getSettings({
  flat_settings: "true",
  filter_path: "transient",
});
console.log(response);
----
docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc (new file, 154 lines)
@@ -0,0 +1,154 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.putIndexTemplate({
  name: "datastream_template",
  index_patterns: ["datastream*"],
  data_stream: {},
  template: {
    lifecycle: {
      downsampling: [
        {
          after: "1m",
          fixed_interval: "1h",
        },
      ],
    },
    settings: {
      index: {
        mode: "time_series",
      },
    },
    mappings: {
      properties: {
        "@timestamp": {
          type: "date",
        },
        kubernetes: {
          properties: {
            container: {
              properties: {
                cpu: {
                  properties: {
                    usage: {
                      properties: {
                        core: {
                          properties: {
                            ns: {
                              type: "long",
                            },
                          },
                        },
                        limit: {
                          properties: {
                            pct: {
                              type: "float",
                            },
                          },
                        },
                        nanocores: {
                          type: "long",
                          time_series_metric: "gauge",
                        },
                        node: {
                          properties: {
                            pct: {
                              type: "float",
                            },
                          },
                        },
                      },
                    },
                  },
                },
                memory: {
                  properties: {
                    available: {
                      properties: {
                        bytes: {
                          type: "long",
                          time_series_metric: "gauge",
                        },
                      },
                    },
                    majorpagefaults: {
                      type: "long",
                    },
                    pagefaults: {
                      type: "long",
                      time_series_metric: "gauge",
                    },
                    rss: {
                      properties: {
                        bytes: {
                          type: "long",
                          time_series_metric: "gauge",
                        },
                      },
                    },
                    usage: {
                      properties: {
                        bytes: {
                          type: "long",
                          time_series_metric: "gauge",
                        },
                        limit: {
                          properties: {
                            pct: {
                              type: "float",
                            },
                          },
                        },
                        node: {
                          properties: {
                            pct: {
                              type: "float",
                            },
                          },
                        },
                      },
                    },
                    workingset: {
                      properties: {
                        bytes: {
                          type: "long",
                          time_series_metric: "gauge",
                        },
                      },
                    },
                  },
                },
                name: {
                  type: "keyword",
                },
                start_time: {
                  type: "date",
                },
              },
            },
            host: {
              type: "keyword",
              time_series_dimension: true,
            },
            namespace: {
              type: "keyword",
              time_series_dimension: true,
            },
            node: {
              type: "keyword",
              time_series_dimension: true,
            },
            pod: {
              type: "keyword",
              time_series_dimension: true,
            },
          },
        },
      },
    },
  },
});
console.log(response);
----
docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc (new file, 16 lines)
@@ -0,0 +1,16 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.transport.request({
  method: "POST",
  path: "/_query_rules/my-ruleset/_test",
  body: {
    match_criteria: {
      query_string: "puggles",
    },
  },
});
console.log(response);
----
docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc (new file, 44 lines)
@@ -0,0 +1,44 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "retrievers_example",
  retriever: {
    rrf: {
      retrievers: [
        {
          standard: {
            query: {
              query_string: {
                query: "(information retrieval) OR (artificial intelligence)",
                default_field: "text",
              },
            },
          },
        },
        {
          knn: {
            field: "vector",
            query_vector: [0.23, 0.67, 0.89],
            k: 3,
            num_candidates: 5,
          },
        },
      ],
      rank_window_size: 10,
      rank_constant: 1,
    },
  },
  collapse: {
    field: "year",
    inner_hits: {
      name: "topic related documents",
      _source: ["year"],
    },
  },
  _source: false,
});
console.log(response);
----
docs/doc_examples/ee05714a83d75fb6858e3b9fcbeb8f8b.asciidoc (new file, 94 lines)
@@ -0,0 +1,94 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "retrievers_example",
  mappings: {
    properties: {
      vector: {
        type: "dense_vector",
        dims: 3,
        similarity: "l2_norm",
        index: true,
      },
      text: {
        type: "text",
      },
      year: {
        type: "integer",
      },
      topic: {
        type: "keyword",
      },
    },
  },
});
console.log(response);

const response1 = await client.index({
  index: "retrievers_example",
  id: 1,
  document: {
    vector: [0.23, 0.67, 0.89],
    text: "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.",
    year: 2024,
    topic: ["llm", "ai", "information_retrieval"],
  },
});
console.log(response1);

const response2 = await client.index({
  index: "retrievers_example",
  id: 2,
  document: {
    vector: [0.12, 0.56, 0.78],
    text: "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.",
    year: 2023,
    topic: ["ai", "medicine"],
  },
});
console.log(response2);

const response3 = await client.index({
  index: "retrievers_example",
  id: 3,
  document: {
    vector: [0.45, 0.32, 0.91],
    text: "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.",
    year: 2024,
    topic: ["ai", "security"],
  },
});
console.log(response3);

const response4 = await client.index({
  index: "retrievers_example",
  id: 4,
  document: {
    vector: [0.34, 0.21, 0.98],
    text: "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.",
    year: 2023,
    topic: ["ai", "elastic", "assistant"],
  },
});
console.log(response4);

const response5 = await client.index({
  index: "retrievers_example",
  id: 5,
  document: {
    vector: [0.11, 0.65, 0.47],
    text: "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.",
    year: 2024,
    topic: ["documentation", "observability", "elastic"],
  },
});
console.log(response5);

const response6 = await client.indices.refresh({
  index: "retrievers_example",
});
console.log(response6);
----
@@ -818,6 +818,8 @@ Random by default.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`allow_partial_search_results` (Optional, boolean)*: If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception.
If `true`, the point in time will contain all the shards that are available at the time of the request.

[discrete]
=== ping
@@ -1116,6 +1118,8 @@ However, using computationally expensive named queries on a large number of hits
This parameter can only be used when the `q` query string parameter is specified.
** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently.
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.
** *`min_compatible_shard_node` (Optional, string)*: The minimum version of the node that can handle the request
Any handling node with a lower version will fail the request.
** *`preference` (Optional, string)*: Nodes and shards used for the search.
By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are:
`_only_local` to run the search only on shards on the local node;
@@ -1558,6 +1562,8 @@ client.asyncSearch.status({ id })

* *Request (object):*
** *`id` (string)*: A unique identifier for the async search.
** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available.
Ongoing async searches and any saved search results are deleted after this period.

[discrete]
==== submit
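For illustration only (not part of the generated diff), a status check that exercises the newly documented `keep_alive` argument might look like the following sketch; the search id is hypothetical.

[source,js]
----
// Keep the async search (and its saved results) available for five more days.
const response = await client.asyncSearch.status({
  id: "my-async-search-id", // hypothetical id returned by an earlier submit call
  keep_alive: "5d",
});
console.log(response);
----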
@@ -1655,18 +1661,18 @@ A partial reduction is performed every time the coordinating node has received a
** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
** *`min_compatible_shard_node` (Optional, string)*
** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random)
** *`pre_filter_shard_size` (Optional, number)*: The default value cannot be changed, which enforces the execution of a pre-filter roundtrip to retrieve statistics from each shard so that the ones that surely don’t hold any document matching the query get skipped.
** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true
** *`routing` (Optional, string)*: A list of specific routing values
** *`scroll` (Optional, string | -1 | 0)*
** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type
** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode
** *`suggest_size` (Optional, number)*: How many suggestions to return in response
** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
** *`rest_total_hits_as_int` (Optional, boolean)*
** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response
** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field
** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field
** *`q` (Optional, string)*: Query in the Lucene query string syntax
@@ -1690,6 +1696,9 @@ client.autoscaling.deleteAutoscalingPolicy({ name })

* *Request (object):*
** *`name` (string)*: the name of the autoscaling policy
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== get_autoscaling_capacity
@@ -1711,9 +1720,15 @@ Do not use this information to make autoscaling decisions.
{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation]
[source,ts]
----
client.autoscaling.getAutoscalingCapacity()
client.autoscaling.getAutoscalingCapacity({ ... })
----

[discrete]
==== Arguments

* *Request (object):*
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== get_autoscaling_policy
@@ -1732,6 +1747,8 @@ client.autoscaling.getAutoscalingPolicy({ name })

* *Request (object):*
** *`name` (string)*: the name of the autoscaling policy
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_autoscaling_policy
@@ -1751,6 +1768,9 @@ client.autoscaling.putAutoscalingPolicy({ name })
* *Request (object):*
** *`name` (string)*: the name of the autoscaling policy
** *`policy` (Optional, { roles, deciders })*
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
=== cat
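For illustration only (not part of the generated diff), a call that exercises the arguments listed above might look like this sketch; the policy name, role, and decider values are hypothetical.

[source,js]
----
// Create or update an autoscaling policy; only `name` is required by the signature above.
const response = await client.autoscaling.putAutoscalingPolicy({
  name: "my_autoscaling_policy", // hypothetical policy name
  policy: {
    roles: ["data_hot"],
    deciders: {
      fixed: {},
    },
  },
  master_timeout: "30s",
});
console.log(response);
----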
@@ -1792,10 +1812,6 @@ client.cat.allocation({ ... })
* *Request (object):*
** *`node_id` (Optional, string | string[])*: List of node identifiers or names used to limit the returned information.
** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== component_templates
@@ -1817,10 +1833,6 @@ client.cat.componentTemplates({ ... })

* *Request (object):*
** *`name` (Optional, string)*: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned.
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== count
@@ -1947,17 +1959,9 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li
{ref}/cat-master.html[Endpoint documentation]
[source,ts]
----
client.cat.master({ ... })
client.cat.master()
----

[discrete]
==== Arguments

* *Request (object):*
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== ml_data_frame_analytics
@@ -2096,17 +2100,9 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li
{ref}/cat-nodeattrs.html[Endpoint documentation]
[source,ts]
----
client.cat.nodeattrs({ ... })
client.cat.nodeattrs()
----

[discrete]
==== Arguments

* *Request (object):*
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== nodes
@@ -2135,17 +2131,9 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li
{ref}/cat-pending-tasks.html[Endpoint documentation]
[source,ts]
----
client.cat.pendingTasks({ ... })
client.cat.pendingTasks()
----

[discrete]
==== Arguments

* *Request (object):*
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== plugins
@@ -2155,17 +2143,9 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li
{ref}/cat-plugins.html[Endpoint documentation]
[source,ts]
----
client.cat.plugins({ ... })
client.cat.plugins()
----

[discrete]
==== Arguments

* *Request (object):*
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== recovery
@@ -2222,10 +2202,6 @@ client.cat.segments({ ... })
Supports wildcards (`*`).
To target all data streams and indices, omit this parameter or use `*` or `_all`.
** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values.
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== shards
@@ -2308,10 +2284,6 @@ client.cat.templates({ ... })
* *Request (object):*
** *`name` (Optional, string)*: The name of the template to return.
Accepts wildcard expressions. If omitted, all templates are returned.
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== thread_pool
@@ -2332,10 +2304,6 @@ client.cat.threadPool({ ... })
** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request.
Accepts wildcard expressions.
** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values.
** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
node will send requests for further information to each selected node.

[discrete]
==== transforms
@@ -2392,37 +2360,27 @@ Creates a new follower index configured to follow the referenced leader index.
{ref}/ccr-put-follow.html[Endpoint documentation]
[source,ts]
----
client.ccr.follow({ index, leader_index, remote_cluster })
client.ccr.follow({ index })
----

[discrete]
==== Arguments

* *Request (object):*
** *`index` (string)*: The name of the follower index.
** *`leader_index` (string)*: The name of the index in the leader cluster to follow.
** *`remote_cluster` (string)*: The remote cluster containing the leader index.
** *`data_stream_name` (Optional, string)*: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed.
** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding reads requests from the remote cluster.
** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower.
** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster.
** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
retrying.
** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
deferred until the number of queued operations goes below the limit.
** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
be deferred until the total bytes of queued operations goes below the limit.
** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower.
** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower.
** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
Then the follower will immediately attempt to read from the leader again.
** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Settings to override from the leader index.
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be
active.
A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the
remote Lucene segment files to the follower index.
** *`index` (string)*: The name of the follower index
** *`leader_index` (Optional, string)*
** *`max_outstanding_read_requests` (Optional, number)*
** *`max_outstanding_write_requests` (Optional, number)*
** *`max_read_request_operation_count` (Optional, number)*
** *`max_read_request_size` (Optional, string)*
** *`max_retry_delay` (Optional, string | -1 | 0)*
** *`max_write_buffer_count` (Optional, number)*
** *`max_write_buffer_size` (Optional, string)*
** *`max_write_request_operation_count` (Optional, number)*
** *`max_write_request_size` (Optional, string)*
** *`read_poll_timeout` (Optional, string | -1 | 0)*
** *`remote_cluster` (Optional, string)*
** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Sets the number of shard copies that must be active before returning. Defaults to 0. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)

[discrete]
==== follow_info
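For illustration only (not part of the generated diff), a minimal follow request built from the required arguments described above might look like this sketch; the index and cluster names are hypothetical.

[source,js]
----
// Start following a leader index from a configured remote cluster.
const response = await client.ccr.follow({
  index: "follower_index", // hypothetical follower index name
  leader_index: "leader_index", // hypothetical leader index name
  remote_cluster: "remote_cluster", // hypothetical remote cluster alias
  wait_for_active_shards: 1,
});
console.log(response);
----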
@@ -3756,9 +3714,6 @@ client.eql.search({ index, query })
** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit.
** *`result_position` (Optional, Enum("tail" | "head"))*
** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*
** *`max_samples_per_key` (Optional, number)*: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size`
parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the
`max_samples_per_key` parameter. Pipes are not supported for sample queries.
** *`allow_no_indices` (Optional, boolean)*
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*
** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response.
@@ -3981,6 +3936,7 @@ the indices stats API.
** *`ignore_unavailable` (Optional, boolean)*
** *`lenient` (Optional, boolean)*
** *`max_concurrent_shard_requests` (Optional, number)*
** *`min_compatible_shard_node` (Optional, string)*
** *`preference` (Optional, string)*
** *`pre_filter_shard_size` (Optional, number)*
** *`request_cache` (Optional, boolean)*
@@ -4673,6 +4629,7 @@ If the request can target data streams, this argument determines whether wildcar
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.

[discrete]
==== exists_index_template
@@ -4870,6 +4827,7 @@ If the request can target data streams, this argument determines whether wildcar
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.

[discrete]
==== get_data_lifecycle
@@ -5212,7 +5170,11 @@ client.indices.putDataLifecycle({ name })
** *`name` (string | string[])*: List of data streams used to limit the request.
Supports wildcards (`*`).
To target all data streams use `*` or `_all`.
** *`lifecycle` (Optional, { data_retention, downsampling, enabled })*
** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame.
Any time after this duration the document could be deleted.
When empty, every document in this data stream will be stored indefinitely.
** *`downsampling` (Optional, { rounds })*: If defined, every backing index will execute the configured downsampling configuration after the backing
index is not the data stream write index anymore.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
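For illustration only (not part of the generated diff), a minimal lifecycle update that uses the documented `data_retention` argument might look like this sketch; the data stream name and retention period are hypothetical.

[source,js]
----
// Keep documents in the data stream for at least seven days.
const response = await client.indices.putDataLifecycle({
  name: "my-data-stream", // hypothetical data stream name
  data_retention: "7d",
});
console.log(response);
----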
@@ -5296,7 +5258,7 @@ a new date field is added instead of string.
not used at all by Elasticsearch, but can be used to store
application-specific metadata.
** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields.
** *`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, dims, element_type, index, index_options, similarity } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, meta, inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)*: Mapping for a field. For new fields, this mapping can include:
** *`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, element_type, dims, similarity, index, index_options } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, meta, inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)*: Mapping for a field. For new fields, this mapping can include:
- Field name
- Field data type
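The hunk above does not name its endpoint, but the `properties` shape matches the put mapping API. A purely illustrative sketch follows; the index and field names are placeholders, not values taken from the diff.

[source,js]
----
// Hypothetical example: add two new fields to an existing index's mapping.
// "my-index", "title", and "year" are placeholders.
const response = await client.indices.putMapping({
  index: "my-index",
  properties: {
    title: { type: "text" },
    year: { type: "integer" },
  },
});
console.log(response);
----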
@ -5572,6 +5534,7 @@ If the request can target data streams, this argument determines whether wildcar
Supports a list of values, such as `open,hidden`.
Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`verbose` (Optional, boolean)*: If `true`, the request returns a verbose response.
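This hunk does not identify its endpoint either; since it sits just before `shard_stores` and documents a `verbose` option, `indices.segments` is assumed in the sketch below, with placeholder index names.

[source,js]
----
// Hedged sketch: indices.segments is an assumption inferred from context.
const response = await client.indices.segments({
  index: "my-index-*",
  expand_wildcards: "open,hidden",
  ignore_unavailable: true,
  verbose: true,
});
console.log(response);
----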
[discrete]
==== shard_stores
@ -7441,7 +7404,7 @@ client.ml.postCalendarEvents({ calendar_id, events })
* *Request (object):*
** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
** *`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
** *`events` ({ calendar_id, event_id, description, end_time, start_time }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
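A minimal, illustrative call with placeholder calendar and event values (the epoch-millisecond times are invented for the sketch):

[source,js]
----
// Hypothetical calendar ID and event values.
const response = await client.ml.postCalendarEvents({
  calendar_id: "planned-outages",
  events: [
    {
      description: "January maintenance window",
      start_time: 1704067200000,
      end_time: 1704153600000,
    },
  ],
});
console.log(response);
----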
[discrete]
==== post_data
@ -7633,7 +7596,7 @@ Create a datafeed.
Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
You can associate only one datafeed with each anomaly detection job.
The datafeed contains a query that runs at a defined interval (`frequency`).
If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
If you are concerned about delayed data, you can add a delay (`query_delay') at each interval.
When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had
at the time of creation and runs the query using those same roles. If you provide secondary authorization headers,
those credentials are used instead.
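Tying the `frequency` and `query_delay` settings described above together, a hedged sketch of creating a datafeed might look like the following; the job, datafeed, and index names are placeholders and the job must already exist.

[source,js]
----
// Illustrative values only.
const response = await client.ml.putDatafeed({
  datafeed_id: "datafeed-my-job",
  job_id: "my-job",
  indices: ["my-metrics-*"],
  query: { match_all: {} },
  frequency: "150s", // how often the query runs
  query_delay: "90s", // extra wait to tolerate delayed data
});
console.log(response);
----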
@ -8997,7 +8960,7 @@ client.searchApplication.put({ name })
* *Request (object):*
** *`name` (string)*: The name of the search application to be created or updated.
** *`search_application` (Optional, { indices, analytics_collection_name, template })*
** *`search_application` (Optional, { name, indices, updated_at_millis, analytics_collection_name, template })*
** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications.
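For context, a sketch of creating a search application with these parameters; the application name, index, and template are placeholder values patterned on the generic Elasticsearch docs examples.

[source,js]
----
// Hypothetical search application; create: true fails if the name already exists.
const response = await client.searchApplication.put({
  name: "my-search-app",
  search_application: {
    indices: ["my-index"],
    template: {
      script: {
        source: {
          query: {
            query_string: {
              query: "{{query_string}}",
            },
          },
        },
        params: { query_string: "*" },
      },
    },
  },
  create: true,
});
console.log(response);
----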
[discrete]
@ -9204,7 +9167,7 @@ client.security.bulkPutRole({ roles })
==== Arguments
* *Request (object):*
** *`roles` (Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, transient_metadata }>)*: A dictionary of role name to RoleDescriptor objects to add or update
** *`roles` (Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, restriction, transient_metadata }>)*: A dictionary of role name to RoleDescriptor objects to add or update
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
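A hedged sketch of the bulk role request shape; the role names and index patterns are placeholders.

[source,js]
----
// Two hypothetical roles created in a single request.
const response = await client.security.bulkPutRole({
  roles: {
    my_admin_role: {
      cluster: ["all"],
      indices: [{ names: ["index1", "index2"], privileges: ["all"] }],
    },
    my_reader_role: {
      cluster: ["monitor"],
      indices: [{ names: ["index1"], privileges: ["read"] }],
    },
  },
  refresh: "wait_for",
});
console.log(response);
----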
[discrete]
@ -9361,7 +9324,7 @@ client.security.createApiKey({ ... })
* *Request (object):*
** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire.
** *`name` (Optional, string)*: Specifies the name for this API key.
** *`role_descriptors` (Optional, Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, transient_metadata }>)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
** *`role_descriptors` (Optional, Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, restriction, transient_metadata }>)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
** *`metadata` (Optional, Record<string, User-defined value>)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
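A minimal sketch of creating an API key whose permissions are restricted by a role descriptor; all names and values below are illustrative.

[source,js]
----
// The resulting key's permissions are the intersection of this descriptor
// and the permissions of the authenticated user.
const response = await client.security.createApiKey({
  name: "my-api-key",
  expiration: "1d",
  role_descriptors: {
    "role-a": {
      cluster: ["monitor"],
      indices: [{ names: ["index-a*"], privileges: ["read"] }],
    },
  },
  metadata: { application: "my-app", environment: "testing" },
});
console.log(response);
----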
@ -9700,6 +9663,8 @@ client.security.getPrivileges({ ... })
Get roles.
Get roles in the native realm.
The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
The get roles API cannot retrieve roles that are defined in roles files.
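A minimal sketch of fetching a single native-realm role by name; the role name is a placeholder, and omitting `name` returns all roles.

[source,js]
----
// Hypothetical role name.
const response = await client.security.getRole({ name: "my_admin_role" });
console.log(response);
----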
{ref}/security-api-get-role.html[Endpoint documentation]
[source,ts]
@ -10071,7 +10036,7 @@ client.security.putRole({ name })
==== Arguments
* *Request (object):*
** *`name` (string)*: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.
** *`name` (string)*: The name of the role.
** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries.
** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
** *`global` (Optional, Record<string, User-defined value>)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
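Putting the arguments above together, a hedged sketch of creating a role; the role name, index patterns, and application privileges are placeholders.

[source,js]
----
// Hypothetical role combining cluster, index, and application privileges.
const response = await client.security.putRole({
  name: "my_admin_role",
  cluster: ["all"],
  indices: [{ names: ["index1", "index2"], privileges: ["all"] }],
  applications: [
    { application: "myapp", privileges: ["admin", "read"], resources: ["*"] },
  ],
});
console.log(response);
----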
@ -10431,7 +10396,7 @@ client.security.updateApiKey({ id })
* *Request (object):*
** *`id` (string)*: The ID of the API key to update.
** *`role_descriptors` (Optional, Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, transient_metadata }>)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
** *`role_descriptors` (Optional, Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, restriction, transient_metadata }>)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.
** *`metadata` (Optional, Record<string, User-defined value>)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage.
** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key.
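A hedged sketch of updating an existing key's permissions, metadata, and expiration; the key ID and all values are placeholders.

[source,js]
----
// "my-api-key-id" stands in for a real API key ID.
const response = await client.security.updateApiKey({
  id: "my-api-key-id",
  role_descriptors: {
    "role-a": {
      indices: [{ names: ["*"], privileges: ["write"] }],
    },
  },
  metadata: { environment: { level: 2, trusted: true } },
  expiration: "30d",
});
console.log(response);
----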