Auto-generated code for main (#2357)
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/my-e5-model",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "my-e5-model",
+  inference_config: {
     service: "elasticsearch",
     service_settings: {
       num_allocations: 1,
@@ -3,13 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_api_key_id",
-  body: {
-    api_key_id: "my-api-key-id",
-    api_key_secret_id: "my-connector-secret-id",
-  },
+const response = await client.connector.updateApiKeyId({
+  connector_id: "my-connector",
+  api_key_id: "my-api-key-id",
+  api_key_secret_id: "my-connector-secret-id",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/google_vertex_ai_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "google_vertex_ai_embeddings",
+  inference_config: {
     service: "googlevertexai",
     service_settings: {
       service_account_json: "<service_account_json>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/amazon_bedrock_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "amazon_bedrock_embeddings",
+  inference_config: {
     service: "amazonbedrock",
     service_settings: {
       access_key: "<aws_access_key>",
@@ -3,29 +3,26 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-g-drive-connector/_filtering",
-  body: {
-    rules: [
-      {
-        field: "file_extension",
-        id: "exclude-txt-files",
-        order: 0,
-        policy: "exclude",
-        rule: "equals",
-        value: "txt",
-      },
-      {
-        field: "_",
-        id: "DEFAULT",
-        order: 1,
-        policy: "include",
-        rule: "regex",
-        value: ".*",
-      },
-    ],
-  },
+const response = await client.connector.updateFiltering({
+  connector_id: "my-g-drive-connector",
+  rules: [
+    {
+      field: "file_extension",
+      id: "exclude-txt-files",
+      order: 0,
+      policy: "exclude",
+      rule: "equals",
+      value: "txt",
+    },
+    {
+      field: "_",
+      id: "DEFAULT",
+      order: 1,
+      policy: "include",
+      rule: "regex",
+      value: ".*",
+    },
+  ],
 });
 console.log(response);
 ----
@ -3,13 +3,11 @@
|
||||
|
||||
[source, js]
|
||||
----
|
||||
const response = await client.transport.request({
|
||||
method: "POST",
|
||||
path: "/_inference/sparse_embedding/my-elser-model",
|
||||
body: {
|
||||
input:
|
||||
"The sky above the port was the color of television tuned to a dead channel.",
|
||||
},
|
||||
const response = await client.inference.inference({
|
||||
task_type: "sparse_embedding",
|
||||
inference_id: "my-elser-model",
|
||||
input:
|
||||
"The sky above the port was the color of television tuned to a dead channel.",
|
||||
});
|
||||
console.log(response);
|
||||
----
|
||||
|
||||
docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc (new file, 23 lines)
@@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "google-vertex-ai-embeddings",
  mappings: {
    properties: {
      content_embedding: {
        type: "dense_vector",
        dims: 768,
        element_type: "float",
        similarity: "dot_product",
      },
      content: {
        type: "text",
      },
    },
  },
});
console.log(response);
----
@@ -3,9 +3,9 @@
 
 [source, js]
 ----
-const response = await client.ingest.deleteGeoipDatabase({
-  id: "my-database-id",
-  body: null,
+const response = await client.transport.request({
+  method: "DELETE",
+  path: "/_ingest/geoip/database/my-database-id",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/openai_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "openai_embeddings",
+  inference_config: {
     service: "openai",
     service_settings: {
       api_key: "<api_key>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/rerank/google_vertex_ai_rerank",
-  body: {
+const response = await client.inference.put({
+  task_type: "rerank",
+  inference_id: "google_vertex_ai_rerank",
+  inference_config: {
     service: "googlevertexai",
     service_settings: {
       service_account_json: "<service_account_json>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/sparse_embedding/my-elser-model",
-  body: {
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "my-elser-model",
+  inference_config: {
     service: "elser",
     service_settings: {
       adaptive_allocations: {
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/azure_ai_studio_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "azure_ai_studio_embeddings",
+  inference_config: {
     service: "azureaistudio",
     service_settings: {
       api_key: "<api_key>",
@@ -3,12 +3,8 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_connector",
-  querystring: {
-    service_type: "sharepoint_online",
-  },
+const response = await client.connector.list({
+  service_type: "sharepoint_online",
 });
 console.log(response);
 ----
docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc (new file, 22 lines)
@@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "google-vertex-ai-embeddings",
  knn: {
    field: "content_embedding",
    query_vector_builder: {
      text_embedding: {
        model_id: "google_vertex_ai_embeddings",
        model_text: "Calculate fuel cost",
      },
    },
    k: 10,
    num_candidates: 100,
  },
  _source: ["id", "content"],
});
console.log(response);
----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/azure_ai_studio_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "azure_ai_studio_embeddings",
+  inference_config: {
     service: "azureaistudio",
     service_settings: {
       api_key: "<api_key>",
@@ -3,14 +3,11 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector",
-  body: {
-    index_name: "search-google-drive",
-    name: "My Connector",
-    service_type: "google_drive",
-  },
+const response = await client.connector.put({
+  connector_id: "my-connector",
+  index_name: "search-google-drive",
+  name: "My Connector",
+  service_type: "google_drive",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/completion/anthropic_completion",
-  body: {
+const response = await client.inference.put({
+  task_type: "completion",
+  inference_id: "anthropic_completion",
+  inference_config: {
     service: "anthropic",
     service_settings: {
       api_key: "<api_key>",
@@ -3,9 +3,9 @@
 
 [source, js]
 ----
-const response = await client.ingest.getGeoipDatabase({
-  id: "my-database-id",
-  body: null,
+const response = await client.transport.request({
+  method: "GET",
+  path: "/_ingest/geoip/database/my-database-id",
 });
 console.log(response);
 ----
docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc (new file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "alibabacloud_ai_search_embeddings",
  inference_config: {
    service: "alibabacloud-ai-search",
    service_settings: {
      api_key: "<api_key>",
      service_id: "<service_id>",
      host: "<host>",
      workspace: "<workspace>",
    },
  },
});
console.log(response);
----
@@ -3,16 +3,13 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_pipeline",
-  body: {
-    pipeline: {
-      extract_binary_content: true,
-      name: "my-connector-pipeline",
-      reduce_whitespace: true,
-      run_ml_inference: true,
-    },
-  },
+const response = await client.connector.updatePipeline({
+  connector_id: "my-connector",
+  pipeline: {
+    extract_binary_content: true,
+    name: "my-connector-pipeline",
+    reduce_whitespace: true,
+    run_ml_inference: true,
+  },
 });
 console.log(response);
@@ -3,12 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_status",
-  body: {
-    status: "needs_configuration",
-  },
+const response = await client.connector.updateStatus({
+  connector_id: "my-connector",
+  status: "needs_configuration",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/my-msmarco-minilm-model",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "my-msmarco-minilm-model",
+  inference_config: {
     service: "elasticsearch",
     service_settings: {
       num_allocations: 1,
@@ -3,9 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "DELETE",
-  path: "/_inference/sparse_embedding/my-elser-model",
+const response = await client.inference.delete({
+  task_type: "sparse_embedding",
+  inference_id: "my-elser-model",
 });
 console.log(response);
 ----
docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc (new file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "sparse_embedding",
  inference_id: "alibabacloud_ai_search_sparse",
  inference_config: {
    service: "alibabacloud-ai-search",
    service_settings: {
      api_key: "<api_key>",
      service_id: "ops-text-sparse-embedding-001",
      host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com",
      workspace: "default",
    },
  },
});
console.log(response);
----
@@ -3,9 +3,9 @@
 
 [source, js]
 ----
-const response = await client.ingest.deleteGeoipDatabase({
-  id: "example-database-id",
-  body: null,
+const response = await client.transport.request({
+  method: "DELETE",
+  path: "/_ingest/geoip/database/example-database-id",
 });
 console.log(response);
 ----
docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc (new file, 22 lines)
@@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.indices.create({
  index: "alibabacloud-ai-search-embeddings",
  mappings: {
    properties: {
      content_embedding: {
        type: "dense_vector",
        dims: 1024,
        element_type: "float",
      },
      content: {
        type: "text",
      },
    },
  },
});
console.log(response);
----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/completion/openai-completion",
-  body: {
+const response = await client.inference.put({
+  task_type: "completion",
+  inference_id: "openai-completion",
+  inference_config: {
     service: "openai",
     service_settings: {
       api_key: "<api_key>",
@@ -3,21 +3,18 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_last_sync",
-  body: {
-    last_access_control_sync_error: "Houston, we have a problem!",
-    last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
-    last_access_control_sync_status: "pending",
-    last_deleted_document_count: 42,
-    last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
-    last_indexed_document_count: 42,
-    last_sync_error: "Houston, we have a problem!",
-    last_sync_scheduled_at: "2024-11-09T15:13:08.231Z",
-    last_sync_status: "completed",
-    last_synced: "2024-11-09T15:13:08.231Z",
-  },
+const response = await client.connector.lastSync({
+  connector_id: "my-connector",
+  last_access_control_sync_error: "Houston, we have a problem!",
+  last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
+  last_access_control_sync_status: "pending",
+  last_deleted_document_count: 42,
+  last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
+  last_indexed_document_count: 42,
+  last_sync_error: "Houston, we have a problem!",
+  last_sync_scheduled_at: "2024-11-09T15:13:08.231Z",
+  last_sync_status: "completed",
+  last_synced: "2024-11-09T15:13:08.231Z",
 });
 console.log(response);
 ----
@@ -3,12 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_index_name",
-  body: {
-    index_name: "data-from-my-google-drive",
-  },
+const response = await client.connector.updateIndexName({
+  connector_id: "my-connector",
+  index_name: "data-from-my-google-drive",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/completion/azure_ai_studio_completion",
-  body: {
+const response = await client.inference.put({
+  task_type: "completion",
+  inference_id: "azure_ai_studio_completion",
+  inference_config: {
     service: "azureaistudio",
     service_settings: {
       api_key: "<api_key>",
@@ -3,15 +3,13 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "POST",
-  path: "/_inference/text_embedding/my-cohere-endpoint",
-  body: {
-    input:
-      "The sky above the port was the color of television tuned to a dead channel.",
-    task_settings: {
-      input_type: "ingest",
-    },
-  },
+const response = await client.inference.inference({
+  task_type: "text_embedding",
+  inference_id: "my-cohere-endpoint",
+  input:
+    "The sky above the port was the color of television tuned to a dead channel.",
+  task_settings: {
+    input_type: "ingest",
+  },
 });
 console.log(response);
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/amazon_bedrock_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "amazon_bedrock_embeddings",
+  inference_config: {
     service: "amazonbedrock",
    service_settings: {
       access_key: "<aws_access_key>",
@@ -3,12 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "DELETE",
-  path: "/_connector/another-connector",
-  querystring: {
-    delete_sync_jobs: "true",
-  },
+const response = await client.connector.delete({
+  connector_id: "another-connector",
+  delete_sync_jobs: "true",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/azure_openai_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "azure_openai_embeddings",
+  inference_config: {
     service: "azureopenai",
     service_settings: {
       api_key: "<api_key>",
docs/doc_examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc (new file, 21 lines)
@@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.ingest.putPipeline({
  id: "google_vertex_ai_embeddings",
  processors: [
    {
      inference: {
        model_id: "google_vertex_ai_embeddings",
        input_output: {
          input_field: "content",
          output_field: "content_embedding",
        },
      },
    },
  ],
});
console.log(response);
----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/completion/amazon_bedrock_completion",
-  body: {
+const response = await client.inference.put({
+  task_type: "completion",
+  inference_id: "amazon_bedrock_completion",
+  inference_config: {
     service: "amazonbedrock",
     service_settings: {
       access_key: "<aws_access_key>",
docs/doc_examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc (new file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.reindex({
  wait_for_completion: "false",
  source: {
    index: "test-data",
    size: 50,
  },
  dest: {
    index: "alibabacloud-ai-search-embeddings",
    pipeline: "alibabacloud_ai_search_embeddings",
  },
});
console.log(response);
----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/my-e5-model",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "my-e5-model",
+  inference_config: {
     service: "elasticsearch",
     service_settings: {
       adaptive_allocations: {
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/mistral_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "mistral_embeddings",
+  inference_config: {
     service: "mistral",
     service_settings: {
       api_key: "<api_key>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/rerank/cohere-rerank",
-  body: {
+const response = await client.inference.put({
+  task_type: "rerank",
+  inference_id: "cohere-rerank",
+  inference_config: {
     service: "cohere",
     service_settings: {
       api_key: "<API-KEY>",
@@ -3,23 +3,20 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_scheduling",
-  body: {
-    scheduling: {
-      access_control: {
-        enabled: true,
-        interval: "0 10 0 * * ?",
-      },
-      full: {
-        enabled: true,
-        interval: "0 20 0 * * ?",
-      },
-      incremental: {
-        enabled: false,
-        interval: "0 30 0 * * ?",
-      },
-    },
-  },
+const response = await client.connector.updateScheduling({
+  connector_id: "my-connector",
+  scheduling: {
+    access_control: {
+      enabled: true,
+      interval: "0 10 0 * * ?",
+    },
+    full: {
+      enabled: true,
+      interval: "0 20 0 * * ?",
+    },
+    incremental: {
+      enabled: false,
+      interval: "0 30 0 * * ?",
+    },
+  },
 });
docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc (new file, 22 lines)
@@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.search({
  index: "alibabacloud-ai-search-embeddings",
  knn: {
    field: "content_embedding",
    query_vector_builder: {
      text_embedding: {
        model_id: "alibabacloud_ai_search_embeddings",
        model_text: "Calculate fuel cost",
      },
    },
    k: 10,
    num_candidates: 100,
  },
  _source: ["id", "content"],
});
console.log(response);
----
@@ -3,8 +3,9 @@
 
 [source, js]
 ----
-const response = await client.ingest.putGeoipDatabase({
-  id: "my-database-id",
+const response = await client.transport.request({
+  method: "PUT",
+  path: "/_ingest/geoip/database/my-database-id",
   body: {
     name: "GeoIP2-Domain",
     maxmind: {
docs/doc_examples/9868ce609f4450702934fcbf4c340bf1.asciidoc (new file, 21 lines)
@@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.ingest.putPipeline({
  id: "alibabacloud_ai_search_embeddings",
  processors: [
    {
      inference: {
        model_id: "alibabacloud_ai_search_embeddings",
        input_output: {
          input_field: "content",
          output_field: "content_embedding",
        },
      },
    },
  ],
});
console.log(response);
----
docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc (new file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "alibabacloud_ai_search_embeddings",
  inference_config: {
    service: "alibabacloud-ai-search",
    service_settings: {
      api_key: "<api_key>",
      service_id: "ops-text-embedding-001",
      host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com",
      workspace: "default",
    },
  },
});
console.log(response);
----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/mistral-embeddings-test",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "mistral-embeddings-test",
+  inference_config: {
     service: "mistral",
     service_settings: {
       api_key: "<api_key>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/cohere-embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "cohere-embeddings",
+  inference_config: {
     service: "cohere",
     service_settings: {
       api_key: "<api_key>",
@@ -3,17 +3,14 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-spo-connector/_configuration",
-  body: {
-    values: {
-      tenant_id: "my-tenant-id",
-      tenant_name: "my-sharepoint-site",
-      client_id: "foo",
-      secret_value: "bar",
-      site_collections: "*",
-    },
-  },
+const response = await client.connector.updateConfiguration({
+  connector_id: "my-spo-connector",
+  values: {
+    tenant_id: "my-tenant-id",
+    tenant_name: "my-sharepoint-site",
+    client_id: "foo",
+    secret_value: "bar",
+    site_collections: "*",
+  },
 });
 console.log(response);
docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc (new file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "google_vertex_ai_embeddings",
  inference_config: {
    service: "googlevertexai",
    service_settings: {
      service_account_json: "<service_account_json>",
      model_id: "text-embedding-004",
      location: "<location>",
      project_id: "<project_id>",
    },
  },
});
console.log(response);
----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/openai-embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "openai-embeddings",
+  inference_config: {
     service: "openai",
     service_settings: {
       api_key: "<api_key>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/sparse_embedding/elser_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "elser_embeddings",
+  inference_config: {
     service: "elser",
     service_settings: {
       num_allocations: 1,
@@ -3,9 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_inference/sparse_embedding/my-elser-model",
+const response = await client.inference.get({
+  task_type: "sparse_embedding",
+  inference_id: "my-elser-model",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/hugging_face_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "hugging_face_embeddings",
+  inference_config: {
     service: "hugging_face",
     service_settings: {
       api_key: "<access_token>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/azure_openai_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "azure_openai_embeddings",
+  inference_config: {
     service: "azureopenai",
     service_settings: {
       api_key: "<api_key>",
docs/doc_examples/a69c7c3412af73758f629e76263063b5.asciidoc (new file, 18 lines)
@@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.reindex({
  wait_for_completion: "false",
  source: {
    index: "test-data",
    size: 50,
  },
  dest: {
    index: "google-vertex-ai-embeddings",
    pipeline: "google_vertex_ai_embeddings",
  },
});
console.log(response);
----
@@ -3,9 +3,6 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_connector",
-});
+const response = await client.connector.list();
 console.log(response);
 ----
docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc (new file, 20 lines)
@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "rerank",
  inference_id: "alibabacloud_ai_search_rerank",
  inference_config: {
    service: "alibabacloud-ai-search",
    service_settings: {
      api_key: "<api_key>",
      service_id: "ops-bge-reranker-larger",
      host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com",
      workspace: "default",
    },
  },
});
console.log(response);
----
docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc (new file, 12 lines)
@@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.openPointInTime({
  index: "my-index-000001",
  keep_alive: "1m",
  allow_partial_search_results: "true",
});
console.log(response);
----
@@ -3,12 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "POST",
-  path: "/_inference/completion/openai_chat_completions",
-  body: {
-    input: "What is Elastic?",
-  },
+const response = await client.inference.inference({
+  task_type: "completion",
+  inference_id: "openai_chat_completions",
+  input: "What is Elastic?",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/sparse_embedding/my-elser-endpoint",
-  body: {
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "my-elser-endpoint",
+  inference_config: {
     service: "elser",
     service_settings: {
       num_allocations: 1,
@@ -3,13 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_name",
-  body: {
-    name: "Custom connector",
-    description: "This is my customized connector",
-  },
+const response = await client.connector.updateName({
+  connector_id: "my-connector",
+  name: "Custom connector",
+  description: "This is my customized connector",
 });
 console.log(response);
 ----
@@ -3,13 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-spo-connector/_configuration",
-  body: {
-    values: {
-      secret_value: "foo-bar",
-    },
-  },
+const response = await client.connector.updateConfiguration({
+  connector_id: "my-spo-connector",
+  values: {
+    secret_value: "foo-bar",
+  },
 });
 console.log(response);
@@ -3,12 +3,8 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_connector",
-  querystring: {
-    index_name: "search-google-drive",
-  },
+const response = await client.connector.list({
+  index_name: "search-google-drive",
 });
 console.log(response);
 ----
@@ -3,12 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_service_type",
-  body: {
-    service_type: "sharepoint_online",
-  },
+const response = await client.connector.updateServiceType({
+  connector_id: "my-connector",
+  service_type: "sharepoint_online",
 });
 console.log(response);
 ----
@@ -3,16 +3,13 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector",
-  body: {
-    index_name: "search-google-drive",
-    name: "My Connector",
-    description: "My Connector to sync data to Elastic index from Google Drive",
-    service_type: "google_drive",
-    language: "english",
-  },
+const response = await client.connector.put({
+  connector_id: "my-connector",
+  index_name: "search-google-drive",
+  name: "My Connector",
+  description: "My Connector to sync data to Elastic index from Google Drive",
+  service_type: "google_drive",
+  language: "english",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/sparse_embedding/my-elser-model",
-  body: {
+const response = await client.inference.put({
+  task_type: "sparse_embedding",
+  inference_id: "my-elser-model",
+  inference_config: {
     service: "elser",
     service_settings: {
       num_allocations: 1,
@@ -3,9 +3,8 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_check_in",
+const response = await client.connector.checkIn({
+  connector_id: "my-connector",
 });
 console.log(response);
 ----
@@ -3,13 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_connector",
-  querystring: {
-    from: "0",
-    size: "2",
-  },
+const response = await client.connector.list({
+  from: 0,
+  size: 2,
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/cohere_embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "cohere_embeddings",
+  inference_config: {
     service: "cohere",
     service_settings: {
       api_key: "<api_key>",
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/completion/google_ai_studio_completion",
-  body: {
+const response = await client.inference.put({
+  task_type: "completion",
+  inference_id: "google_ai_studio_completion",
+  inference_config: {
     service: "googleaistudio",
     service_settings: {
       api_key: "<api_key>",
@@ -3,12 +3,8 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_connector",
-  querystring: {
-    service_type: "sharepoint_online,google_drive",
-  },
+const response = await client.connector.list({
+  service_type: "sharepoint_online,google_drive",
 });
 console.log(response);
 ----
@@ -3,15 +3,12 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_scheduling",
-  body: {
-    scheduling: {
-      full: {
-        enabled: true,
-        interval: "0 10 0 * * ?",
-      },
-    },
-  },
+const response = await client.connector.updateScheduling({
+  connector_id: "my-connector",
+  scheduling: {
+    full: {
+      enabled: true,
+      interval: "0 10 0 * * ?",
+    },
+  },
 });
@@ -3,12 +3,9 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-connector/_error",
-  body: {
-    error: "Houston, we have a problem!",
-  },
+const response = await client.connector.updateError({
+  connector_id: "my-connector",
+  error: "Houston, we have a problem!",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/text_embedding/hugging-face-embeddings",
-  body: {
+const response = await client.inference.put({
+  task_type: "text_embedding",
+  inference_id: "hugging-face-embeddings",
+  inference_config: {
     service: "hugging_face",
     service_settings: {
       api_key: "<access_token>",
@@ -3,13 +3,11 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "POST",
-  path: "/_inference/rerank/cohere_rerank",
-  body: {
-    input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"],
-    query: "star wars main character",
-  },
+const response = await client.inference.inference({
+  task_type: "rerank",
+  inference_id: "cohere_rerank",
+  input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"],
+  query: "star wars main character",
 });
 console.log(response);
 ----
@@ -3,9 +3,8 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "GET",
-  path: "/_connector/my-connector",
+const response = await client.connector.get({
+  connector_id: "my-connector",
 });
 console.log(response);
 ----
@@ -3,10 +3,10 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_inference/completion/azure_openai_completion",
-  body: {
+const response = await client.inference.put({
+  task_type: "completion",
+  inference_id: "azure_openai_completion",
+  inference_config: {
     service: "azureopenai",
     service_settings: {
       api_key: "<api_key>",
@@ -3,19 +3,16 @@
 
 [source, js]
 ----
-const response = await client.transport.request({
-  method: "PUT",
-  path: "/_connector/my-sql-connector/_filtering",
-  body: {
-    advanced_snippet: {
-      value: [
-        {
-          tables: ["users", "orders"],
-          query:
-            "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id",
-        },
-      ],
-    },
-  },
+const response = await client.connector.updateFiltering({
+  connector_id: "my-sql-connector",
+  advanced_snippet: {
+    value: [
+      {
+        tables: ["users", "orders"],
+        query:
+          "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id",
+      },
+    ],
+  },
 });
 console.log(response);
@@ -5667,12 +5667,20 @@ client.inference.put({ inference_id })
 === ingest
 [discrete]
 ==== delete_geoip_database
-Deletes a geoip database configuration
+Deletes a geoip database configuration.
 [source,ts]
 ----
-client.ingest.deleteGeoipDatabase()
+client.ingest.deleteGeoipDatabase({ id })
 ----
 
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string | string[])*: A list of geoip database configurations to delete
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
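For orientation, a minimal call matching the argument list above might look like the following sketch; the ID mirrors the `my-database-id` value used in this commit's doc examples, and the timeout values are illustrative, not defaults.

[source,js]
----
// Hedged sketch: delete a single geoip database configuration.
const response = await client.ingest.deleteGeoipDatabase({
  id: "my-database-id",
  master_timeout: "30s", // illustrative timeout values
  timeout: "30s",
});
console.log(response);
----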
 [discrete]
 ==== delete_pipeline
@@ -5708,12 +5716,21 @@ client.ingest.geoIpStats()
 
 [discrete]
 ==== get_geoip_database
-Returns geoip database configuration.
+Returns information about one or more geoip database configurations.
 [source,ts]
 ----
-client.ingest.getGeoipDatabase()
+client.ingest.getGeoipDatabase({ ... })
 ----
 
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve.
+Wildcard (`*`) expressions are supported.
+To get all database configurations, omit this parameter or use `*`.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
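A hedged usage sketch based on the arguments above; omitting `id` (or passing `*`) would fetch all configurations, and the ID shown mirrors this commit's doc examples.

[source,js]
----
// Sketch: fetch one geoip database configuration by ID.
const response = await client.ingest.getGeoipDatabase({
  id: "my-database-id",
});
console.log(response);
----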
 
 [discrete]
 ==== get_pipeline
@@ -5752,12 +5769,23 @@ client.ingest.processorGrok()
 
 [discrete]
 ==== put_geoip_database
-Puts the configuration for a geoip database to be downloaded
+Puts the configuration for a geoip database to be downloaded.
 [source,ts]
 ----
-client.ingest.putGeoipDatabase()
+client.ingest.putGeoipDatabase({ id, name, maxmind })
 ----
 
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: ID of the database configuration to create or update.
+** *`name` (string)*: The provider-assigned name of the IP geolocation database to download.
+** *`maxmind` ({ account_id })*: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading.
+At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
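A sketch of the new required shape; the `name` value mirrors the `GeoIP2-Domain` database used in this commit's doc examples, while the `account_id` value is a placeholder.

[source,js]
----
// Sketch: create or update a geoip database configuration.
const response = await client.ingest.putGeoipDatabase({
  id: "my-database-id",
  name: "GeoIP2-Domain",
  maxmind: {
    account_id: "1234567", // placeholder MaxMind account ID
  },
});
console.log(response);
----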
 
 [discrete]
 ==== put_pipeline
@@ -5777,8 +5805,8 @@ client.ingest.putPipeline({ id })
 ** *`id` (string)*: ID of the ingest pipeline to create or update.
 ** *`_meta` (Optional, Record<string, User-defined value>)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.
 ** *`description` (Optional, string)*: Description of the ingest pipeline.
-** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
-** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
+** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
+** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
 ** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.
 ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
 ** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
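To make the changed `processors`/`on_failure` parameters concrete, here is a hedged sketch using processor types from the supported list above; the pipeline ID and field names are hypothetical placeholders, not values from this commit.

[source,js]
----
// Sketch: a pipeline with one processor and a pipeline-level
// on_failure fallback. "my-pipeline" and the field names are
// hypothetical.
const response = await client.ingest.putPipeline({
  id: "my-pipeline",
  description: "Lowercase the name field",
  processors: [
    {
      lowercase: {
        field: "name",
      },
    },
  ],
  on_failure: [
    {
      set: {
        field: "error_flag",
        value: "true",
      },
    },
  ],
});
console.log(response);
----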
@@ -5791,16 +5819,16 @@ Executes an ingest pipeline against a set of provided documents.
 {ref}/simulate-pipeline-api.html[Endpoint documentation]
 [source,ts]
 ----
-client.ingest.simulate({ ... })
+client.ingest.simulate({ docs })
 ----
 
 [discrete]
 ==== Arguments
 
 * *Request (object):*
+** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
 ** *`id` (Optional, string)*: Pipeline to test.
 If you don’t specify a `pipeline` in the request body, this parameter is required.
-** *`docs` (Optional, { _id, _index, _source }[])*: Sample documents to test in the pipeline.
 ** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test.
 If you don’t specify the `pipeline` request path parameter, this parameter is required.
 If you specify both this and the request path parameter, the API only uses the request path parameter.
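Since `docs` is now required, a hedged sketch of the new call shape follows; the index name, document ID, and document contents are hypothetical placeholders.

[source,js]
----
// Sketch: simulate an inline pipeline against one sample document.
const response = await client.ingest.simulate({
  pipeline: {
    processors: [
      {
        lowercase: {
          field: "name",
        },
      },
    ],
  },
  docs: [
    {
      _index: "my-index", // hypothetical sample document
      _id: "1",
      _source: {
        name: "FOO BAR",
      },
    },
  ],
});
console.log(response);
----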