Auto-generated code for main (#2357)

This commit is contained in:
Elastic Machine
2024-09-03 15:36:33 +01:00
committed by GitHub
parent 9e08aaebe2
commit 132d6d6062
82 changed files with 797 additions and 418 deletions

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/my-e5-model",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "my-e5-model",
inference_config: {
service: "elasticsearch",
service_settings: {
num_allocations: 1,

View File

@ -3,13 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_api_key_id",
body: {
api_key_id: "my-api-key-id",
api_key_secret_id: "my-connector-secret-id",
},
const response = await client.connector.updateApiKeyId({
connector_id: "my-connector",
api_key_id: "my-api-key-id",
api_key_secret_id: "my-connector-secret-id",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/google_vertex_ai_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "google_vertex_ai_embeddings",
inference_config: {
service: "googlevertexai",
service_settings: {
service_account_json: "<service_account_json>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/amazon_bedrock_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "amazon_bedrock_embeddings",
inference_config: {
service: "amazonbedrock",
service_settings: {
access_key: "<aws_access_key>",

View File

@ -3,29 +3,26 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-g-drive-connector/_filtering",
body: {
rules: [
{
field: "file_extension",
id: "exclude-txt-files",
order: 0,
policy: "exclude",
rule: "equals",
value: "txt",
},
{
field: "_",
id: "DEFAULT",
order: 1,
policy: "include",
rule: "regex",
value: ".*",
},
],
},
const response = await client.connector.updateFiltering({
connector_id: "my-g-drive-connector",
rules: [
{
field: "file_extension",
id: "exclude-txt-files",
order: 0,
policy: "exclude",
rule: "equals",
value: "txt",
},
{
field: "_",
id: "DEFAULT",
order: 1,
policy: "include",
rule: "regex",
value: ".*",
},
],
});
console.log(response);
----

View File

@ -3,13 +3,11 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/sparse_embedding/my-elser-model",
body: {
input:
"The sky above the port was the color of television tuned to a dead channel.",
},
const response = await client.inference.inference({
task_type: "sparse_embedding",
inference_id: "my-elser-model",
input:
"The sky above the port was the color of television tuned to a dead channel.",
});
console.log(response);
----

View File

@ -0,0 +1,23 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "google-vertex-ai-embeddings",
mappings: {
properties: {
content_embedding: {
type: "dense_vector",
dims: 768,
element_type: "float",
similarity: "dot_product",
},
content: {
type: "text",
},
},
},
});
console.log(response);
----

View File

@ -3,9 +3,9 @@
[source, js]
----
const response = await client.ingest.deleteGeoipDatabase({
id: "my-database-id",
body: null,
const response = await client.transport.request({
method: "DELETE",
path: "/_ingest/geoip/database/my-database-id",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/openai_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "openai_embeddings",
inference_config: {
service: "openai",
service_settings: {
api_key: "<api_key>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/rerank/google_vertex_ai_rerank",
body: {
const response = await client.inference.put({
task_type: "rerank",
inference_id: "google_vertex_ai_rerank",
inference_config: {
service: "googlevertexai",
service_settings: {
service_account_json: "<service_account_json>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/sparse_embedding/my-elser-model",
body: {
const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "my-elser-model",
inference_config: {
service: "elser",
service_settings: {
adaptive_allocations: {

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/azure_ai_studio_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "azure_ai_studio_embeddings",
inference_config: {
service: "azureaistudio",
service_settings: {
api_key: "<api_key>",

View File

@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_connector",
querystring: {
service_type: "sharepoint_online",
},
const response = await client.connector.list({
service_type: "sharepoint_online",
});
console.log(response);
----

View File

@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "google-vertex-ai-embeddings",
knn: {
field: "content_embedding",
query_vector_builder: {
text_embedding: {
model_id: "google_vertex_ai_embeddings",
model_text: "Calculate fuel cost",
},
},
k: 10,
num_candidates: 100,
},
_source: ["id", "content"],
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/azure_ai_studio_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "azure_ai_studio_embeddings",
inference_config: {
service: "azureaistudio",
service_settings: {
api_key: "<api_key>",

View File

@ -3,14 +3,11 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector",
body: {
index_name: "search-google-drive",
name: "My Connector",
service_type: "google_drive",
},
const response = await client.connector.put({
connector_id: "my-connector",
index_name: "search-google-drive",
name: "My Connector",
service_type: "google_drive",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/anthropic_completion",
body: {
const response = await client.inference.put({
task_type: "completion",
inference_id: "anthropic_completion",
inference_config: {
service: "anthropic",
service_settings: {
api_key: "<api_key>",

View File

@ -3,9 +3,9 @@
[source, js]
----
const response = await client.ingest.getGeoipDatabase({
id: "my-database-id",
body: null,
const response = await client.transport.request({
method: "GET",
path: "/_ingest/geoip/database/my-database-id",
});
console.log(response);
----

View File

@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "alibabacloud_ai_search_embeddings",
inference_config: {
service: "alibabacloud-ai-search",
service_settings: {
api_key: "<api_key>",
service_id: "<service_id>",
host: "<host>",
workspace: "<workspace>",
},
},
});
console.log(response);
----

View File

@ -3,16 +3,13 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_pipeline",
body: {
pipeline: {
extract_binary_content: true,
name: "my-connector-pipeline",
reduce_whitespace: true,
run_ml_inference: true,
},
const response = await client.connector.updatePipeline({
connector_id: "my-connector",
pipeline: {
extract_binary_content: true,
name: "my-connector-pipeline",
reduce_whitespace: true,
run_ml_inference: true,
},
});
console.log(response);

View File

@ -3,12 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_status",
body: {
status: "needs_configuration",
},
const response = await client.connector.updateStatus({
connector_id: "my-connector",
status: "needs_configuration",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/my-msmarco-minilm-model",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "my-msmarco-minilm-model",
inference_config: {
service: "elasticsearch",
service_settings: {
num_allocations: 1,

View File

@ -3,9 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "DELETE",
path: "/_inference/sparse_embedding/my-elser-model",
const response = await client.inference.delete({
task_type: "sparse_embedding",
inference_id: "my-elser-model",
});
console.log(response);
----

View File

@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "alibabacloud_ai_search_sparse",
inference_config: {
service: "alibabacloud-ai-search",
service_settings: {
api_key: "<api_key>",
service_id: "ops-text-sparse-embedding-001",
host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com",
workspace: "default",
},
},
});
console.log(response);
----

View File

@ -3,9 +3,9 @@
[source, js]
----
const response = await client.ingest.deleteGeoipDatabase({
id: "example-database-id",
body: null,
const response = await client.transport.request({
method: "DELETE",
path: "/_ingest/geoip/database/example-database-id",
});
console.log(response);
----

View File

@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.create({
index: "alibabacloud-ai-search-embeddings",
mappings: {
properties: {
content_embedding: {
type: "dense_vector",
dims: 1024,
element_type: "float",
},
content: {
type: "text",
},
},
},
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/openai-completion",
body: {
const response = await client.inference.put({
task_type: "completion",
inference_id: "openai-completion",
inference_config: {
service: "openai",
service_settings: {
api_key: "<api_key>",

View File

@ -3,21 +3,18 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_last_sync",
body: {
last_access_control_sync_error: "Houston, we have a problem!",
last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
last_access_control_sync_status: "pending",
last_deleted_document_count: 42,
last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
last_indexed_document_count: 42,
last_sync_error: "Houston, we have a problem!",
last_sync_scheduled_at: "2024-11-09T15:13:08.231Z",
last_sync_status: "completed",
last_synced: "2024-11-09T15:13:08.231Z",
},
const response = await client.connector.lastSync({
connector_id: "my-connector",
last_access_control_sync_error: "Houston, we have a problem!",
last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
last_access_control_sync_status: "pending",
last_deleted_document_count: 42,
last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z",
last_indexed_document_count: 42,
last_sync_error: "Houston, we have a problem!",
last_sync_scheduled_at: "2024-11-09T15:13:08.231Z",
last_sync_status: "completed",
last_synced: "2024-11-09T15:13:08.231Z",
});
console.log(response);
----

View File

@ -3,12 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_index_name",
body: {
index_name: "data-from-my-google-drive",
},
const response = await client.connector.updateIndexName({
connector_id: "my-connector",
index_name: "data-from-my-google-drive",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/azure_ai_studio_completion",
body: {
const response = await client.inference.put({
task_type: "completion",
inference_id: "azure_ai_studio_completion",
inference_config: {
service: "azureaistudio",
service_settings: {
api_key: "<api_key>",

View File

@ -3,15 +3,13 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/text_embedding/my-cohere-endpoint",
body: {
input:
"The sky above the port was the color of television tuned to a dead channel.",
task_settings: {
input_type: "ingest",
},
const response = await client.inference.inference({
task_type: "text_embedding",
inference_id: "my-cohere-endpoint",
input:
"The sky above the port was the color of television tuned to a dead channel.",
task_settings: {
input_type: "ingest",
},
});
console.log(response);

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/amazon_bedrock_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "amazon_bedrock_embeddings",
inference_config: {
service: "amazonbedrock",
service_settings: {
access_key: "<aws_access_key>",

View File

@ -3,12 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "DELETE",
path: "/_connector/another-connector",
querystring: {
delete_sync_jobs: "true",
},
const response = await client.connector.delete({
connector_id: "another-connector",
delete_sync_jobs: "true",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/azure_openai_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "azure_openai_embeddings",
inference_config: {
service: "azureopenai",
service_settings: {
api_key: "<api_key>",

View File

@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.putPipeline({
id: "google_vertex_ai_embeddings",
processors: [
{
inference: {
model_id: "google_vertex_ai_embeddings",
input_output: {
input_field: "content",
output_field: "content_embedding",
},
},
},
],
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/amazon_bedrock_completion",
body: {
const response = await client.inference.put({
task_type: "completion",
inference_id: "amazon_bedrock_completion",
inference_config: {
service: "amazonbedrock",
service_settings: {
access_key: "<aws_access_key>",

View File

@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.reindex({
wait_for_completion: "false",
source: {
index: "test-data",
size: 50,
},
dest: {
index: "alibabacloud-ai-search-embeddings",
pipeline: "alibabacloud_ai_search_embeddings",
},
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/my-e5-model",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "my-e5-model",
inference_config: {
service: "elasticsearch",
service_settings: {
adaptive_allocations: {

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/mistral_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "mistral_embeddings",
inference_config: {
service: "mistral",
service_settings: {
api_key: "<api_key>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/rerank/cohere-rerank",
body: {
const response = await client.inference.put({
task_type: "rerank",
inference_id: "cohere-rerank",
inference_config: {
service: "cohere",
service_settings: {
api_key: "<API-KEY>",

View File

@ -3,23 +3,20 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_scheduling",
body: {
scheduling: {
access_control: {
enabled: true,
interval: "0 10 0 * * ?",
},
full: {
enabled: true,
interval: "0 20 0 * * ?",
},
incremental: {
enabled: false,
interval: "0 30 0 * * ?",
},
const response = await client.connector.updateScheduling({
connector_id: "my-connector",
scheduling: {
access_control: {
enabled: true,
interval: "0 10 0 * * ?",
},
full: {
enabled: true,
interval: "0 20 0 * * ?",
},
incremental: {
enabled: false,
interval: "0 30 0 * * ?",
},
},
});

View File

@ -0,0 +1,22 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "alibabacloud-ai-search-embeddings",
knn: {
field: "content_embedding",
query_vector_builder: {
text_embedding: {
model_id: "alibabacloud_ai_search_embeddings",
model_text: "Calculate fuel cost",
},
},
k: 10,
num_candidates: 100,
},
_source: ["id", "content"],
});
console.log(response);
----

View File

@ -3,8 +3,9 @@
[source, js]
----
const response = await client.ingest.putGeoipDatabase({
id: "my-database-id",
const response = await client.transport.request({
method: "PUT",
path: "/_ingest/geoip/database/my-database-id",
body: {
name: "GeoIP2-Domain",
maxmind: {

View File

@ -0,0 +1,21 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.ingest.putPipeline({
id: "alibabacloud_ai_search_embeddings",
processors: [
{
inference: {
model_id: "alibabacloud_ai_search_embeddings",
input_output: {
input_field: "content",
output_field: "content_embedding",
},
},
},
],
});
console.log(response);
----

View File

@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "alibabacloud_ai_search_embeddings",
inference_config: {
service: "alibabacloud-ai-search",
service_settings: {
api_key: "<api_key>",
service_id: "ops-text-embedding-001",
host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com",
workspace: "default",
},
},
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/mistral-embeddings-test",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "mistral-embeddings-test",
inference_config: {
service: "mistral",
service_settings: {
api_key: "<api_key>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/cohere-embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "cohere-embeddings",
inference_config: {
service: "cohere",
service_settings: {
api_key: "<api_key>",

View File

@ -3,17 +3,14 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-spo-connector/_configuration",
body: {
values: {
tenant_id: "my-tenant-id",
tenant_name: "my-sharepoint-site",
client_id: "foo",
secret_value: "bar",
site_collections: "*",
},
const response = await client.connector.updateConfiguration({
connector_id: "my-spo-connector",
values: {
tenant_id: "my-tenant-id",
tenant_name: "my-sharepoint-site",
client_id: "foo",
secret_value: "bar",
site_collections: "*",
},
});
console.log(response);

View File

@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "google_vertex_ai_embeddings",
inference_config: {
service: "googlevertexai",
service_settings: {
service_account_json: "<service_account_json>",
model_id: "text-embedding-004",
location: "<location>",
project_id: "<project_id>",
},
},
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/openai-embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "openai-embeddings",
inference_config: {
service: "openai",
service_settings: {
api_key: "<api_key>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/sparse_embedding/elser_embeddings",
body: {
const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "elser_embeddings",
inference_config: {
service: "elser",
service_settings: {
num_allocations: 1,

View File

@ -3,9 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_inference/sparse_embedding/my-elser-model",
const response = await client.inference.get({
task_type: "sparse_embedding",
inference_id: "my-elser-model",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/hugging_face_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "hugging_face_embeddings",
inference_config: {
service: "hugging_face",
service_settings: {
api_key: "<access_token>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/azure_openai_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "azure_openai_embeddings",
inference_config: {
service: "azureopenai",
service_settings: {
api_key: "<api_key>",

View File

@ -0,0 +1,18 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.reindex({
wait_for_completion: "false",
source: {
index: "test-data",
size: 50,
},
dest: {
index: "google-vertex-ai-embeddings",
pipeline: "google_vertex_ai_embeddings",
},
});
console.log(response);
----

View File

@ -3,9 +3,6 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_connector",
});
const response = await client.connector.list();
console.log(response);
----

View File

@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.inference.put({
task_type: "rerank",
inference_id: "alibabacloud_ai_search_rerank",
inference_config: {
service: "alibabacloud-ai-search",
service_settings: {
api_key: "<api_key>",
service_id: "ops-bge-reranker-larger",
host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com",
workspace: "default",
},
},
});
console.log(response);
----

View File

@ -0,0 +1,12 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.openPointInTime({
index: "my-index-000001",
keep_alive: "1m",
allow_partial_search_results: "true",
});
console.log(response);
----

View File

@ -3,12 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/completion/openai_chat_completions",
body: {
input: "What is Elastic?",
},
const response = await client.inference.inference({
task_type: "completion",
inference_id: "openai_chat_completions",
input: "What is Elastic?",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/sparse_embedding/my-elser-endpoint",
body: {
const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "my-elser-endpoint",
inference_config: {
service: "elser",
service_settings: {
num_allocations: 1,

View File

@ -3,13 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_name",
body: {
name: "Custom connector",
description: "This is my customized connector",
},
const response = await client.connector.updateName({
connector_id: "my-connector",
name: "Custom connector",
description: "This is my customized connector",
});
console.log(response);
----

View File

@ -3,13 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-spo-connector/_configuration",
body: {
values: {
secret_value: "foo-bar",
},
const response = await client.connector.updateConfiguration({
connector_id: "my-spo-connector",
values: {
secret_value: "foo-bar",
},
});
console.log(response);

View File

@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_connector",
querystring: {
index_name: "search-google-drive",
},
const response = await client.connector.list({
index_name: "search-google-drive",
});
console.log(response);
----

View File

@ -3,12 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_service_type",
body: {
service_type: "sharepoint_online",
},
const response = await client.connector.updateServiceType({
connector_id: "my-connector",
service_type: "sharepoint_online",
});
console.log(response);
----

View File

@ -3,16 +3,13 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector",
body: {
index_name: "search-google-drive",
name: "My Connector",
description: "My Connector to sync data to Elastic index from Google Drive",
service_type: "google_drive",
language: "english",
},
const response = await client.connector.put({
connector_id: "my-connector",
index_name: "search-google-drive",
name: "My Connector",
description: "My Connector to sync data to Elastic index from Google Drive",
service_type: "google_drive",
language: "english",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/sparse_embedding/my-elser-model",
body: {
const response = await client.inference.put({
task_type: "sparse_embedding",
inference_id: "my-elser-model",
inference_config: {
service: "elser",
service_settings: {
num_allocations: 1,

View File

@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_check_in",
const response = await client.connector.checkIn({
connector_id: "my-connector",
});
console.log(response);
----

View File

@ -3,13 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_connector",
querystring: {
from: "0",
size: "2",
},
const response = await client.connector.list({
from: 0,
size: 2,
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/cohere_embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "cohere_embeddings",
inference_config: {
service: "cohere",
service_settings: {
api_key: "<api_key>",

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/google_ai_studio_completion",
body: {
const response = await client.inference.put({
task_type: "completion",
inference_id: "google_ai_studio_completion",
inference_config: {
service: "googleaistudio",
service_settings: {
api_key: "<api_key>",

View File

@ -3,12 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_connector",
querystring: {
service_type: "sharepoint_online,google_drive",
},
const response = await client.connector.list({
service_type: "sharepoint_online,google_drive",
});
console.log(response);
----

View File

@ -3,15 +3,12 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_scheduling",
body: {
scheduling: {
full: {
enabled: true,
interval: "0 10 0 * * ?",
},
const response = await client.connector.updateScheduling({
connector_id: "my-connector",
scheduling: {
full: {
enabled: true,
interval: "0 10 0 * * ?",
},
},
});

View File

@ -3,12 +3,9 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-connector/_error",
body: {
error: "Houston, we have a problem!",
},
const response = await client.connector.updateError({
connector_id: "my-connector",
error: "Houston, we have a problem!",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/text_embedding/hugging-face-embeddings",
body: {
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "hugging-face-embeddings",
inference_config: {
service: "hugging_face",
service_settings: {
api_key: "<access_token>",

View File

@ -3,13 +3,11 @@
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/rerank/cohere_rerank",
body: {
input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"],
query: "star wars main character",
},
const response = await client.inference.inference({
task_type: "rerank",
inference_id: "cohere_rerank",
input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"],
query: "star wars main character",
});
console.log(response);
----

View File

@ -3,9 +3,8 @@
[source, js]
----
const response = await client.transport.request({
method: "GET",
path: "/_connector/my-connector",
const response = await client.connector.get({
connector_id: "my-connector",
});
console.log(response);
----

View File

@ -3,10 +3,10 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_inference/completion/azure_openai_completion",
body: {
const response = await client.inference.put({
task_type: "completion",
inference_id: "azure_openai_completion",
inference_config: {
service: "azureopenai",
service_settings: {
api_key: "<api_key>",

View File

@ -3,19 +3,16 @@
[source, js]
----
const response = await client.transport.request({
method: "PUT",
path: "/_connector/my-sql-connector/_filtering",
body: {
advanced_snippet: {
value: [
{
tables: ["users", "orders"],
query:
"SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id",
},
],
},
const response = await client.connector.updateFiltering({
connector_id: "my-sql-connector",
advanced_snippet: {
value: [
{
tables: ["users", "orders"],
query:
"SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id",
},
],
},
});
console.log(response);

View File

@ -5667,12 +5667,20 @@ client.inference.put({ inference_id })
=== ingest
[discrete]
==== delete_geoip_database
Deletes a geoip database configuration
Deletes a geoip database configuration.
[source,ts]
----
client.ingest.deleteGeoipDatabase()
client.ingest.deleteGeoipDatabase({ id })
----
[discrete]
==== Arguments
* *Request (object):*
** *`id` (string | string[])*: A list of geoip database configurations to delete
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== delete_pipeline
@ -5708,12 +5716,21 @@ client.ingest.geoIpStats()
[discrete]
==== get_geoip_database
Returns geoip database configuration.
Returns information about one or more geoip database configurations.
[source,ts]
----
client.ingest.getGeoipDatabase()
client.ingest.getGeoipDatabase({ ... })
----
[discrete]
==== Arguments
* *Request (object):*
** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve.
Wildcard (`*`) expressions are supported.
To get all database configurations, omit this parameter or use `*`.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== get_pipeline
@ -5752,12 +5769,23 @@ client.ingest.processorGrok()
[discrete]
==== put_geoip_database
Puts the configuration for a geoip database to be downloaded
Creates or updates a geoip database configuration.
[source,ts]
----
client.ingest.putGeoipDatabase()
client.ingest.putGeoipDatabase({ id, name, maxmind })
----
[discrete]
==== Arguments
* *Request (object):*
** *`id` (string)*: ID of the database configuration to create or update.
** *`name` (string)*: The provider-assigned name of the IP geolocation database to download.
** *`maxmind` ({ account_id })*: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading.
At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
[discrete]
==== put_pipeline
@ -5777,8 +5805,8 @@ client.ingest.putPipeline({ id })
** *`id` (string)*: ID of the ingest pipeline to create or update.
** *`_meta` (Optional, Record<string, User-defined value>)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.
** *`description` (Optional, string)*: Description of the ingest pipeline.
** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
** *`on_failure` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
** *`processors` (Optional, { append, attachment, bytes, circle, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, foreach, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, pipeline, remove, rename, reroute, script, set, set_security_user, sort, split, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.
** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
@ -5791,16 +5819,16 @@ Executes an ingest pipeline against a set of provided documents.
{ref}/simulate-pipeline-api.html[Endpoint documentation]
[source,ts]
----
client.ingest.simulate({ ... })
client.ingest.simulate({ docs })
----
[discrete]
==== Arguments
* *Request (object):*
** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline.
** *`id` (Optional, string)*: Pipeline to test.
If you don't specify a `pipeline` in the request body, this parameter is required.
** *`docs` (Optional, { _id, _index, _source }[])*: Sample documents to test in the pipeline.
** *`pipeline` (Optional, { description, on_failure, processors, version, _meta })*: Pipeline to test.
If you don't specify the `pipeline` request path parameter, this parameter is required.
If you specify both this and the request path parameter, the API only uses the request path parameter.

View File

@ -45,22 +45,22 @@ export default class Ingest {
}
/**
* Deletes a geoip database configuration
* Deletes a geoip database configuration.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
*/
async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async deleteGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeleteGeoipDatabaseResponse>
async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeleteGeoipDatabaseResponse, unknown>>
async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteGeoipDatabaseResponse>
async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['id']
const querystring: Record<string, any> = {}
const body = undefined
params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
@ -139,13 +139,13 @@ export default class Ingest {
}
/**
* Returns geoip database configuration.
* Returns information about one or more geoip database configurations.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
*/
async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async getGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetGeoipDatabaseResponse>
async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetGeoipDatabaseResponse, unknown>>
async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetGeoipDatabaseResponse>
async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['id']
const querystring: Record<string, any> = {}
const body = undefined
@ -155,6 +155,7 @@ export default class Ingest {
if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
@ -248,22 +249,34 @@ export default class Ingest {
}
/**
* Puts the configuration for a geoip database to be downloaded
 * Creates or updates a geoip database configuration.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html | Elasticsearch API documentation}
*/
async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
async putGeoipDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestPutGeoipDatabaseResponse>
async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestPutGeoipDatabaseResponse, unknown>>
async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestPutGeoipDatabaseResponse>
async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['id']
const acceptedBody: string[] = ['name', 'maxmind']
const querystring: Record<string, any> = {}
const body = undefined
// @ts-expect-error
const userBody: any = params?.body
let body: Record<string, any> | string
if (typeof userBody === 'string') {
body = userBody
} else {
body = userBody != null ? { ...userBody } : undefined
}
params = params ?? {}
for (const key in params) {
if (acceptedPath.includes(key)) {
if (acceptedBody.includes(key)) {
body = body ?? {}
// @ts-expect-error
body[key] = params[key]
} else if (acceptedPath.includes(key)) {
continue
} else if (key !== 'body') {
// @ts-expect-error
querystring[key] = params[key]
}
}
@ -327,10 +340,10 @@ export default class Ingest {
* Executes an ingest pipeline against a set of provided documents.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html | Elasticsearch API documentation}
*/
async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestSimulateResponse>
async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestSimulateResponse, unknown>>
async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise<T.IngestSimulateResponse>
async simulate (this: That, params?: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise<any> {
async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestSimulateResponse>
async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestSimulateResponse, unknown>>
async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise<T.IngestSimulateResponse>
async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise<any> {
const acceptedPath: string[] = ['id']
const acceptedBody: string[] = ['docs', 'pipeline']
const querystring: Record<string, any> = {}
@ -343,7 +356,6 @@ export default class Ingest {
body = userBody != null ? { ...userBody } : undefined
}
params = params ?? {}
for (const key in params) {
if (acceptedBody.includes(key)) {
body = body ?? {}

View File

@ -1117,7 +1117,7 @@ export interface RenderSearchTemplateResponse {
export interface ScriptsPainlessExecutePainlessContextSetup {
document: any
index: IndexName
query: QueryDslQueryContainer
query?: QueryDslQueryContainer
}
export interface ScriptsPainlessExecuteRequest extends RequestBase {
@ -4836,11 +4836,11 @@ export type AnalysisPhoneticRuleType = 'approx' | 'exact'
export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
type: 'phonetic'
encoder: AnalysisPhoneticEncoder
languageset: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
max_code_len?: integer
name_type: AnalysisPhoneticNameType
name_type?: AnalysisPhoneticNameType
replace?: boolean
rule_type: AnalysisPhoneticRuleType
rule_type?: AnalysisPhoneticRuleType
}
export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
@ -12445,6 +12445,11 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
trim?: boolean
}
export interface IngestDatabaseConfiguration {
name: Name
maxmind: IngestMaxmind
}
export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
date_formats: string[]
date_rounding: string
@ -12523,6 +12528,12 @@ export interface IngestGsubProcessor extends IngestProcessorBase {
target_field?: Field
}
export interface IngestHtmlStripProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
target_field?: Field
}
export interface IngestInferenceConfig {
regression?: IngestInferenceConfigRegression
classification?: IngestInferenceConfigClassification
@ -12584,6 +12595,10 @@ export interface IngestLowercaseProcessor extends IngestProcessorBase {
target_field?: Field
}
export interface IngestMaxmind {
account_id: Id
}
export interface IngestPipeline {
description?: string
on_failure?: IngestProcessorContainer[]
@ -12629,6 +12644,7 @@ export interface IngestProcessorContainer {
geoip?: IngestGeoIpProcessor
grok?: IngestGrokProcessor
gsub?: IngestGsubProcessor
html_strip?: IngestHtmlStripProcessor
inference?: IngestInferenceProcessor
join?: IngestJoinProcessor
json?: IngestJsonProcessor
@ -12646,6 +12662,7 @@ export interface IngestProcessorContainer {
trim?: IngestTrimProcessor
uppercase?: IngestUppercaseProcessor
urldecode?: IngestUrlDecodeProcessor
uri_parts?: IngestUriPartsProcessor
user_agent?: IngestUserAgentProcessor
}
@ -12716,6 +12733,14 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase {
target_field?: Field
}
export interface IngestUriPartsProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
keep_original?: boolean
remove_if_successful?: boolean
target_field?: Field
}
export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
@ -12732,6 +12757,14 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase {
export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
id: Ids
master_timeout?: Duration
timeout?: Duration
}
export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
export interface IngestDeletePipelineRequest extends RequestBase {
id: Id
master_timeout?: Duration
@ -12744,8 +12777,9 @@ export interface IngestGeoIpStatsGeoIpDownloadStatistics {
successful_downloads: integer
failed_downloads: integer
total_download_time: DurationValue<UnitMillis>
database_count: integer
databases_count: integer
skipped_updates: integer
expired_databases: integer
}
export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
@ -12765,6 +12799,22 @@ export interface IngestGeoIpStatsResponse {
nodes: Record<Id, IngestGeoIpStatsGeoIpNodeDatabases>
}
export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata {
id: Id
version: long
modified_date_millis: EpochTime<UnitMillis>
database: IngestDatabaseConfiguration
}
export interface IngestGetGeoipDatabaseRequest extends RequestBase {
id?: Ids
master_timeout?: Duration
}
export interface IngestGetGeoipDatabaseResponse {
databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
}
export interface IngestGetPipelineRequest extends RequestBase {
id?: Id
master_timeout?: Duration
@ -12780,6 +12830,16 @@ export interface IngestProcessorGrokResponse {
patterns: Record<string, string>
}
export interface IngestPutGeoipDatabaseRequest extends RequestBase {
id: Id
master_timeout?: Duration
timeout?: Duration
name: Name
maxmind: IngestMaxmind
}
export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
export interface IngestPutPipelineRequest extends RequestBase {
id: Id
master_timeout?: Duration
@ -12819,21 +12879,29 @@ export interface IngestSimulateIngest {
export interface IngestSimulatePipelineSimulation {
doc?: IngestSimulateDocumentSimulation
processor_results?: IngestSimulatePipelineSimulation[]
tag?: string
processor_type?: string
status?: WatcherActionStatusOptions
description?: string
ignored_error?: ErrorCause
error?: ErrorCause
}
export interface IngestSimulateRequest extends RequestBase {
id?: Id
verbose?: boolean
docs?: IngestSimulateDocument[]
docs: IngestSimulateDocument[]
pipeline?: IngestPipeline
}
export interface IngestSimulateResponse {
docs: IngestSimulatePipelineSimulation[]
docs: IngestSimulateSimulateDocumentResult[]
}
export interface IngestSimulateSimulateDocumentResult {
doc?: IngestSimulateDocumentSimulation
error?: ErrorCause
processor_results?: IngestSimulatePipelineSimulation[]
}
export interface LicenseLicense {

View File

@ -1162,7 +1162,7 @@ export interface RenderSearchTemplateResponse {
export interface ScriptsPainlessExecutePainlessContextSetup {
document: any
index: IndexName
query: QueryDslQueryContainer
query?: QueryDslQueryContainer
}
export interface ScriptsPainlessExecuteRequest extends RequestBase {
@ -4909,11 +4909,11 @@ export type AnalysisPhoneticRuleType = 'approx' | 'exact'
export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase {
type: 'phonetic'
encoder: AnalysisPhoneticEncoder
languageset: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]
max_code_len?: integer
name_type: AnalysisPhoneticNameType
name_type?: AnalysisPhoneticNameType
replace?: boolean
rule_type: AnalysisPhoneticRuleType
rule_type?: AnalysisPhoneticRuleType
}
export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
@ -12670,6 +12670,11 @@ export interface IngestCsvProcessor extends IngestProcessorBase {
trim?: boolean
}
export interface IngestDatabaseConfiguration {
name: Name
maxmind: IngestMaxmind
}
export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
date_formats: string[]
date_rounding: string
@ -12748,6 +12753,12 @@ export interface IngestGsubProcessor extends IngestProcessorBase {
target_field?: Field
}
export interface IngestHtmlStripProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
target_field?: Field
}
export interface IngestInferenceConfig {
regression?: IngestInferenceConfigRegression
classification?: IngestInferenceConfigClassification
@ -12809,6 +12820,10 @@ export interface IngestLowercaseProcessor extends IngestProcessorBase {
target_field?: Field
}
export interface IngestMaxmind {
account_id: Id
}
export interface IngestPipeline {
description?: string
on_failure?: IngestProcessorContainer[]
@ -12854,6 +12869,7 @@ export interface IngestProcessorContainer {
geoip?: IngestGeoIpProcessor
grok?: IngestGrokProcessor
gsub?: IngestGsubProcessor
html_strip?: IngestHtmlStripProcessor
inference?: IngestInferenceProcessor
join?: IngestJoinProcessor
json?: IngestJsonProcessor
@ -12871,6 +12887,7 @@ export interface IngestProcessorContainer {
trim?: IngestTrimProcessor
uppercase?: IngestUppercaseProcessor
urldecode?: IngestUrlDecodeProcessor
uri_parts?: IngestUriPartsProcessor
user_agent?: IngestUserAgentProcessor
}
@ -12941,6 +12958,14 @@ export interface IngestUppercaseProcessor extends IngestProcessorBase {
target_field?: Field
}
export interface IngestUriPartsProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
keep_original?: boolean
remove_if_successful?: boolean
target_field?: Field
}
export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
field: Field
ignore_missing?: boolean
@ -12957,6 +12982,14 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase {
export type IngestUserAgentProperty = 'NAME' | 'MAJOR' | 'MINOR' | 'PATCH' | 'OS' | 'OS_NAME' | 'OS_MAJOR' | 'OS_MINOR' | 'DEVICE' | 'BUILD'
export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
id: Ids
master_timeout?: Duration
timeout?: Duration
}
export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
export interface IngestDeletePipelineRequest extends RequestBase {
id: Id
master_timeout?: Duration
@ -12969,8 +13002,9 @@ export interface IngestGeoIpStatsGeoIpDownloadStatistics {
successful_downloads: integer
failed_downloads: integer
total_download_time: DurationValue<UnitMillis>
database_count: integer
databases_count: integer
skipped_updates: integer
expired_databases: integer
}
export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
@ -12990,6 +13024,22 @@ export interface IngestGeoIpStatsResponse {
nodes: Record<Id, IngestGeoIpStatsGeoIpNodeDatabases>
}
export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata {
id: Id
version: long
modified_date_millis: EpochTime<UnitMillis>
database: IngestDatabaseConfiguration
}
export interface IngestGetGeoipDatabaseRequest extends RequestBase {
id?: Ids
master_timeout?: Duration
}
export interface IngestGetGeoipDatabaseResponse {
databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
}
export interface IngestGetPipelineRequest extends RequestBase {
id?: Id
master_timeout?: Duration
@ -13005,6 +13055,19 @@ export interface IngestProcessorGrokResponse {
patterns: Record<string, string>
}
export interface IngestPutGeoipDatabaseRequest extends RequestBase {
id: Id
master_timeout?: Duration
timeout?: Duration
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
name: Name
maxmind: IngestMaxmind
}
}
export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
export interface IngestPutPipelineRequest extends RequestBase {
id: Id
master_timeout?: Duration
@ -13047,10 +13110,12 @@ export interface IngestSimulateIngest {
export interface IngestSimulatePipelineSimulation {
doc?: IngestSimulateDocumentSimulation
processor_results?: IngestSimulatePipelineSimulation[]
tag?: string
processor_type?: string
status?: WatcherActionStatusOptions
description?: string
ignored_error?: ErrorCause
error?: ErrorCause
}
export interface IngestSimulateRequest extends RequestBase {
@ -13058,13 +13123,19 @@ export interface IngestSimulateRequest extends RequestBase {
verbose?: boolean
/** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
body?: {
docs?: IngestSimulateDocument[]
docs: IngestSimulateDocument[]
pipeline?: IngestPipeline
}
}
export interface IngestSimulateResponse {
docs: IngestSimulatePipelineSimulation[]
docs: IngestSimulateSimulateDocumentResult[]
}
export interface IngestSimulateSimulateDocumentResult {
doc?: IngestSimulateDocumentSimulation
error?: ErrorCause
processor_results?: IngestSimulatePipelineSimulation[]
}
export interface LicenseLicense {