Auto-generated API code (#2580)

Elastic Machine, 2025-01-21 19:07:51 +00:00, committed by GitHub
parent fecda564da
commit 3627a4c56b
9 changed files with 130 additions and 16 deletions

@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
model: "gpt-4o",
messages: [
{
role: "user",
content: "What is Elastic?",
},
],
},
});
console.log(response);
----

@@ -3,11 +3,13 @@
[source, js]
----
-const response = await client.inference.inference({
+const response = await client.inference.put({
  task_type: "my-inference-endpoint",
  inference_id: "_update",
-  service_settings: {
-    api_key: "<API_KEY>",
+  inference_config: {
+    service_settings: {
+      api_key: "<API_KEY>",
+    },
  },
});
console.log(response);
----

@@ -1,10 +0,0 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.indices.unfreeze({
index: "my-index-000001",
});
console.log(response);
----

@@ -0,0 +1,47 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
messages: [
{
role: "user",
content: [
{
type: "text",
text: "What's the price of a scarf?",
},
],
},
],
tools: [
{
type: "function",
function: {
name: "get_current_price",
description: "Get the current price of a item",
parameters: {
type: "object",
properties: {
item: {
id: "123",
},
},
},
},
},
],
tool_choice: {
type: "function",
function: {
name: "get_current_price",
},
},
},
});
console.log(response);
----

@@ -0,0 +1,20 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.search({
index: "image-index",
knn: {
field: "image-vector",
query_vector: [-5, 9, -12],
k: 10,
num_candidates: 100,
rescore_vector: {
oversample: 2,
},
},
fields: ["title", "file-type"],
});
console.log(response);
----

@@ -5,7 +5,8 @@
----
const response = await client.cluster.putSettings({
persistent: {
"cluster.routing.allocation.disk.watermark.low": "30gb",
"cluster.routing.allocation.disk.watermark.low": "90%",
"cluster.routing.allocation.disk.watermark.high": "95%",
},
});
console.log(response);

@@ -0,0 +1,34 @@
// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples
[source, js]
----
const response = await client.transport.request({
method: "POST",
path: "/_inference/chat_completion/openai-completion/_stream",
body: {
messages: [
{
role: "assistant",
content: "Let's find out what the weather is",
tool_calls: [
{
id: "call_KcAjWtAww20AihPHphUh46Gd",
type: "function",
function: {
name: "get_current_weather",
arguments: '{"location":"Boston, MA"}',
},
},
],
},
{
role: "tool",
content: "The weather is cold",
tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd",
},
],
},
});
console.log(response);
----

@@ -6894,7 +6894,7 @@ export type CatAllocationResponse = CatAllocationAllocationRecord[]
export interface CatComponentTemplatesComponentTemplate {
name: string
-  version: string
+  version: string | null
alias_count: string
mapping_count: string
settings_count: string

@@ -6974,7 +6974,7 @@ export type CatAllocationResponse = CatAllocationAllocationRecord[]
export interface CatComponentTemplatesComponentTemplate {
name: string
-  version: string
+  version: string | null
alias_count: string
mapping_count: string
settings_count: string