// This file is autogenerated, DO NOT EDIT
// Use `node scripts/generate-docs-examples.js` to generate the docs examples

[source, js]
----
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "my-e5-model",
  inference_config: {
    service: "elasticsearch",
    service_settings: {
      adaptive_allocations: {
        enabled: true,
        min_number_of_allocations: 3,
        max_number_of_allocations: 10,
      },
      num_threads: 1,
      model_id: ".multilingual-e5-small",
    },
  },
});
console.log(response);
----