Auto-generated code for 8.10 (#2077)

This commit is contained in:
Elastic Machine
2023-11-16 17:51:01 +01:00
committed by GitHub
parent 745919c92d
commit d44b0ffc39
3 changed files with 3 additions and 0 deletions

View File

@@ -6355,6 +6355,7 @@ client.ml.startTrainedModelDeployment({ model_id })
** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model.
The default value is the same size as the `model_size_bytes`. To disable the cache,
`0b` can be provided.
** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model.
** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed.
All allocations on a node share the same copy of the model in memory but use
a separate set of threads to evaluate the model.

View File

@@ -14087,6 +14087,7 @@ export interface MlStartDatafeedResponse {
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
cache_size?: ByteSize
deployment_id?: string
number_of_allocations?: integer
priority?: MlTrainingPriority
queue_capacity?: integer

View File

@@ -14356,6 +14356,7 @@ export interface MlStartDatafeedResponse {
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
model_id: Id
cache_size?: ByteSize
deployment_id?: string
number_of_allocations?: integer
priority?: MlTrainingPriority
queue_capacity?: integer