Auto-generated code for 8.11 (#2076)
@@ -6491,6 +6491,7 @@ client.ml.startTrainedModelDeployment({ model_id })
** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model.
The default value is the same size as the `model_size_bytes`. To disable the cache,
`0b` can be provided.
** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model.
** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed.
All allocations on a node share the same copy of the model in memory but use
a separate set of threads to evaluate the model.
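For context, a minimal sketch of how these options might be passed together. The call shape follows the `client.ml.startTrainedModelDeployment({ model_id })` signature shown in the hunk header above; the node URL and all concrete values are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node URL

async function deployModel() {
  // All values are hypothetical; only the parameter names come from the docs above.
  await client.ml.startTrainedModelDeployment({
    model_id: 'my-trained-model',   // required model identifier
    deployment_id: 'my-deployment', // optional unique identifier for this deployment
    cache_size: '512mb',            // per-node inference cache; '0b' disables caching
    number_of_allocations: 2        // allocations share one model copy per node
  })
}
```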
@@ -14107,6 +14107,7 @@ export interface MlStartDatafeedResponse {
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
  model_id: Id
  cache_size?: ByteSize
  deployment_id?: string
  number_of_allocations?: integer
  priority?: MlTrainingPriority
  queue_capacity?: integer
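A sketch of populating this request interface directly. The field names and optionality mirror the hunk above; the import path is the client's generated-types module, and the concrete values are invented for illustration.

```ts
import type { MlStartTrainedModelDeploymentRequest } from '@elastic/elasticsearch/lib/api/types'

// Placeholder values; `model_id` is the only required field per the interface above.
const request: MlStartTrainedModelDeploymentRequest = {
  model_id: 'my-trained-model',
  deployment_id: 'my-deployment', // the optional string this commit adds
  cache_size: '512mb',            // ByteSize accepts a byte-unit string
  number_of_allocations: 2,
  queue_capacity: 1024
}
```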
||||
@@ -14376,6 +14376,7 @@ export interface MlStartDatafeedResponse {
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
  model_id: Id
  cache_size?: ByteSize
  deployment_id?: string
  number_of_allocations?: integer
  priority?: MlTrainingPriority
  queue_capacity?: integer
||||