Auto-generated code for 8.10 (#2077)
This commit is contained in:
@@ -6355,6 +6355,7 @@ client.ml.startTrainedModelDeployment({ model_id })
|
||||
** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided.
|
||||
** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model.
|
||||
** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model.
|
||||
|
||||
@@ -14087,6 +14087,7 @@ export interface MlStartDatafeedResponse {
|
||||
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
|
||||
model_id: Id
|
||||
cache_size?: ByteSize
|
||||
deployment_id?: string
|
||||
number_of_allocations?: integer
|
||||
priority?: MlTrainingPriority
|
||||
queue_capacity?: integer
|
||||
|
||||
@@ -14356,6 +14356,7 @@ export interface MlStartDatafeedResponse {
|
||||
export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
|
||||
model_id: Id
|
||||
cache_size?: ByteSize
|
||||
deployment_id?: string
|
||||
number_of_allocations?: integer
|
||||
priority?: MlTrainingPriority
|
||||
queue_capacity?: integer
|
||||
|
||||
Reference in New Issue
Block a user