Files
elasticsearch-js/docs/reference/ml.asciidoc
2024-12-05 14:47:50 -06:00

3201 lines
75 KiB
Plaintext

[[reference-ml]]
== client.ml
////////
===========================================================================================================================
|| ||
|| ||
|| ||
|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ ||
|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ ||
|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ ||
|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ||
|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ ||
|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ ||
|| ||
|| ||
|| This file is autogenerated, DO NOT send pull requests that changes this file directly. ||
|| You should update the script that does the generation, which can be found in: ||
|| https://github.com/elastic/elastic-client-generator-js ||
|| ||
|| You can run the script with the following command: ||
|| npm run elasticsearch -- --version <version> ||
|| ||
|| ||
|| ||
===========================================================================================================================
////////
++++
<style>
.lang-ts a.xref {
text-decoration: underline !important;
}
</style>
++++
[discrete]
[[client.ml.clearTrainedModelDeploymentCache]]
== `client.ml.clearTrainedModelDeploymentCache()`
Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.
{ref}/clear-trained-model-deployment-cache.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions) => Promise<MlClearTrainedModelDeploymentCacheResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlClearTrainedModelDeploymentCacheRequest extends <<RequestBase>> {
model_id: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlClearTrainedModelDeploymentCacheResponse {
cleared: boolean
}
----
[discrete]
[[client.ml.closeJob]]
== `client.ml.closeJob()`
Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.
{ref}/ml-close-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlCloseJobRequest, options?: TransportRequestOptions) => Promise<MlCloseJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlCloseJobRequest extends <<RequestBase>> {
job_id: <<Id>>
allow_no_match?: boolean
force?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlCloseJobResponse {
closed: boolean
}
----
[discrete]
[[client.ml.deleteCalendar]]
== `client.ml.deleteCalendar()`
Delete a calendar. Removes all scheduled events from a calendar, then deletes it.
{ref}/ml-delete-calendar.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteCalendarRequest, options?: TransportRequestOptions) => Promise<MlDeleteCalendarResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteCalendarRequest extends <<RequestBase>> {
calendar_id: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteCalendarResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteCalendarEvent]]
== `client.ml.deleteCalendarEvent()`
Delete events from a calendar.
{ref}/ml-delete-calendar-event.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteCalendarEventRequest, options?: TransportRequestOptions) => Promise<MlDeleteCalendarEventResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteCalendarEventRequest extends <<RequestBase>> {
calendar_id: <<Id>>
event_id: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteCalendarEventResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteCalendarJob]]
== `client.ml.deleteCalendarJob()`
Delete anomaly jobs from a calendar.
{ref}/ml-delete-calendar-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteCalendarJobRequest, options?: TransportRequestOptions) => Promise<MlDeleteCalendarJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteCalendarJobRequest extends <<RequestBase>> {
calendar_id: <<Id>>
job_id: <<Ids>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlDeleteCalendarJobResponse {
calendar_id: <<Id>>
description?: string
job_ids: <<Ids>>
}
----
[discrete]
[[client.ml.deleteDataFrameAnalytics]]
== `client.ml.deleteDataFrameAnalytics()`
Delete a data frame analytics job.
{ref}/delete-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlDeleteDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteDataFrameAnalyticsRequest extends <<RequestBase>> {
id: <<Id>>
force?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteDataFrameAnalyticsResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteDatafeed]]
== `client.ml.deleteDatafeed()`
Delete a datafeed.
{ref}/ml-delete-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteDatafeedRequest, options?: TransportRequestOptions) => Promise<MlDeleteDatafeedResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteDatafeedRequest extends <<RequestBase>> {
datafeed_id: <<Id>>
force?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteDatafeedResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteExpiredData]]
== `client.ml.deleteExpiredData()`
Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
{ref}/ml-delete-expired-data.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteExpiredDataRequest, options?: TransportRequestOptions) => Promise<MlDeleteExpiredDataResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteExpiredDataRequest extends <<RequestBase>> {
job_id?: <<Id>>
requests_per_second?: <<float>>
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlDeleteExpiredDataResponse {
deleted: boolean
}
----
[discrete]
[[client.ml.deleteFilter]]
== `client.ml.deleteFilter()`
Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
{ref}/ml-delete-filter.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteFilterRequest, options?: TransportRequestOptions) => Promise<MlDeleteFilterResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteFilterRequest extends <<RequestBase>> {
filter_id: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteFilterResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteForecast]]
== `client.ml.deleteForecast()`
Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
{ref}/ml-delete-forecast.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteForecastRequest, options?: TransportRequestOptions) => Promise<MlDeleteForecastResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteForecastRequest extends <<RequestBase>> {
job_id: <<Id>>
forecast_id?: <<Id>>
allow_no_forecasts?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteForecastResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteJob]]
== `client.ml.deleteJob()`
Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
{ref}/ml-delete-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteJobRequest, options?: TransportRequestOptions) => Promise<MlDeleteJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteJobRequest extends <<RequestBase>> {
job_id: <<Id>>
force?: boolean
delete_user_annotations?: boolean
wait_for_completion?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteJobResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteModelSnapshot]]
== `client.ml.deleteModelSnapshot()`
Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API.
{ref}/ml-delete-snapshot.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteModelSnapshotRequest, options?: TransportRequestOptions) => Promise<MlDeleteModelSnapshotResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteModelSnapshotRequest extends <<RequestBase>> {
job_id: <<Id>>
snapshot_id: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteModelSnapshotResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteTrainedModel]]
== `client.ml.deleteTrainedModel()`
Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline.
{ref}/delete-trained-models.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteTrainedModelRequest, options?: TransportRequestOptions) => Promise<MlDeleteTrainedModelResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteTrainedModelRequest extends <<RequestBase>> {
model_id: <<Id>>
force?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteTrainedModelResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.deleteTrainedModelAlias]]
== `client.ml.deleteTrainedModelAlias()`
Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.
{ref}/delete-trained-models-aliases.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise<MlDeleteTrainedModelAliasResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlDeleteTrainedModelAliasRequest extends <<RequestBase>> {
model_alias: <<Name>>
model_id: <<Id>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlDeleteTrainedModelAliasResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.estimateModelMemory]]
== `client.ml.estimateModelMemory()`
Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references.
{ref}/ml-apis.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlEstimateModelMemoryRequest, options?: TransportRequestOptions) => Promise<MlEstimateModelMemoryResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlEstimateModelMemoryRequest extends <<RequestBase>> {
analysis_config?: <<MlAnalysisConfig>>
max_bucket_cardinality?: Record<<<Field>>, <<long>>>
overall_cardinality?: Record<<<Field>>, <<long>>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlEstimateModelMemoryResponse {
model_memory_estimate: string
}
----
[discrete]
[[client.ml.evaluateDataFrame]]
== `client.ml.evaluateDataFrame()`
Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
{ref}/evaluate-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlEvaluateDataFrameRequest, options?: TransportRequestOptions) => Promise<MlEvaluateDataFrameResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlEvaluateDataFrameRequest extends <<RequestBase>> {
evaluation: <<MlDataframeEvaluationContainer>>
index: <<IndexName>>
query?: <<QueryDslQueryContainer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlEvaluateDataFrameResponse {
classification?: MlEvaluateDataFrameDataframeClassificationSummary
outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary
regression?: MlEvaluateDataFrameDataframeRegressionSummary
}
----
[discrete]
[[client.ml.explainDataFrameAnalytics]]
== `client.ml.explainDataFrameAnalytics()`
Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
{ref}/explain-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlExplainDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlExplainDataFrameAnalyticsRequest extends <<RequestBase>> {
id?: <<Id>>
source?: <<MlDataframeAnalyticsSource>>
dest?: <<MlDataframeAnalyticsDestination>>
analysis?: <<MlDataframeAnalysisContainer>>
description?: string
model_memory_limit?: string
max_num_threads?: <<integer>>
analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[]
allow_lazy_start?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlExplainDataFrameAnalyticsResponse {
field_selection: <<MlDataframeAnalyticsFieldSelection>>[]
memory_estimation: <<MlDataframeAnalyticsMemoryEstimation>>
}
----
[discrete]
[[client.ml.flushJob]]
== `client.ml.flushJob()`
Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
{ref}/ml-flush-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlFlushJobRequest, options?: TransportRequestOptions) => Promise<MlFlushJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlFlushJobRequest extends <<RequestBase>> {
job_id: <<Id>>
advance_time?: <<DateTime>>
calc_interim?: boolean
end?: <<DateTime>>
skip_time?: <<DateTime>>
start?: <<DateTime>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlFlushJobResponse {
flushed: boolean
last_finalized_bucket_end?: <<integer>>
}
----
[discrete]
[[client.ml.forecast]]
== `client.ml.forecast()`
Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
{ref}/ml-forecast.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlForecastRequest, options?: TransportRequestOptions) => Promise<MlForecastResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlForecastRequest extends <<RequestBase>> {
job_id: <<Id>>
duration?: <<Duration>>
expires_in?: <<Duration>>
max_model_memory?: string
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlForecastResponse {
acknowledged: boolean
forecast_id: <<Id>>
}
----
[discrete]
[[client.ml.getBuckets]]
== `client.ml.getBuckets()`
Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
{ref}/ml-get-bucket.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetBucketsRequest, options?: TransportRequestOptions) => Promise<MlGetBucketsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetBucketsRequest extends <<RequestBase>> {
job_id: <<Id>>
timestamp?: <<DateTime>>
from?: <<integer>>
size?: <<integer>>
anomaly_score?: <<double>>
desc?: boolean
end?: <<DateTime>>
exclude_interim?: boolean
expand?: boolean
page?: <<MlPage>>
sort?: <<Field>>
start?: <<DateTime>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetBucketsResponse {
buckets: <<MlBucketSummary>>[]
count: <<long>>
}
----
[discrete]
[[client.ml.getCalendarEvents]]
== `client.ml.getCalendarEvents()`
Get info about events in calendars.
{ref}/ml-get-calendar-event.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetCalendarEventsRequest, options?: TransportRequestOptions) => Promise<MlGetCalendarEventsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetCalendarEventsRequest extends <<RequestBase>> {
calendar_id: <<Id>>
end?: <<DateTime>>
from?: <<integer>>
job_id?: <<Id>>
size?: <<integer>>
start?: <<DateTime>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetCalendarEventsResponse {
count: <<long>>
events: <<MlCalendarEvent>>[]
}
----
[discrete]
[[client.ml.getCalendars]]
== `client.ml.getCalendars()`
Get calendar configuration info.
{ref}/ml-get-calendar.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetCalendarsRequest, options?: TransportRequestOptions) => Promise<MlGetCalendarsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetCalendarsRequest extends <<RequestBase>> {
calendar_id?: <<Id>>
from?: <<integer>>
size?: <<integer>>
page?: <<MlPage>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetCalendarsResponse {
calendars: MlGetCalendarsCalendar[]
count: <<long>>
}
----
[discrete]
[[client.ml.getCategories]]
== `client.ml.getCategories()`
Get anomaly detection job results for categories.
{ref}/ml-get-category.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetCategoriesRequest, options?: TransportRequestOptions) => Promise<MlGetCategoriesResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetCategoriesRequest extends <<RequestBase>> {
job_id: <<Id>>
category_id?: <<CategoryId>>
from?: <<integer>>
partition_field_value?: string
size?: <<integer>>
page?: <<MlPage>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetCategoriesResponse {
categories: <<MlCategory>>[]
count: <<long>>
}
----
[discrete]
[[client.ml.getDataFrameAnalytics]]
== `client.ml.getDataFrameAnalytics()`
Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a list of data frame analytics jobs or a wildcard expression.
{ref}/get-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlGetDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetDataFrameAnalyticsRequest extends <<RequestBase>> {
id?: <<Id>>
allow_no_match?: boolean
from?: <<integer>>
size?: <<integer>>
exclude_generated?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetDataFrameAnalyticsResponse {
count: <<integer>>
data_frame_analytics: <<MlDataframeAnalyticsSummary>>[]
}
----
[discrete]
[[client.ml.getDataFrameAnalyticsStats]]
== `client.ml.getDataFrameAnalyticsStats()`
Get data frame analytics jobs usage info.
{ref}/get-dfanalytics-stats.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions) => Promise<MlGetDataFrameAnalyticsStatsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetDataFrameAnalyticsStatsRequest extends <<RequestBase>> {
id?: <<Id>>
allow_no_match?: boolean
from?: <<integer>>
size?: <<integer>>
verbose?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetDataFrameAnalyticsStatsResponse {
count: <<long>>
data_frame_analytics: <<MlDataframeAnalytics>>[]
}
----
[discrete]
[[client.ml.getDatafeedStats]]
== `client.ml.getDatafeedStats()`
Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
{ref}/ml-get-datafeed-stats.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetDatafeedStatsRequest, options?: TransportRequestOptions) => Promise<MlGetDatafeedStatsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetDatafeedStatsRequest extends <<RequestBase>> {
datafeed_id?: <<Ids>>
allow_no_match?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetDatafeedStatsResponse {
count: <<long>>
datafeeds: <<MlDatafeedStats>>[]
}
----
[discrete]
[[client.ml.getDatafeeds]]
== `client.ml.getDatafeeds()`
Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
{ref}/ml-get-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetDatafeedsRequest, options?: TransportRequestOptions) => Promise<MlGetDatafeedsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetDatafeedsRequest extends <<RequestBase>> {
datafeed_id?: <<Ids>>
allow_no_match?: boolean
exclude_generated?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetDatafeedsResponse {
count: <<long>>
datafeeds: <<MlDatafeed>>[]
}
----
[discrete]
[[client.ml.getFilters]]
== `client.ml.getFilters()`
Get filters. You can get a single filter or all filters.
{ref}/ml-get-filter.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetFiltersRequest, options?: TransportRequestOptions) => Promise<MlGetFiltersResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetFiltersRequest extends <<RequestBase>> {
filter_id?: <<Ids>>
from?: <<integer>>
size?: <<integer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetFiltersResponse {
count: <<long>>
filters: <<MlFilter>>[]
}
----
[discrete]
[[client.ml.getInfluencers]]
== `client.ml.getInfluencers()`
Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration.
{ref}/ml-get-influencer.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetInfluencersRequest, options?: TransportRequestOptions) => Promise<MlGetInfluencersResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetInfluencersRequest extends <<RequestBase>> {
job_id: <<Id>>
desc?: boolean
end?: <<DateTime>>
exclude_interim?: boolean
influencer_score?: <<double>>
from?: <<integer>>
size?: <<integer>>
sort?: <<Field>>
start?: <<DateTime>>
page?: <<MlPage>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetInfluencersResponse {
count: <<long>>
influencers: <<MlInfluencer>>[]
}
----
[discrete]
[[client.ml.getJobStats]]
== `client.ml.getJobStats()`
Get anomaly detection jobs usage info.
{ref}/ml-get-job-stats.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetJobStatsRequest, options?: TransportRequestOptions) => Promise<MlGetJobStatsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetJobStatsRequest extends <<RequestBase>> {
job_id?: <<Id>>
allow_no_match?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetJobStatsResponse {
count: <<long>>
jobs: <<MlJobStats>>[]
}
----
[discrete]
[[client.ml.getJobs]]
== `client.ml.getJobs()`
Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
{ref}/ml-get-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetJobsRequest, options?: TransportRequestOptions) => Promise<MlGetJobsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetJobsRequest extends <<RequestBase>> {
job_id?: <<Ids>>
allow_no_match?: boolean
exclude_generated?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetJobsResponse {
count: <<long>>
jobs: <<MlJob>>[]
}
----
[discrete]
[[client.ml.getMemoryStats]]
== `client.ml.getMemoryStats()`
Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
{ref}/get-ml-memory.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetMemoryStatsRequest, options?: TransportRequestOptions) => Promise<MlGetMemoryStatsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetMemoryStatsRequest extends <<RequestBase>> {
node_id?: <<Id>>
human?: boolean
master_timeout?: <<Duration>>
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetMemoryStatsResponse {
_nodes: <<NodeStatistics>>
cluster_name: <<Name>>
nodes: Record<<<Id>>, MlGetMemoryStatsMemory>
}
----
[discrete]
[[client.ml.getModelSnapshotUpgradeStats]]
== `client.ml.getModelSnapshotUpgradeStats()`
Get anomaly detection job model snapshot upgrade usage info.
{ref}/ml-get-job-model-snapshot-upgrade-stats.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions) => Promise<MlGetModelSnapshotUpgradeStatsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetModelSnapshotUpgradeStatsRequest extends <<RequestBase>> {
job_id: <<Id>>
snapshot_id: <<Id>>
allow_no_match?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetModelSnapshotUpgradeStatsResponse {
count: <<long>>
model_snapshot_upgrades: <<MlModelSnapshotUpgrade>>[]
}
----
[discrete]
[[client.ml.getModelSnapshots]]
== `client.ml.getModelSnapshots()`
Get model snapshots info.
{ref}/ml-get-snapshot.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise<MlGetModelSnapshotsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetModelSnapshotsRequest extends <<RequestBase>> {
job_id: <<Id>>
snapshot_id?: <<Id>>
from?: <<integer>>
size?: <<integer>>
desc?: boolean
end?: <<DateTime>>
page?: <<MlPage>>
sort?: <<Field>>
start?: <<DateTime>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetModelSnapshotsResponse {
count: <<long>>
model_snapshots: <<MlModelSnapshot>>[]
}
----
[discrete]
[[client.ml.getOverallBuckets]]
== `client.ml.getOverallBuckets()`
Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
{ref}/ml-get-overall-buckets.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise<MlGetOverallBucketsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetOverallBucketsRequest extends <<RequestBase>> {
job_id: <<Id>>
allow_no_match?: boolean
bucket_span?: <<Duration>>
end?: <<DateTime>>
exclude_interim?: boolean
overall_score?: <<double>> | string
start?: <<DateTime>>
top_n?: <<integer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetOverallBucketsResponse {
count: <<long>>
overall_buckets: <<MlOverallBucket>>[]
}
----
[discrete]
[[client.ml.getRecords]]
== `client.ml.getRecords()`
Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
{ref}/ml-get-record.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetRecordsRequest, options?: TransportRequestOptions) => Promise<MlGetRecordsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetRecordsRequest extends <<RequestBase>> {
job_id: <<Id>>
from?: <<integer>>
size?: <<integer>>
desc?: boolean
end?: <<DateTime>>
exclude_interim?: boolean
page?: <<MlPage>>
record_score?: <<double>>
sort?: <<Field>>
start?: <<DateTime>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetRecordsResponse {
count: <<long>>
records: <<MlAnomaly>>[]
}
----
[discrete]
[[client.ml.getTrainedModels]]
== `client.ml.getTrainedModels()`
Get trained model configuration info.
{ref}/get-trained-models.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetTrainedModelsRequest, options?: TransportRequestOptions) => Promise<MlGetTrainedModelsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetTrainedModelsRequest extends <<RequestBase>> {
model_id?: <<Ids>>
allow_no_match?: boolean
decompress_definition?: boolean
exclude_generated?: boolean
from?: <<integer>>
include?: <<MlInclude>>
size?: <<integer>>
tags?: string | string[]
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetTrainedModelsResponse {
count: <<integer>>
trained_model_configs: <<MlTrainedModelConfig>>[]
}
----
[discrete]
[[client.ml.getTrainedModelsStats]]
== `client.ml.getTrainedModelsStats()`
Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression.
{ref}/get-trained-models-stats.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions) => Promise<MlGetTrainedModelsStatsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlGetTrainedModelsStatsRequest extends <<RequestBase>> {
model_id?: <<Ids>>
allow_no_match?: boolean
from?: <<integer>>
size?: <<integer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlGetTrainedModelsStatsResponse {
count: <<integer>>
trained_model_stats: <<MlTrainedModelStats>>[]
}
----
[discrete]
[[client.ml.inferTrainedModel]]
== `client.ml.inferTrainedModel()`
Evaluate a trained model.
{ref}/infer-trained-model.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlInferTrainedModelRequest, options?: TransportRequestOptions) => Promise<MlInferTrainedModelResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlInferTrainedModelRequest extends <<RequestBase>> {
model_id: <<Id>>
timeout?: <<Duration>>
docs: Record<string, any>[]
inference_config?: <<MlInferenceConfigUpdateContainer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlInferTrainedModelResponse {
inference_results: <<MlInferenceResponseResult>>[]
}
----
[discrete]
[[client.ml.info]]
== `client.ml.info()`
Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
{ref}/get-ml-info.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlInfoRequest, options?: TransportRequestOptions) => Promise<MlInfoResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlInfoRequest extends <<RequestBase>> {}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlInfoResponse {
defaults: MlInfoDefaults
limits: MlInfoLimits
upgrade_mode: boolean
native_code: MlInfoNativeCode
}
----
[discrete]
[[client.ml.openJob]]
== `client.ml.openJob()`
Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
{ref}/ml-open-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlOpenJobRequest, options?: TransportRequestOptions) => Promise<MlOpenJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlOpenJobRequest extends <<RequestBase>> {
job_id: <<Id>>
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlOpenJobResponse {
opened: boolean
node: <<NodeId>>
}
----
[discrete]
[[client.ml.postCalendarEvents]]
== `client.ml.postCalendarEvents()`
Add scheduled events to the calendar.
{ref}/ml-post-calendar-event.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPostCalendarEventsRequest, options?: TransportRequestOptions) => Promise<MlPostCalendarEventsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPostCalendarEventsRequest extends <<RequestBase>> {
calendar_id: <<Id>>
events: <<MlCalendarEvent>>[]
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPostCalendarEventsResponse {
events: <<MlCalendarEvent>>[]
}
----
[discrete]
[[client.ml.postData]]
== `client.ml.postData()`
Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a list.
{ref}/ml-post-data.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPostDataRequest, options?: TransportRequestOptions) => Promise<MlPostDataResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPostDataRequest<TData = unknown> extends <<RequestBase>> {
job_id: <<Id>>
reset_end?: <<DateTime>>
reset_start?: <<DateTime>>
data?: TData[]
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPostDataResponse {
bucket_count: <<long>>
earliest_record_timestamp: <<long>>
empty_bucket_count: <<long>>
input_bytes: <<long>>
input_field_count: <<long>>
input_record_count: <<long>>
invalid_date_count: <<long>>
job_id: <<Id>>
last_data_time: <<integer>>
latest_record_timestamp: <<long>>
missing_field_count: <<long>>
out_of_order_timestamp_count: <<long>>
processed_field_count: <<long>>
processed_record_count: <<long>>
sparse_bucket_count: <<long>>
}
----
[discrete]
[[client.ml.previewDataFrameAnalytics]]
== `client.ml.previewDataFrameAnalytics()`
Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config.
{ref}/preview-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlPreviewDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPreviewDataFrameAnalyticsRequest extends <<RequestBase>> {
id?: <<Id>>
config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPreviewDataFrameAnalyticsResponse {
feature_values: Record<<<Field>>, string>[]
}
----
[discrete]
[[client.ml.previewDatafeed]]
== `client.ml.previewDatafeed()`
Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
{ref}/ml-preview-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPreviewDatafeedRequest, options?: TransportRequestOptions) => Promise<MlPreviewDatafeedResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPreviewDatafeedRequest extends <<RequestBase>> {
datafeed_id?: <<Id>>
start?: <<DateTime>>
end?: <<DateTime>>
datafeed_config?: <<MlDatafeedConfig>>
job_config?: <<MlJobConfig>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlPreviewDatafeedResponse<TDocument = unknown> = TDocument[]
----
[discrete]
[[client.ml.putCalendar]]
== `client.ml.putCalendar()`
Create a calendar.
{ref}/ml-put-calendar.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutCalendarRequest, options?: TransportRequestOptions) => Promise<MlPutCalendarResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutCalendarRequest extends <<RequestBase>> {
calendar_id: <<Id>>
job_ids?: <<Id>>[]
description?: string
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPutCalendarResponse {
calendar_id: <<Id>>
description?: string
job_ids: <<Ids>>
}
----
[discrete]
[[client.ml.putCalendarJob]]
== `client.ml.putCalendarJob()`
Add an anomaly detection job to a calendar.
{ref}/ml-put-calendar-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutCalendarJobRequest, options?: TransportRequestOptions) => Promise<MlPutCalendarJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutCalendarJobRequest extends <<RequestBase>> {
calendar_id: <<Id>>
job_id: <<Ids>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPutCalendarJobResponse {
calendar_id: <<Id>>
description?: string
job_ids: <<Ids>>
}
----
[discrete]
[[client.ml.putDataFrameAnalytics]]
== `client.ml.putDataFrameAnalytics()`
Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index.
{ref}/put-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlPutDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutDataFrameAnalyticsRequest extends <<RequestBase>> {
id: <<Id>>
allow_lazy_start?: boolean
analysis: <<MlDataframeAnalysisContainer>>
analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[]
description?: string
dest: <<MlDataframeAnalyticsDestination>>
max_num_threads?: <<integer>>
model_memory_limit?: string
source: <<MlDataframeAnalyticsSource>>
headers?: <<HttpHeaders>>
version?: <<VersionString>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPutDataFrameAnalyticsResponse {
authorization?: <<MlDataframeAnalyticsAuthorization>>
allow_lazy_start: boolean
analysis: <<MlDataframeAnalysisContainer>>
analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[]
create_time: <<EpochTime>><<<UnitMillis>>>
description?: string
dest: <<MlDataframeAnalyticsDestination>>
id: <<Id>>
max_num_threads: <<integer>>
model_memory_limit: string
source: <<MlDataframeAnalyticsSource>>
version: <<VersionString>>
}
----
[discrete]
[[client.ml.putDatafeed]]
== `client.ml.putDatafeed()`
Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
{ref}/ml-put-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutDatafeedRequest, options?: TransportRequestOptions) => Promise<MlPutDatafeedResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutDatafeedRequest extends <<RequestBase>> {
datafeed_id: <<Id>>
allow_no_indices?: boolean
expand_wildcards?: <<ExpandWildcards>>
ignore_throttled?: boolean
ignore_unavailable?: boolean
aggregations?: Record<string, <<AggregationsAggregationContainer>>>
chunking_config?: <<MlChunkingConfig>>
delayed_data_check_config?: <<MlDelayedDataCheckConfig>>
frequency?: <<Duration>>
indices?: <<Indices>>
pass:[/**] @alias indices */
indexes?: <<Indices>>
indices_options?: <<IndicesOptions>>
job_id?: <<Id>>
max_empty_searches?: <<integer>>
query?: <<QueryDslQueryContainer>>
query_delay?: <<Duration>>
runtime_mappings?: <<MappingRuntimeFields>>
script_fields?: Record<string, <<ScriptField>>>
scroll_size?: <<integer>>
headers?: <<HttpHeaders>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPutDatafeedResponse {
aggregations?: Record<string, <<AggregationsAggregationContainer>>>
authorization?: <<MlDatafeedAuthorization>>
chunking_config: <<MlChunkingConfig>>
delayed_data_check_config?: <<MlDelayedDataCheckConfig>>
datafeed_id: <<Id>>
frequency?: <<Duration>>
indices: string[]
job_id: <<Id>>
indices_options?: <<IndicesOptions>>
max_empty_searches?: <<integer>>
query: <<QueryDslQueryContainer>>
query_delay: <<Duration>>
runtime_mappings?: <<MappingRuntimeFields>>
script_fields?: Record<string, <<ScriptField>>>
scroll_size: <<integer>>
}
----
[discrete]
[[client.ml.putFilter]]
== `client.ml.putFilter()`
Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
{ref}/ml-put-filter.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutFilterRequest, options?: TransportRequestOptions) => Promise<MlPutFilterResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutFilterRequest extends <<RequestBase>> {
filter_id: <<Id>>
description?: string
items?: string[]
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPutFilterResponse {
description: string
filter_id: <<Id>>
items: string[]
}
----
[discrete]
[[client.ml.putJob]]
== `client.ml.putJob()`
Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index.
{ref}/ml-put-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutJobRequest, options?: TransportRequestOptions) => Promise<MlPutJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutJobRequest extends <<RequestBase>> {
job_id: <<Id>>
allow_lazy_open?: boolean
analysis_config: <<MlAnalysisConfig>>
analysis_limits?: <<MlAnalysisLimits>>
background_persist_interval?: <<Duration>>
custom_settings?: <<MlCustomSettings>>
daily_model_snapshot_retention_after_days?: <<long>>
data_description: <<MlDataDescription>>
datafeed_config?: <<MlDatafeedConfig>>
description?: string
groups?: string[]
model_plot_config?: <<MlModelPlotConfig>>
model_snapshot_retention_days?: <<long>>
renormalization_window_days?: <<long>>
results_index_name?: <<IndexName>>
results_retention_days?: <<long>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlPutJobResponse {
allow_lazy_open: boolean
analysis_config: <<MlAnalysisConfigRead>>
analysis_limits: <<MlAnalysisLimits>>
background_persist_interval?: <<Duration>>
create_time: <<DateTime>>
custom_settings?: <<MlCustomSettings>>
daily_model_snapshot_retention_after_days: <<long>>
data_description: <<MlDataDescription>>
datafeed_config?: <<MlDatafeed>>
description?: string
groups?: string[]
job_id: <<Id>>
job_type: string
job_version: string
model_plot_config?: <<MlModelPlotConfig>>
model_snapshot_id?: <<Id>>
model_snapshot_retention_days: <<long>>
renormalization_window_days?: <<long>>
results_index_name: string
results_retention_days?: <<long>>
}
----
[discrete]
[[client.ml.putTrainedModel]]
== `client.ml.putTrainedModel()`
Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.
{ref}/put-trained-models.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutTrainedModelRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutTrainedModelRequest extends <<RequestBase>> {
model_id: <<Id>>
defer_definition_decompression?: boolean
wait_for_completion?: boolean
compressed_definition?: string
definition?: MlPutTrainedModelDefinition
description?: string
inference_config?: <<MlInferenceConfigCreateContainer>>
input?: MlPutTrainedModelInput
metadata?: any
model_type?: <<MlTrainedModelType>>
model_size_bytes?: <<long>>
platform_architecture?: string
tags?: string[]
prefix_strings?: <<MlTrainedModelPrefixStrings>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlPutTrainedModelResponse = <<MlTrainedModelConfig>>
----
[discrete]
[[client.ml.putTrainedModelAlias]]
== `client.ml.putTrainedModelAlias()`
Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
{ref}/put-trained-models-aliases.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelAliasResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutTrainedModelAliasRequest extends <<RequestBase>> {
model_alias: <<Name>>
model_id: <<Id>>
reassign?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlPutTrainedModelAliasResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.putTrainedModelDefinitionPart]]
== `client.ml.putTrainedModelDefinitionPart()`
Create part of a trained model definition.
{ref}/put-trained-model-definition-part.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelDefinitionPartResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutTrainedModelDefinitionPartRequest extends <<RequestBase>> {
model_id: <<Id>>
part: <<integer>>
definition: string
total_definition_length: <<long>>
total_parts: <<integer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlPutTrainedModelDefinitionPartResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.putTrainedModelVocabulary]]
== `client.ml.putTrainedModelVocabulary()`
Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
{ref}/put-trained-model-vocabulary.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelVocabularyResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlPutTrainedModelVocabularyRequest extends <<RequestBase>> {
model_id: <<Id>>
vocabulary: string[]
merges?: string[]
scores?: <<double>>[]
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlPutTrainedModelVocabularyResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.resetJob]]
== `client.ml.resetJob()`
Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list.
{ref}/ml-reset-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlResetJobRequest, options?: TransportRequestOptions) => Promise<MlResetJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlResetJobRequest extends <<RequestBase>> {
job_id: <<Id>>
wait_for_completion?: boolean
delete_user_annotations?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlResetJobResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.revertModelSnapshot]]
== `client.ml.revertModelSnapshot()`
Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
{ref}/ml-revert-snapshot.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlRevertModelSnapshotRequest, options?: TransportRequestOptions) => Promise<MlRevertModelSnapshotResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlRevertModelSnapshotRequest extends <<RequestBase>> {
job_id: <<Id>>
snapshot_id: <<Id>>
delete_intervening_results?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlRevertModelSnapshotResponse {
model: <<MlModelSnapshot>>
}
----
[discrete]
[[client.ml.setUpgradeMode]]
== `client.ml.setUpgradeMode()`
Set upgrade_mode for ML indices. Sets a cluster-wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
{ref}/ml-set-upgrade-mode.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlSetUpgradeModeRequest, options?: TransportRequestOptions) => Promise<MlSetUpgradeModeResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlSetUpgradeModeRequest extends <<RequestBase>> {
enabled?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
type MlSetUpgradeModeResponse = <<AcknowledgedResponseBase>>
----
[discrete]
[[client.ml.startDataFrameAnalytics]]
== `client.ml.startDataFrameAnalytics()`
Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
{ref}/start-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlStartDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlStartDataFrameAnalyticsRequest extends <<RequestBase>> {
id: <<Id>>
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlStartDataFrameAnalyticsResponse {
acknowledged: boolean
node: <<NodeId>>
}
----
[discrete]
[[client.ml.startDatafeed]]
== `client.ml.startDatafeed()`
Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
{ref}/ml-start-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlStartDatafeedRequest, options?: TransportRequestOptions) => Promise<MlStartDatafeedResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlStartDatafeedRequest extends <<RequestBase>> {
datafeed_id: <<Id>>
end?: <<DateTime>>
start?: <<DateTime>>
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlStartDatafeedResponse {
node: <<NodeIds>>
started: boolean
}
----
[discrete]
[[client.ml.startTrainedModelDeployment]]
== `client.ml.startTrainedModelDeployment()`
Start a trained model deployment. It allocates the model to every machine learning node.
{ref}/start-trained-model-deployment.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise<MlStartTrainedModelDeploymentResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlStartTrainedModelDeploymentRequest extends <<RequestBase>> {
model_id: <<Id>>
cache_size?: <<ByteSize>>
deployment_id?: string
number_of_allocations?: <<integer>>
priority?: <<MlTrainingPriority>>
queue_capacity?: <<integer>>
threads_per_allocation?: <<integer>>
timeout?: <<Duration>>
wait_for?: <<MlDeploymentAllocationState>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlStartTrainedModelDeploymentResponse {
assignment: <<MlTrainedModelAssignment>>
}
----
[discrete]
[[client.ml.stopDataFrameAnalytics]]
== `client.ml.stopDataFrameAnalytics()`
Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
{ref}/stop-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlStopDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlStopDataFrameAnalyticsRequest extends <<RequestBase>> {
id: <<Id>>
allow_no_match?: boolean
force?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlStopDataFrameAnalyticsResponse {
stopped: boolean
}
----
[discrete]
[[client.ml.stopDatafeed]]
== `client.ml.stopDatafeed()`
Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
{ref}/ml-stop-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlStopDatafeedRequest, options?: TransportRequestOptions) => Promise<MlStopDatafeedResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlStopDatafeedRequest extends <<RequestBase>> {
datafeed_id: <<Id>>
allow_no_match?: boolean
force?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlStopDatafeedResponse {
stopped: boolean
}
----
[discrete]
[[client.ml.stopTrainedModelDeployment]]
== `client.ml.stopTrainedModelDeployment()`
Stop a trained model deployment.
{ref}/stop-trained-model-deployment.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise<MlStopTrainedModelDeploymentResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlStopTrainedModelDeploymentRequest extends <<RequestBase>> {
model_id: <<Id>>
allow_no_match?: boolean
force?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlStopTrainedModelDeploymentResponse {
stopped: boolean
}
----
[discrete]
[[client.ml.updateDataFrameAnalytics]]
== `client.ml.updateDataFrameAnalytics()`
Update a data frame analytics job.
{ref}/update-dfanalytics.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlUpdateDataFrameAnalyticsResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpdateDataFrameAnalyticsRequest extends <<RequestBase>> {
id: <<Id>>
description?: string
model_memory_limit?: string
max_num_threads?: <<integer>>
allow_lazy_start?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpdateDataFrameAnalyticsResponse {
authorization?: <<MlDataframeAnalyticsAuthorization>>
allow_lazy_start: boolean
analysis: <<MlDataframeAnalysisContainer>>
analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[]
create_time: <<long>>
description?: string
dest: <<MlDataframeAnalyticsDestination>>
id: <<Id>>
max_num_threads: <<integer>>
model_memory_limit: string
source: <<MlDataframeAnalyticsSource>>
version: <<VersionString>>
}
----
[discrete]
[[client.ml.updateDatafeed]]
== `client.ml.updateDatafeed()`
Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
{ref}/ml-update-datafeed.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpdateDatafeedRequest, options?: TransportRequestOptions) => Promise<MlUpdateDatafeedResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpdateDatafeedRequest extends <<RequestBase>> {
datafeed_id: <<Id>>
allow_no_indices?: boolean
expand_wildcards?: <<ExpandWildcards>>
ignore_throttled?: boolean
ignore_unavailable?: boolean
aggregations?: Record<string, <<AggregationsAggregationContainer>>>
chunking_config?: <<MlChunkingConfig>>
delayed_data_check_config?: <<MlDelayedDataCheckConfig>>
frequency?: <<Duration>>
indices?: string[]
pass:[/**] @alias indices */
indexes?: string[]
indices_options?: <<IndicesOptions>>
job_id?: <<Id>>
max_empty_searches?: <<integer>>
query?: <<QueryDslQueryContainer>>
query_delay?: <<Duration>>
runtime_mappings?: <<MappingRuntimeFields>>
script_fields?: Record<string, <<ScriptField>>>
scroll_size?: <<integer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpdateDatafeedResponse {
authorization?: <<MlDatafeedAuthorization>>
aggregations?: Record<string, <<AggregationsAggregationContainer>>>
chunking_config: <<MlChunkingConfig>>
delayed_data_check_config?: <<MlDelayedDataCheckConfig>>
datafeed_id: <<Id>>
frequency?: <<Duration>>
indices: string[]
indices_options?: <<IndicesOptions>>
job_id: <<Id>>
max_empty_searches?: <<integer>>
query: <<QueryDslQueryContainer>>
query_delay: <<Duration>>
runtime_mappings?: <<MappingRuntimeFields>>
script_fields?: Record<string, <<ScriptField>>>
scroll_size: <<integer>>
}
----
[discrete]
[[client.ml.updateFilter]]
== `client.ml.updateFilter()`
Update a filter. Updates the description of a filter, adds items, or removes items from the list.
{ref}/ml-update-filter.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpdateFilterRequest, options?: TransportRequestOptions) => Promise<MlUpdateFilterResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpdateFilterRequest extends <<RequestBase>> {
filter_id: <<Id>>
add_items?: string[]
description?: string
remove_items?: string[]
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpdateFilterResponse {
description: string
filter_id: <<Id>>
items: string[]
}
----
[discrete]
[[client.ml.updateJob]]
== `client.ml.updateJob()`
Update an anomaly detection job. Updates certain properties of an anomaly detection job.
{ref}/ml-update-job.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpdateJobRequest, options?: TransportRequestOptions) => Promise<MlUpdateJobResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpdateJobRequest extends <<RequestBase>> {
job_id: <<Id>>
allow_lazy_open?: boolean
analysis_limits?: <<MlAnalysisMemoryLimit>>
background_persist_interval?: <<Duration>>
custom_settings?: Record<string, any>
categorization_filters?: string[]
description?: string
model_plot_config?: <<MlModelPlotConfig>>
model_prune_window?: <<Duration>>
daily_model_snapshot_retention_after_days?: <<long>>
model_snapshot_retention_days?: <<long>>
renormalization_window_days?: <<long>>
results_retention_days?: <<long>>
groups?: string[]
detectors?: <<MlDetector>>[]
per_partition_categorization?: <<MlPerPartitionCategorization>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpdateJobResponse {
allow_lazy_open: boolean
analysis_config: <<MlAnalysisConfigRead>>
analysis_limits: <<MlAnalysisLimits>>
background_persist_interval?: <<Duration>>
create_time: <<EpochTime>><<<UnitMillis>>>
finished_time?: <<EpochTime>><<<UnitMillis>>>
custom_settings?: Record<string, string>
daily_model_snapshot_retention_after_days: <<long>>
data_description: <<MlDataDescription>>
datafeed_config?: <<MlDatafeed>>
description?: string
groups?: string[]
job_id: <<Id>>
job_type: string
job_version: <<VersionString>>
model_plot_config?: <<MlModelPlotConfig>>
model_snapshot_id?: <<Id>>
model_snapshot_retention_days: <<long>>
renormalization_window_days?: <<long>>
results_index_name: <<IndexName>>
results_retention_days?: <<long>>
}
----
[discrete]
[[client.ml.updateModelSnapshot]]
== `client.ml.updateModelSnapshot()`
Update a snapshot. Updates certain properties of a snapshot.
{ref}/ml-update-snapshot.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpdateModelSnapshotRequest, options?: TransportRequestOptions) => Promise<MlUpdateModelSnapshotResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpdateModelSnapshotRequest extends <<RequestBase>> {
job_id: <<Id>>
snapshot_id: <<Id>>
description?: string
retain?: boolean
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpdateModelSnapshotResponse {
acknowledged: boolean
model: <<MlModelSnapshot>>
}
----
[discrete]
[[client.ml.updateTrainedModelDeployment]]
== `client.ml.updateTrainedModelDeployment()`
Update a trained model deployment.
{ref}/update-trained-model-deployment.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise<MlUpdateTrainedModelDeploymentResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpdateTrainedModelDeploymentRequest extends <<RequestBase>> {
model_id: <<Id>>
number_of_allocations?: <<integer>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpdateTrainedModelDeploymentResponse {
assignment: <<MlTrainedModelAssignment>>
}
----
[discrete]
[[client.ml.upgradeJobSnapshot]]
== `client.ml.upgradeJobSnapshot()`
Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job.
{ref}/ml-upgrade-job-model-snapshot.html[{es} documentation]
[discrete]
=== Function signature
[source,ts]
----
(request: MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions) => Promise<MlUpgradeJobSnapshotResponse>
----
[discrete]
=== Request
[source,ts,subs=+macros]
----
interface MlUpgradeJobSnapshotRequest extends <<RequestBase>> {
job_id: <<Id>>
snapshot_id: <<Id>>
wait_for_completion?: boolean
timeout?: <<Duration>>
}
----
[discrete]
=== Response
[source,ts,subs=+macros]
----
interface MlUpgradeJobSnapshotResponse {
node: <<NodeId>>
completed: boolean
}
----