[[reference-shared-types-ml]]
////////
===========================================================================================================================
|| ||
|| ||
|| ||
|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ ||
|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ ||
|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ ||
|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ||
|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ ||
|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ ||
|| ||
|| ||
|| This file is autogenerated, DO NOT send pull requests that change this file directly. ||
|| You should update the script that does the generation, which can be found in: ||
|| https://github.com/elastic/elastic-client-generator-js ||
|| ||
|| You can run the script with the following command: ||
|| npm run elasticsearch -- --version <version> ||
|| ||
|| ||
|| ||
===========================================================================================================================
////////
== Shared Ml types
[discrete]
[[MlAnalysisConfig]]
=== MlAnalysisConfig
[pass]
++++
<pre>
++++
interface MlAnalysisConfig {
pass:[/**] @property bucket_span The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a whole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */
bucket_span?: <<Duration>>
pass:[/**] @property categorization_analyzer If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */
categorization_analyzer?: <<MlCategorizationAnalyzer>>
pass:[/**] @property categorization_field_name If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */
categorization_field_name?: <<Field>>
pass:[/**] @property categorization_filters If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */
categorization_filters?: string[]
pass:[/**] @property detectors Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */
detectors: <<MlDetector>>[]
pass:[/**] @property influencers A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */
influencers?: <<Field>>[]
pass:[/**] @property latency The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */
latency?: <<Duration>>
pass:[/**] @property model_prune_window Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */
model_prune_window?: <<Duration>>
pass:[/**] @property multivariate_by_fields This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you'll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */
multivariate_by_fields?: boolean
pass:[/**] @property per_partition_categorization Settings related to how categorization interacts with partition fields. */
per_partition_categorization?: <<MlPerPartitionCategorization>>
pass:[/**] @property summary_count_field_name If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. */
summary_count_field_name?: <<Field>>
}
[pass]
++++
</pre>
++++
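As a usage sketch, an `MlAnalysisConfig` is passed as the `analysis_config` when creating an anomaly detection job. The node URL, job ID, and field names below are assumptions for illustration, and the type import path may vary between client versions.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
// Type import path is an assumption; it may differ between client versions.
import type { MlAnalysisConfig } from '@elastic/elasticsearch/lib/api/types'

const client = new Client({ node: 'http://localhost:9200' })

// A single count detector aggregated into 15-minute buckets
// (hypothetical values chosen for illustration).
const analysisConfig: MlAnalysisConfig = {
  bucket_span: '15m',
  detectors: [{ function: 'count' }],
  influencers: ['host.name']
}

await client.ml.putJob({
  job_id: 'example-count-job', // hypothetical job ID
  analysis_config: analysisConfig,
  data_description: { time_field: '@timestamp' }
})
----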
[discrete]
[[MlAnalysisConfigRead]]
=== MlAnalysisConfigRead
[pass]
++++
<pre>
++++
interface MlAnalysisConfigRead {
pass:[/**] @property bucket_span The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */
bucket_span: <<Duration>>
pass:[/**] @property categorization_analyzer If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */
categorization_analyzer?: <<MlCategorizationAnalyzer>>
pass:[/**] @property categorization_field_name If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */
categorization_field_name?: <<Field>>
pass:[/**] @property categorization_filters If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. */
categorization_filters?: string[]
pass:[/**] @property detectors An array of detector configuration objects. Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. */
detectors: <<MlDetectorRead>>[]
pass:[/**] @property influencers A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */
influencers: <<Field>>[]
pass:[/**] @property model_prune_window Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. Typically, set to `30d` or longer. If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */
model_prune_window?: <<Duration>>
pass:[/**] @property latency The size of the window in which to expect data that is out of time order. Defaults to no latency. If you specify a non-zero value, it must be greater than or equal to one second. */
latency?: <<Duration>>
pass:[/**] @property multivariate_by_fields This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */
multivariate_by_fields?: boolean
pass:[/**] @property per_partition_categorization Settings related to how categorization interacts with partition fields. */
per_partition_categorization?: <<MlPerPartitionCategorization>>
pass:[/**] @property summary_count_field_name If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. */
summary_count_field_name?: <<Field>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlAnalysisLimits]]
=== MlAnalysisLimits
[pass]
++++
<pre>
++++
interface MlAnalysisLimits {
pass:[/**] @property categorization_examples_limit The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */
categorization_examples_limit?: <<long>>
pass:[/**] @property model_memory_limit The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a byte size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */
model_memory_limit?: string
}
[pass]
++++
</pre>
++++
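A short sketch of how these limits might look when supplied with a job; the values are assumptions for illustration, and the type import path may differ between client versions.

[source,ts]
----
// Type import path is an assumption; it may differ between client versions.
import type { MlAnalysisLimits } from '@elastic/elasticsearch/lib/api/types'

// A string with an explicit unit is recommended for clarity; a bare
// number would be interpreted as MiB.
const analysisLimits: MlAnalysisLimits = {
  model_memory_limit: '512mb',      // hypothetical limit
  categorization_examples_limit: 4  // fewer stored examples per category
}
----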
[discrete]
[[MlAnalysisMemoryLimit]]
=== MlAnalysisMemoryLimit
[pass]
++++
<pre>
++++
interface MlAnalysisMemoryLimit {
pass:[/**] @property model_memory_limit Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */
model_memory_limit: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlAnomaly]]
=== MlAnomaly
[pass]
++++
<pre>
++++
interface MlAnomaly {
pass:[/**] @property actual The actual value for the bucket. */
actual?: <<double>>[]
pass:[/**] @property anomaly_score_explanation Information about the factors impacting the initial anomaly score. */
anomaly_score_explanation?: <<MlAnomalyExplanation>>
pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */
bucket_span: <<DurationValue>><<<UnitSeconds>>>
pass:[/**] @property by_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */
by_field_name?: string
pass:[/**] @property by_field_value The value of `by_field_name`. */
by_field_value?: string
pass:[/**] @property causes For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability and scores are not applicable to causes. */
causes?: <<MlAnomalyCause>>[]
pass:[/**] @property detector_index A unique identifier for the detector. */
detector_index: <<integer>>
pass:[/**] @property field_name Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */
field_name?: string
pass:[/**] @property function The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */
function?: string
pass:[/**] @property function_description The description of the function in which the anomaly occurs, as specified in the detector configuration. */
function_description?: string
pass:[/**] @property geo_results If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */
geo_results?: <<MlGeoResults>>
pass:[/**] @property influencers If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */
influencers?: <<MlInfluence>>[]
pass:[/**] @property initial_record_score A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */
initial_record_score: <<double>>
pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */
is_interim: boolean
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: string
pass:[/**] @property over_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */
over_field_name?: string
pass:[/**] @property over_field_value The value of `over_field_name`. */
over_field_value?: string
pass:[/**] @property partition_field_name The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */
partition_field_name?: string
pass:[/**] @property partition_field_value The value of `partition_field_name`. */
partition_field_value?: string
pass:[/**] @property probability The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. */
probability: <<double>>
pass:[/**] @property record_score A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */
record_score: <<double>>
pass:[/**] @property result_type Internal. This is always set to `record`. */
result_type: string
pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property typical The typical value for the bucket, according to analytical modeling. */
typical?: <<double>>[]
}
[pass]
++++
</pre>
++++
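Records of this shape are returned by the get records API. A minimal sketch, assuming a configured client and the hypothetical job from the earlier example:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch anomaly records with a record_score of at least 75
// (job ID and threshold are assumptions for illustration).
const response = await client.ml.getRecords({
  job_id: 'example-count-job',
  record_score: 75
})

for (const record of response.records) {
  console.log(record.timestamp, record.record_score, record.function)
}
----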
[discrete]
[[MlAnomalyCause]]
=== MlAnomalyCause
[pass]
++++
<pre>
++++
interface MlAnomalyCause {
actual: <<double>>[]
by_field_name: <<Name>>
by_field_value: string
correlated_by_field_value: string
field_name: <<Field>>
function: string
function_description: string
influencers: <<MlInfluence>>[]
over_field_name: <<Name>>
over_field_value: string
partition_field_name: string
partition_field_value: string
probability: <<double>>
typical: <<double>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlAnomalyExplanation]]
=== MlAnomalyExplanation
[pass]
++++
<pre>
++++
interface MlAnomalyExplanation {
pass:[/**] @property anomaly_characteristics_impact Impact from the duration and magnitude of the detected anomaly relative to the historical average. */
anomaly_characteristics_impact?: <<integer>>
pass:[/**] @property anomaly_length Length of the detected anomaly in the number of buckets. */
anomaly_length?: <<integer>>
pass:[/**] @property anomaly_type Type of the detected anomaly: `spike` or `dip`. */
anomaly_type?: string
pass:[/**] @property high_variance_penalty Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */
high_variance_penalty?: boolean
pass:[/**] @property incomplete_bucket_penalty If the bucket contains fewer samples than expected, the score is reduced. */
incomplete_bucket_penalty?: boolean
pass:[/**] @property lower_confidence_bound Lower bound of the 95% confidence interval. */
lower_confidence_bound?: <<double>>
pass:[/**] @property multi_bucket_impact Impact of the deviation between actual and typical values in the past 12 buckets. */
multi_bucket_impact?: <<integer>>
pass:[/**] @property single_bucket_impact Impact of the deviation between actual and typical values in the current bucket. */
single_bucket_impact?: <<integer>>
pass:[/**] @property typical_value Typical (expected) value for this bucket. */
typical_value?: <<double>>
pass:[/**] @property upper_confidence_bound Upper bound of the 95% confidence interval. */
upper_confidence_bound?: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlApiKeyAuthorization]]
=== MlApiKeyAuthorization
[pass]
++++
<pre>
++++
interface MlApiKeyAuthorization {
pass:[/**] @property id The identifier for the API key. */
id: string
pass:[/**] @property name The name of the API key. */
name: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlAppliesTo]]
=== MlAppliesTo
[pass]
++++
<pre>
++++
type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time'
[pass]
++++
</pre>
++++
[discrete]
[[MlBucketInfluencer]]
=== MlBucketInfluencer
[pass]
++++
<pre>
++++
interface MlBucketInfluencer {
pass:[/**] @property anomaly_score A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as newer data is analyzed. */
anomaly_score: <<double>>
pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */
bucket_span: <<DurationValue>><<<UnitSeconds>>>
pass:[/**] @property influencer_field_name The field name of the influencer. */
influencer_field_name: <<Field>>
pass:[/**] @property initial_anomaly_score The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the time the bucket was processed. */
initial_anomaly_score: <<double>>
pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */
is_interim: boolean
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: <<Id>>
pass:[/**] @property probability The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of this. */
probability: <<double>>
pass:[/**] @property raw_anomaly_score Internal. */
raw_anomaly_score: <<double>>
pass:[/**] @property result_type Internal. This value is always set to `bucket_influencer`. */
result_type: string
pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property timestamp_string The start time of the bucket for which these results were calculated. */
timestamp_string?: <<DateTime>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlBucketSummary]]
=== MlBucketSummary
[pass]
++++
<pre>
++++
interface MlBucketSummary {
pass:[/**] @property anomaly_score The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as new data is analyzed. */
anomaly_score: <<double>>
bucket_influencers: <<MlBucketInfluencer>>[]
pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */
bucket_span: <<DurationValue>><<<UnitSeconds>>>
pass:[/**] @property event_count The number of input data records processed in this bucket. */
event_count: <<long>>
pass:[/**] @property initial_anomaly_score The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the time the bucket was processed. */
initial_anomaly_score: <<double>>
pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */
is_interim: boolean
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: <<Id>>
pass:[/**] @property processing_time_ms The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. */
processing_time_ms: <<DurationValue>><<<UnitMillis>>>
pass:[/**] @property result_type Internal. This value is always set to `bucket`. */
result_type: string
pass:[/**] @property timestamp The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the timestamp of the bucket are included in the results for the bucket. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property timestamp_string The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the timestamp of the bucket are included in the results for the bucket. */
timestamp_string?: <<DateTime>>
}
[pass]
++++
</pre>
++++
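Bucket summaries of this shape are returned by the get buckets API, with each bucket carrying its `bucket_influencers`. A minimal sketch, assuming a configured client; the job ID and score threshold are illustrative.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// List buckets with an anomaly_score of at least 50 (illustrative values).
const response = await client.ml.getBuckets({
  job_id: 'example-count-job', // hypothetical job ID
  anomaly_score: 50
})

for (const bucket of response.buckets) {
  for (const influencer of bucket.bucket_influencers) {
    console.log(bucket.timestamp, influencer.influencer_field_name, influencer.anomaly_score)
  }
}
----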
[discrete]
[[MlCalendarEvent]]
=== MlCalendarEvent
[pass]
++++
<pre>
++++
interface MlCalendarEvent {
pass:[/**] @property calendar_id A string that uniquely identifies a calendar. */
calendar_id?: <<Id>>
event_id?: <<Id>>
pass:[/**] @property description A description of the scheduled event. */
description: string
pass:[/**] @property end_time The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */
end_time: <<DateTime>>
pass:[/**] @property start_time The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */
start_time: <<DateTime>>
}
[pass]
++++
</pre>
++++
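Events of this shape are supplied to the post calendar events API. A minimal sketch; the calendar ID and time window are assumptions for illustration.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Add a scheduled event to an existing calendar. start_time and end_time
// also accept milliseconds since the epoch.
await client.ml.postCalendarEvents({
  calendar_id: 'maintenance-windows', // hypothetical calendar ID
  events: [{
    description: 'Planned maintenance',
    start_time: '2024-05-01T00:00:00Z',
    end_time: '2024-05-01T04:00:00Z'
  }]
})
----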
[discrete]
[[MlCategorizationAnalyzer]]
=== MlCategorizationAnalyzer
[pass]
++++
<pre>
++++
type MlCategorizationAnalyzer = string | <<MlCategorizationAnalyzerDefinition>>
[pass]
++++
</pre>
++++
[discrete]
[[MlCategorizationAnalyzerDefinition]]
=== MlCategorizationAnalyzerDefinition
[pass]
++++
<pre>
++++
interface MlCategorizationAnalyzerDefinition {
pass:[/**] @property char_filter One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */
char_filter?: <<AnalysisCharFilter>>[]
pass:[/**] @property filter One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */
filter?: <<AnalysisTokenFilter>>[]
pass:[/**] @property tokenizer The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English. If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`. */
tokenizer?: <<AnalysisTokenizer>>
}
[pass]
++++
</pre>
++++
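A sketch of a custom categorization analyzer that keeps the `ml_standard` tokenizer but strips a hypothetical session token before tokenization, using a pattern_replace character filter. The pattern is an assumption for illustration, and the type import path may differ between client versions.

[source,ts]
----
// Type import path is an assumption; it may differ between client versions.
import type { MlCategorizationAnalyzerDefinition } from '@elastic/elasticsearch/lib/api/types'

// Remove a hypothetical session token so it does not split categories.
const categorizationAnalyzer: MlCategorizationAnalyzerDefinition = {
  char_filter: [{
    type: 'pattern_replace',
    pattern: 'session=[0-9a-f]{32}', // hypothetical pattern
    replacement: ''
  }],
  tokenizer: 'ml_standard'
}
----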
[discrete]
[[MlCategorizationStatus]]
=== MlCategorizationStatus
[pass]
++++
<pre>
++++
type MlCategorizationStatus = 'ok' | 'warn'
[pass]
++++
</pre>
++++
[discrete]
[[MlCategory]]
=== MlCategory
[pass]
++++
<pre>
++++
interface MlCategory {
pass:[/**] @property category_id A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */
category_id: <<ulong>>
pass:[/**] @property examples A list of examples of actual values that matched the category. */
examples: string[]
pass:[/**] @property grok_pattern [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */
grok_pattern?: <<GrokPattern>>
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: <<Id>>
pass:[/**] @property max_matching_length The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. */
max_matching_length: <<ulong>>
pass:[/**] @property partition_field_name If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */
partition_field_name?: string
pass:[/**] @property partition_field_value If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */
partition_field_value?: string
pass:[/**] @property regex A regular expression that is used to search for values that match the category. */
regex: string
pass:[/**] @property terms A space separated list of the common tokens that are matched in values of the category. */
terms: string
pass:[/**] @property num_matches The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job `_flush` or `_close`. */
num_matches?: <<long>>
pass:[/**] @property preferred_to_categories A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. This is only guaranteed to have the latest accurate list of categories after a job `_flush` or `_close`. */
preferred_to_categories?: <<Id>>[]
p?: string
result_type: string
mlcategory: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlChunkingConfig]]
=== MlChunkingConfig
[pass]
++++
<pre>
++++
interface MlChunkingConfig {
pass:[/**] @property mode If the mode is `auto`, the chunk size is dynamically calculated; this is the recommended value when the datafeed does not use aggregations. If the mode is `manual`, chunking is applied according to the specified `time_span`; use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */
mode: <<MlChunkingMode>>
pass:[/**] @property time_span The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. */
time_span?: <<Duration>>
}
[pass]
++++
</pre>
++++
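A small sketch of a chunking configuration, as it would be supplied in a datafeed's `chunking_config`; the one-day span is a plausible choice when the datafeed uses aggregations, and the value is illustrative.

[source,ts]
----
// Type import path is an assumption; it may differ between client versions.
import type { MlChunkingConfig } from '@elastic/elasticsearch/lib/api/types'

// Manual chunking: each search covers a fixed one-day window.
const chunkingConfig: MlChunkingConfig = {
  mode: 'manual',
  time_span: '1d'
}
----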
[discrete]
[[MlChunkingMode]]
=== MlChunkingMode
[pass]
++++
<pre>
++++
type MlChunkingMode = 'auto' | 'manual' | 'off'
[pass]
++++
</pre>
++++
[discrete]
[[MlClassificationInferenceOptions]]
=== MlClassificationInferenceOptions
[pass]
++++
<pre>
++++
interface MlClassificationInferenceOptions {
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property num_top_feature_importance_values Specifies the maximum number of feature importance values per document. */
num_top_feature_importance_values?: <<integer>>
pass:[/**] @property prediction_field_type Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */
prediction_field_type?: string
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property top_classes_results_field Specifies the field to which the top classes are written. Defaults to top_classes. */
top_classes_results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlConditionOperator]]
=== MlConditionOperator
[pass]
++++
<pre>
++++
type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
[pass]
++++
</pre>
++++
[discrete]
[[MlCustomSettings]]
=== MlCustomSettings
[pass]
++++
<pre>
++++
type MlCustomSettings = any
[pass]
++++
</pre>
++++
[discrete]
[[MlDataCounts]]
=== MlDataCounts
[pass]
++++
<pre>
++++
interface MlDataCounts {
bucket_count: <<long>>
earliest_record_timestamp?: <<long>>
empty_bucket_count: <<long>>
input_bytes: <<long>>
input_field_count: <<long>>
input_record_count: <<long>>
invalid_date_count: <<long>>
job_id: <<Id>>
last_data_time?: <<long>>
latest_empty_bucket_timestamp?: <<long>>
latest_record_timestamp?: <<long>>
latest_sparse_bucket_timestamp?: <<long>>
latest_bucket_timestamp?: <<long>>
log_time?: <<long>>
missing_field_count: <<long>>
out_of_order_timestamp_count: <<long>>
processed_field_count: <<long>>
processed_record_count: <<long>>
sparse_bucket_count: <<long>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataDescription]]
=== MlDataDescription
[pass]
++++
<pre>
++++
interface MlDataDescription {
pass:[/**] @property format Only JSON format is supported at this time. */
format?: string
pass:[/**] @property time_field The name of the field that contains the timestamp. */
time_field?: <<Field>>
pass:[/**] @property time_format The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */
time_format?: string
field_delimiter?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDatafeed]]
=== MlDatafeed
[pass]
++++
<pre>
++++
interface MlDatafeed {
aggregations?: Record<string, <<AggregationsAggregationContainer>>>
aggs?: Record<string, <<AggregationsAggregationContainer>>>
pass:[/**] @property authorization The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. */
authorization?: <<MlDatafeedAuthorization>>
chunking_config?: <<MlChunkingConfig>>
datafeed_id: <<Id>>
frequency?: <<Duration>>
indices: string[]
indexes?: string[]
job_id: <<Id>>
max_empty_searches?: <<integer>>
query: <<QueryDslQueryContainer>>
query_delay?: <<Duration>>
script_fields?: Record<string, <<ScriptField>>>
scroll_size?: <<integer>>
delayed_data_check_config: <<MlDelayedDataCheckConfig>>
runtime_mappings?: <<MappingRuntimeFields>>
indices_options?: <<IndicesOptions>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDatafeedAuthorization]]
=== MlDatafeedAuthorization
[pass]
++++
<pre>
++++
interface MlDatafeedAuthorization {
pass:[/**] @property api_key If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */
api_key?: <<MlApiKeyAuthorization>>
pass:[/**] @property roles If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */
roles?: string[]
pass:[/**] @property service_account If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */
service_account?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDatafeedConfig]]
=== MlDatafeedConfig
[pass]
++++
<pre>
++++
interface MlDatafeedConfig {
pass:[/**] @property aggregations If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */
aggregations?: Record<string, <<AggregationsAggregationContainer>>>
pass:[/**] @property aggs If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */
aggs?: Record<string, <<AggregationsAggregationContainer>>>
pass:[/**] @property chunking_config Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks is calculated and is an advanced configuration option. */
chunking_config?: <<MlChunkingConfig>>
pass:[/**] @property datafeed_id A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */
datafeed_id?: <<Id>>
pass:[/**] @property delayed_data_check_config Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */
delayed_data_check_config?: <<MlDelayedDataCheckConfig>>
pass:[/**] @property frequency The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */
frequency?: <<Duration>>
pass:[/**] @property indices An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */
indices?: <<Indices>>
pass:[/**] @property indexes An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */
indexes?: <<Indices>>
pass:[/**] @property indices_options Specifies index expansion options that are used during search. */
indices_options?: <<IndicesOptions>>
job_id?: <<Id>>
pass:[/**] @property max_empty_searches If a real-time datafeed has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated job after this many real-time searches that return no documents. In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */
max_empty_searches?: <<integer>>
pass:[/**] @property query The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */
query?: <<QueryDslQueryContainer>>
pass:[/**] @property query_delay The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */
query_delay?: <<Duration>>
pass:[/**] @property runtime_mappings Specifies runtime fields for the datafeed search. */
runtime_mappings?: <<MappingRuntimeFields>>
pass:[/**] @property script_fields Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */
script_fields?: Record<string, <<ScriptField>>>
pass:[/**] @property scroll_size The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */
scroll_size?: <<integer>>
}
[pass]
++++
</pre>
++++
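A sketch of creating a datafeed for the hypothetical job used in the earlier examples; the IDs, index pattern, and tuning values are assumptions for illustration.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Attach a datafeed to an existing anomaly detection job.
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-example-count-job', // hypothetical ID
  job_id: 'example-count-job',
  indices: ['logs-*'],
  query: { match_all: {} },
  query_delay: '90s',
  scroll_size: 1000
})
----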
[discrete]
[[MlDatafeedRunningState]]
=== MlDatafeedRunningState
[pass]
++++
<pre>
++++
interface MlDatafeedRunningState {
pass:[/**] @property real_time_configured Indicates if the datafeed is "real-time", meaning that the datafeed has no configured `end` time. */
real_time_configured: boolean
pass:[/**] @property real_time_running Indicates whether the datafeed has finished running on the available past data. For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */
real_time_running: boolean
pass:[/**] @property search_interval Provides the latest time interval the datafeed has searched. */
search_interval?: <<MlRunningStateSearchInterval>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDatafeedState]]
=== MlDatafeedState
[pass]
++++
<pre>
++++
type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'
[pass]
++++
</pre>
++++
[discrete]
[[MlDatafeedStats]]
=== MlDatafeedStats
[pass]
++++
<pre>
++++
interface MlDatafeedStats {
pass:[/**] @property assignment_explanation For started datafeeds only, contains messages relating to the selection of a node. */
assignment_explanation?: string
pass:[/**] @property datafeed_id A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */
datafeed_id: <<Id>>
pass:[/**] @property node For started datafeeds only, this information pertains to the node upon which the datafeed is started. */
node?: <<MlDiscoveryNode>>
pass:[/**] @property state The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */
state: <<MlDatafeedState>>
pass:[/**] @property timing_stats An object that provides statistical information about timing aspect of this datafeed. */
timing_stats: <<MlDatafeedTimingStats>>
pass:[/**] @property running_state An object containing the running state for this datafeed. It is only provided if the datafeed is started. */
running_state?: <<MlDatafeedRunningState>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDatafeedTimingStats]]
=== MlDatafeedTimingStats
[pass]
++++
<pre>
++++
interface MlDatafeedTimingStats {
pass:[/**] @property bucket_count The number of buckets processed. */
bucket_count: <<long>>
pass:[/**] @property exponential_average_search_time_per_hour_ms The exponential average search time per hour, in milliseconds. */
exponential_average_search_time_per_hour_ms: <<DurationValue>><<<UnitFloatMillis>>>
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: <<Id>>
pass:[/**] @property search_count The number of searches run by the datafeed. */
search_count: <<long>>
pass:[/**] @property total_search_time_ms The total time the datafeed spent searching, in milliseconds. */
total_search_time_ms: <<DurationValue>><<<UnitFloatMillis>>>
pass:[/**] @property average_search_time_per_bucket_ms The average search time per bucket, in milliseconds. */
average_search_time_per_bucket_ms?: <<DurationValue>><<<UnitFloatMillis>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysis]]
=== MlDataframeAnalysis
[pass]
++++
<pre>
++++
interface MlDataframeAnalysis {
pass:[/**] @property alpha Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */
alpha?: <<double>>
pass:[/**] @property dependent_variable Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as the continuous target variable. For classification analysis, the data type of the field must be numeric (`<<integer>>`, `<<short>>`, `<<long>>`, `<<byte>>`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. For regression analysis, the data type of the field must be numeric. */
dependent_variable: string
pass:[/**] @property downsample_factor Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */
downsample_factor?: <<double>>
pass:[/**] @property early_stopping_enabled Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable. */
early_stopping_enabled?: boolean
pass:[/**] @property eta Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */
eta?: <<double>>
pass:[/**] @property eta_growth_rate_per_tree Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */
eta_growth_rate_per_tree?: <<double>>
pass:[/**] @property feature_bag_fraction Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */
feature_bag_fraction?: <<double>>
pass:[/**] @property feature_processors Advanced configuration option. A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index. Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */
feature_processors?: <<MlDataframeAnalysisFeatureProcessor>>[]
pass:[/**] @property gamma Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */
gamma?: <<double>>
pass:[/**] @property lambda Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */
lambda?: <<double>>
pass:[/**] @property max_optimization_rounds_per_hyperparameter Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */
max_optimization_rounds_per_hyperparameter?: <<integer>>
pass:[/**] @property max_trees Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */
max_trees?: <<integer>>
pass:[/**] @property maximum_number_trees Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */
maximum_number_trees?: <<integer>>
pass:[/**] @property num_top_feature_importance_values Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */
num_top_feature_importance_values?: <<integer>>
pass:[/**] @property prediction_field_name Defines the name of the prediction field in the results. Defaults to `<dependent_variable>_prediction`. */
prediction_field_name?: <<Field>>
pass:[/**] @property randomize_seed Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */
randomize_seed?: <<double>>
pass:[/**] @property soft_tree_depth_limit Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */
soft_tree_depth_limit?: <<integer>>
pass:[/**] @property soft_tree_depth_tolerance Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */
soft_tree_depth_tolerance?: <<double>>
pass:[/**] @property training_percent Defines the percentage of eligible documents that will be used for training. Documents that are ignored by the analysis (for example those that contain arrays with more than one value) won't be included in the calculation of the used percentage. */
training_percent?: <<Percentage>>
}
[pass]
++++
</pre>
++++
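These hyperparameters are shared by the regression and classification analyses. A sketch of creating a regression job that leaves most of them to hyperparameter optimization; the IDs, index names, and dependent variable are assumptions for illustration.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create a data frame analytics job; unset hyperparameters are
// calculated during hyperparameter optimization.
await client.ml.putDataFrameAnalytics({
  id: 'example-regression',              // hypothetical job ID
  source: { index: 'housing' },          // hypothetical source index
  dest: { index: 'housing-predictions' },
  analysis: {
    regression: {
      dependent_variable: 'price',
      training_percent: 80,
      num_top_feature_importance_values: 5
    }
  }
})
----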
[discrete]
[[MlDataframeAnalysisAnalyzedFields]]
=== MlDataframeAnalysisAnalyzedFields
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisAnalyzedFields {
pass:[/**] @property includes An array of strings that defines the fields that will be included in the analysis. */
includes: string[]
pass:[/**] @property excludes An array of strings that defines the fields that will be excluded from the analysis. You do not need to add fields with unsupported data types to excludes; these fields are excluded from the analysis automatically. */
excludes: string[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisClassification]]
=== MlDataframeAnalysisClassification
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisClassification extends <<MlDataframeAnalysis>> {
class_assignment_objective?: string
pass:[/**] @property num_top_classes Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */
num_top_classes?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisContainer]]
=== MlDataframeAnalysisContainer
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisContainer {
pass:[/**] @property classification The configuration information necessary to perform classification. */
classification?: <<MlDataframeAnalysisClassification>>
pass:[/**] @property outlier_detection The configuration information necessary to perform outlier detection. NOTE: Advanced parameters are for fine-tuning classification analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */
outlier_detection?: <<MlDataframeAnalysisOutlierDetection>>
pass:[/**] @property regression The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */
regression?: <<MlDataframeAnalysisRegression>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisFeatureProcessor]]
=== MlDataframeAnalysisFeatureProcessor
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisFeatureProcessor {
pass:[/**] @property frequency_encoding The configuration information necessary to perform frequency encoding. */
frequency_encoding?: <<MlDataframeAnalysisFeatureProcessorFrequencyEncoding>>
pass:[/**] @property multi_encoding The configuration information necessary to perform multi encoding. It allows multiple processors to be changed together. This way the output of a processor can then be passed to another as an input. */
multi_encoding?: <<MlDataframeAnalysisFeatureProcessorMultiEncoding>>
pass:[/**] @property n_gram_encoding The configuration information necessary to perform n-gram encoding. Features created by this encoder have the following name format: <feature_prefix>.<ngram><string position>. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */
n_gram_encoding?: <<MlDataframeAnalysisFeatureProcessorNGramEncoding>>
pass:[/**] @property one_hot_encoding The configuration information necessary to perform one hot encoding. */
one_hot_encoding?: <<MlDataframeAnalysisFeatureProcessorOneHotEncoding>>
pass:[/**] @property target_mean_encoding The configuration information necessary to perform target mean encoding. */
target_mean_encoding?: <<MlDataframeAnalysisFeatureProcessorTargetMeanEncoding>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisFeatureProcessorFrequencyEncoding]]
=== MlDataframeAnalysisFeatureProcessorFrequencyEncoding
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding {
pass:[/**] @property feature_name The resulting feature name. */
feature_name: <<Name>>
field: <<Field>>
pass:[/**] @property frequency_map The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */
frequency_map: Record<string, <<double>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisFeatureProcessorMultiEncoding]]
=== MlDataframeAnalysisFeatureProcessorMultiEncoding
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisFeatureProcessorMultiEncoding {
pass:[/**] @property processors The ordered array of custom processors to execute. It must contain more than one processor. */
processors: <<integer>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisFeatureProcessorNGramEncoding]]
=== MlDataframeAnalysisFeatureProcessorNGramEncoding
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisFeatureProcessorNGramEncoding {
pass:[/**] @property feature_prefix The feature name prefix. Defaults to ngram_<start>_<length>. */
feature_prefix?: string
pass:[/**] @property field The name of the text field to encode. */
field: <<Field>>
pass:[/**] @property length Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */
length?: <<integer>>
pass:[/**] @property n_grams Specifies which n-grams to gather. It's an array of <<integer>> values where the minimum value is 1 and the maximum value is 5. */
n_grams: <<integer>>[]
pass:[/**] @property start Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */
start?: <<integer>>
custom?: boolean
}
[pass]
++++
</pre>
++++
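
A short sketch of an n-gram encoding processor, assuming a hypothetical `url` text field; with `feature_prefix: 'f'`, the generated features are named `f.10`, `f.11`, `f.12` (unigrams) and `f.20`, `f.21` (bigrams):

[source,ts]
----
import type * as estypes from '@elastic/elasticsearch/lib/api/types'

// Encode the first three characters of the field as unigrams and bigrams.
const processor: estypes.MlDataframeAnalysisFeatureProcessor = {
  n_gram_encoding: {
    field: 'url',        // hypothetical text field
    feature_prefix: 'f', // features become f.10, f.11, f.12, f.20, f.21
    n_grams: [1, 2],     // gather unigrams and bigrams
    start: 0,            // zero-indexed start of the substring
    length: 3            // length of the substring to encode
  }
}
----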
[discrete]
[[MlDataframeAnalysisFeatureProcessorOneHotEncoding]]
=== MlDataframeAnalysisFeatureProcessorOneHotEncoding
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisFeatureProcessorOneHotEncoding {
pass:[/**] @property field The name of the field to encode. */
field: <<Field>>
pass:[/**] @property hot_map The one hot map, mapping the field value to the column name. */
hot_map: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisFeatureProcessorTargetMeanEncoding]]
=== MlDataframeAnalysisFeatureProcessorTargetMeanEncoding
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding {
pass:[/**] @property default_value The default value if the field value is not found in the target_map. */
default_value: <<integer>>
pass:[/**] @property feature_name The resulting feature name. */
feature_name: <<Name>>
pass:[/**] @property field The name of the field to encode. */
field: <<Field>>
pass:[/**] @property target_map The field value to target mean transition map. */
target_map: Record<string, any>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalysisOutlierDetection]]
=== MlDataframeAnalysisOutlierDetection
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisOutlierDetection {
pass:[/**] @property compute_feature_influence Specifies whether the feature influence calculation is enabled. */
compute_feature_influence?: boolean
pass:[/**] @property feature_influence_threshold The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */
feature_influence_threshold?: <<double>>
pass:[/**] @property method The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is `ensemble`, which means that outlier detection uses an ensemble of different methods and normalizes and combines their individual outlier scores to obtain the overall outlier score. */
method?: string
pass:[/**] @property n_neighbors Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */
n_neighbors?: <<integer>>
pass:[/**] @property outlier_fraction The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */
outlier_fraction?: <<double>>
pass:[/**] @property standardization_enabled If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. */
standardization_enabled?: boolean
}
[pass]
++++
</pre>
++++
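
As a hedged sketch (all parameters here are optional; omitting them keeps the ensemble defaults), an outlier detection configuration that pins a single method might look like this:

[source,ts]
----
import type * as estypes from '@elastic/elasticsearch/lib/api/types'

// Override the ensemble default with one method and an explicit
// neighborhood size; omitted parameters keep their defaults.
const outlierDetection: estypes.MlDataframeAnalysisOutlierDetection = {
  method: 'distance_knn',
  n_neighbors: 20,
  feature_influence_threshold: 0.1, // value range: 0-1
  outlier_fraction: 0.05,           // assume 5% of documents are outliers
  standardization_enabled: true     // (x_i - mean(x_i)) / sd(x_i)
}
----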
[discrete]
[[MlDataframeAnalysisRegression]]
=== MlDataframeAnalysisRegression
[pass]
++++
<pre>
++++
interface MlDataframeAnalysisRegression extends <<MlDataframeAnalysis>> {
pass:[/**] @property loss_function The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss). */
loss_function?: string
pass:[/**] @property loss_function_parameter A positive number that is used as a parameter to the `loss_function`. */
loss_function_parameter?: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalytics]]
=== MlDataframeAnalytics
[pass]
++++
<pre>
++++
interface MlDataframeAnalytics {
pass:[/**] @property analysis_stats An object containing information about the analysis job. */
analysis_stats?: <<MlDataframeAnalyticsStatsContainer>>
pass:[/**] @property assignment_explanation For running jobs only, contains messages relating to the selection of a node to run the job. */
assignment_explanation?: string
pass:[/**] @property data_counts An object that provides counts for the quantity of documents skipped, used in training, or available for testing. */
data_counts: <<MlDataframeAnalyticsStatsDataCounts>>
pass:[/**] @property id The unique identifier of the data frame analytics job. */
id: <<Id>>
pass:[/**] @property memory_usage An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */
memory_usage: <<MlDataframeAnalyticsStatsMemoryUsage>>
pass:[/**] @property node Contains properties for the node that runs the job. This information is available only for running jobs. */
node?: <<NodeAttributes>>
pass:[/**] @property progress The progress report of the data frame analytics job by phase. */
progress: <<MlDataframeAnalyticsStatsProgress>>[]
pass:[/**] @property state The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */
state: <<MlDataframeState>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsAuthorization]]
=== MlDataframeAnalyticsAuthorization
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsAuthorization {
pass:[/**] @property api_key If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */
api_key?: <<MlApiKeyAuthorization>>
pass:[/**] @property roles If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */
roles?: string[]
pass:[/**] @property service_account If a service account was used for the most recent update to the job, the account name is listed in the response. */
service_account?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsDestination]]
=== MlDataframeAnalyticsDestination
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsDestination {
pass:[/**] @property index Defines the destination index to store the results of the data frame analytics job. */
index: <<IndexName>>
pass:[/**] @property results_field Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. */
results_field?: <<Field>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsFieldSelection]]
=== MlDataframeAnalyticsFieldSelection
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsFieldSelection {
pass:[/**] @property is_included Whether the field is selected to be included in the analysis. */
is_included: boolean
pass:[/**] @property is_required Whether the field is required. */
is_required: boolean
pass:[/**] @property feature_type The feature type of this field for the analysis. May be categorical or numerical. */
feature_type?: string
pass:[/**] @property mapping_types The mapping types of the field. */
mapping_types: string[]
pass:[/**] @property name The field name. */
name: <<Field>>
pass:[/**] @property reason The reason a field is not selected to be included in the analysis. */
reason?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsMemoryEstimation]]
=== MlDataframeAnalyticsMemoryEstimation
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsMemoryEstimation {
pass:[/**] @property expected_memory_with_disk Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk because using disk allows you to limit the main memory needed to perform data frame analytics. */
expected_memory_with_disk: string
pass:[/**] @property expected_memory_without_disk Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */
expected_memory_without_disk: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsSource]]
=== MlDataframeAnalyticsSource
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsSource {
pass:[/**] @property index Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns. NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */
index: <<Indices>>
pass:[/**] @property query The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */
query?: <<QueryDslQueryContainer>>
pass:[/**] @property runtime_mappings Definitions of runtime fields that will become part of the mapping of the destination index. */
runtime_mappings?: <<MappingRuntimeFields>>
pass:[/**] @property _source Specify `includes` and/or `excludes` patterns to select which fields will be present in the destination. <<Fields>> that are excluded cannot be included in the analysis. */
_source?: <<MlDataframeAnalysisAnalyzedFields>> | string[]
}
[pass]
++++
</pre>
++++
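
A minimal sketch of a source definition, assuming a hypothetical `customer-events` index; the query defaults to `{"match_all": {}}` when omitted:

[source,ts]
----
import type * as estypes from '@elastic/elasticsearch/lib/api/types'

// Restrict the analysis to completed events and exclude an internal field.
const source: estypes.MlDataframeAnalyticsSource = {
  index: 'customer-events', // hypothetical source index
  query: { term: { status: 'complete' } },
  _source: {
    includes: ['features.*'],
    excludes: ['features.internal_id'] // excluded fields cannot be analyzed
  }
}
----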
[discrete]
[[MlDataframeAnalyticsStatsContainer]]
=== MlDataframeAnalyticsStatsContainer
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsStatsContainer {
pass:[/**] @property classification_stats An object containing information about the classification analysis job. */
classification_stats?: <<MlDataframeAnalyticsStatsHyperparameters>>
pass:[/**] @property outlier_detection_stats An object containing information about the outlier detection job. */
outlier_detection_stats?: <<MlDataframeAnalyticsStatsOutlierDetection>>
pass:[/**] @property regression_stats An object containing information about the regression analysis. */
regression_stats?: <<MlDataframeAnalyticsStatsHyperparameters>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsStatsDataCounts]]
=== MlDataframeAnalyticsStatsDataCounts
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsStatsDataCounts {
pass:[/**] @property skipped_docs_count The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */
skipped_docs_count: <<integer>>
pass:[/**] @property test_docs_count The number of documents that are not used for training the model and can be used for testing. */
test_docs_count: <<integer>>
pass:[/**] @property training_docs_count The number of documents that are used for training the model. */
training_docs_count: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsStatsHyperparameters]]
=== MlDataframeAnalyticsStatsHyperparameters
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsStatsHyperparameters {
pass:[/**] @property hyperparameters An object containing the parameters of the classification analysis job. */
hyperparameters: <<MlHyperparameters>>
pass:[/**] @property iteration The number of iterations on the analysis. */
iteration: <<integer>>
pass:[/**] @property timestamp The timestamp when the statistics were reported in milliseconds since the epoch. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property timing_stats An object containing time statistics about the data frame analytics job. */
timing_stats: <<MlTimingStats>>
pass:[/**] @property validation_loss An object containing information about validation loss. */
validation_loss: <<MlValidationLoss>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsStatsMemoryUsage]]
=== MlDataframeAnalyticsStatsMemoryUsage
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsStatsMemoryUsage {
pass:[/**] @property memory_reestimate_bytes This value is present when the status is hard_limit and it is a new estimate of how much memory the job needs. */
memory_reestimate_bytes?: <<long>>
pass:[/**] @property peak_usage_bytes The number of bytes used at the highest peak of memory usage. */
peak_usage_bytes: <<long>>
pass:[/**] @property status The memory usage status. */
status: string
pass:[/**] @property timestamp The timestamp when memory usage was calculated. */
timestamp?: <<EpochTime>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsStatsOutlierDetection]]
=== MlDataframeAnalyticsStatsOutlierDetection
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsStatsOutlierDetection {
pass:[/**] @property parameters The list of job parameters specified by the user or determined by algorithmic heuristics. */
parameters: <<MlOutlierDetectionParameters>>
pass:[/**] @property timestamp The timestamp when the statistics were reported in milliseconds since the epoch. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property timing_stats An object containing time statistics about the data frame analytics job. */
timing_stats: <<MlTimingStats>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsStatsProgress]]
=== MlDataframeAnalyticsStatsProgress
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsStatsProgress {
pass:[/**] @property phase Defines the phase of the data frame analytics job. */
phase: string
pass:[/**] @property progress_percent The progress that the data frame analytics job has made, expressed as a percentage. */
progress_percent: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeAnalyticsSummary]]
=== MlDataframeAnalyticsSummary
[pass]
++++
<pre>
++++
interface MlDataframeAnalyticsSummary {
allow_lazy_start?: boolean
analysis: <<MlDataframeAnalysisContainer>>
analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[]
pass:[/**] @property authorization The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. */
authorization?: <<MlDataframeAnalyticsAuthorization>>
create_time?: <<EpochTime>><<<UnitMillis>>>
description?: string
dest: <<MlDataframeAnalyticsDestination>>
id: <<Id>>
max_num_threads?: <<integer>>
model_memory_limit?: string
source: <<MlDataframeAnalyticsSource>>
version?: <<VersionString>>
}
[pass]
++++
</pre>
++++
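
A job whose configuration is later returned in this summary shape can be created with the client's `ml.putDataFrameAnalytics` API. A minimal sketch, assuming a local node and hypothetical index and field names:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Create a classification job; getting the job back returns a document
// shaped like MlDataframeAnalyticsSummary.
await client.ml.putDataFrameAnalytics({
  id: 'loan-classification',              // hypothetical job id
  source: { index: 'loan-applications' }, // hypothetical source index
  dest: { index: 'loan-applications-results' },
  analysis: {
    classification: { dependent_variable: 'approved' } // hypothetical field
  },
  model_memory_limit: '50mb'
})
----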
[discrete]
[[MlDataframeEvaluationClassification]]
=== MlDataframeEvaluationClassification
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationClassification {
pass:[/**] @property actual_field The field of the index which contains the ground truth. The data type of this field can be boolean or <<integer>>. If the data type is <<integer>>, the value has to be either 0 (false) or 1 (true). */
actual_field: <<Field>>
pass:[/**] @property predicted_field The field in the index which contains the predicted value, in other words the results of the classification analysis. */
predicted_field?: <<Field>>
pass:[/**] @property top_classes_field The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */
top_classes_field?: <<Field>>
pass:[/**] @property metrics Specifies the metrics that are used for the evaluation. */
metrics?: <<MlDataframeEvaluationClassificationMetrics>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationClassificationMetrics]]
=== MlDataframeEvaluationClassificationMetrics
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationClassificationMetrics extends <<MlDataframeEvaluationMetrics>> {
pass:[/**] @property accuracy Accuracy of predictions (per-class and overall). */
accuracy?: Record<string, any>
pass:[/**] @property multiclass_confusion_matrix Multiclass confusion matrix. */
multiclass_confusion_matrix?: Record<string, any>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationClassificationMetricsAucRoc]]
=== MlDataframeEvaluationClassificationMetricsAucRoc
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationClassificationMetricsAucRoc {
pass:[/**] @property class_name <<Name>> of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */
class_name?: <<Name>>
pass:[/**] @property include_curve Whether or not the curve should be returned in addition to the score. Default value is false. */
include_curve?: boolean
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationContainer]]
=== MlDataframeEvaluationContainer
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationContainer {
pass:[/**] @property classification Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */
classification?: <<MlDataframeEvaluationClassification>>
pass:[/**] @property outlier_detection Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */
outlier_detection?: <<MlDataframeEvaluationOutlierDetection>>
pass:[/**] @property regression Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. */
regression?: <<MlDataframeEvaluationRegression>>
}
[pass]
++++
</pre>
++++
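
The container is the `evaluation` argument of the client's `ml.evaluateDataFrame` API. A minimal sketch, assuming a local node and the hypothetical destination index from the example above:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Evaluate classification results; exactly one evaluation type is set.
const response = await client.ml.evaluateDataFrame({
  index: 'loan-applications-results', // hypothetical results index
  evaluation: {
    classification: {
      actual_field: 'approved',                  // ground truth
      predicted_field: 'ml.approved_prediction', // hypothetical results field
      metrics: { multiclass_confusion_matrix: {} }
    }
  }
})
console.log(response.classification?.multiclass_confusion_matrix)
----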
[discrete]
[[MlDataframeEvaluationMetrics]]
=== MlDataframeEvaluationMetrics
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationMetrics {
pass:[/**] @property auc_roc The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */
auc_roc?: <<MlDataframeEvaluationClassificationMetricsAucRoc>>
pass:[/**] @property precision Precision of predictions (per-class and average). */
precision?: Record<string, any>
pass:[/**] @property recall Recall of predictions (per-class and average). */
recall?: Record<string, any>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationOutlierDetection]]
=== MlDataframeEvaluationOutlierDetection
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationOutlierDetection {
pass:[/**] @property actual_field The field of the index which contains the ground truth. The data type of this field can be boolean or <<integer>>. If the data type is <<integer>>, the value has to be either 0 (false) or 1 (true). */
actual_field: <<Field>>
pass:[/**] @property predicted_probability_field The field of the index that defines the probability of whether the item belongs to the class in question or not. It's the field that contains the results of the analysis. */
predicted_probability_field: <<Field>>
pass:[/**] @property metrics Specifies the metrics that are used for the evaluation. */
metrics?: <<MlDataframeEvaluationOutlierDetectionMetrics>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationOutlierDetectionMetrics]]
=== MlDataframeEvaluationOutlierDetectionMetrics
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationOutlierDetectionMetrics extends <<MlDataframeEvaluationMetrics>> {
pass:[/**] @property confusion_matrix Confusion matrix of the predictions (counts of true positives, false positives, true negatives, and false negatives). */
confusion_matrix?: Record<string, any>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationRegression]]
=== MlDataframeEvaluationRegression
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationRegression {
pass:[/**] @property actual_field The field of the index which contains the ground truth. The data type of this field must be numerical. */
actual_field: <<Field>>
pass:[/**] @property predicted_field The field in the index that contains the predicted value, in other words the results of the regression analysis. */
predicted_field: <<Field>>
pass:[/**] @property metrics Specifies the metrics that are used for the evaluation. For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */
metrics?: <<MlDataframeEvaluationRegressionMetrics>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationRegressionMetrics]]
=== MlDataframeEvaluationRegressionMetrics
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationRegressionMetrics {
pass:[/**] @property mse Average squared difference between the predicted values and the actual (ground truth) value. For more information, read this wiki article. */
mse?: Record<string, any>
pass:[/**] @property msle Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */
msle?: <<MlDataframeEvaluationRegressionMetricsMsle>>
pass:[/**] @property huber Pseudo Huber loss function. */
huber?: <<MlDataframeEvaluationRegressionMetricsHuber>>
pass:[/**] @property r_squared Proportion of the variance in the dependent variable that is predictable from the independent variables. */
r_squared?: Record<string, any>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationRegressionMetricsHuber]]
=== MlDataframeEvaluationRegressionMetricsHuber
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationRegressionMetricsHuber {
pass:[/**] @property delta Approximates 1/2 (prediction - actual)^2 for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0. */
delta?: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeEvaluationRegressionMetricsMsle]]
=== MlDataframeEvaluationRegressionMetricsMsle
[pass]
++++
<pre>
++++
interface MlDataframeEvaluationRegressionMetricsMsle {
pass:[/**] @property offset Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */
offset?: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDataframeState]]
=== MlDataframeState
[pass]
++++
<pre>
++++
type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed'
[pass]
++++
</pre>
++++
[discrete]
[[MlDelayedDataCheckConfig]]
=== MlDelayedDataCheckConfig
[pass]
++++
<pre>
++++
interface MlDelayedDataCheckConfig {
pass:[/**] @property check_window The window of time that is searched for late data. This window of time ends with the latest finalized bucket. It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`. */
check_window?: <<Duration>>
pass:[/**] @property enabled Specifies whether the datafeed periodically checks for delayed data. */
enabled: boolean
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDeploymentAllocationState]]
=== MlDeploymentAllocationState
[pass]
++++
<pre>
++++
type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated'
[pass]
++++
</pre>
++++
[discrete]
[[MlDeploymentAssignmentState]]
=== MlDeploymentAssignmentState
[pass]
++++
<pre>
++++
type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed'
[pass]
++++
</pre>
++++
[discrete]
[[MlDetectionRule]]
=== MlDetectionRule
[pass]
++++
<pre>
++++
interface MlDetectionRule {
pass:[/**] @property actions The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined. */
actions?: <<MlRuleAction>>[]
pass:[/**] @property conditions An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition. Multiple conditions are combined together with a logical AND. */
conditions?: <<MlRuleCondition>>[]
pass:[/**] @property scope A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */
scope?: Record<<<Field>>, <<MlFilterRef>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDetector]]
=== MlDetector
[pass]
++++
<pre>
++++
interface MlDetector {
pass:[/**] @property by_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */
by_field_name?: <<Field>>
pass:[/**] @property custom_rules Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */
custom_rules?: <<MlDetectionRule>>[]
pass:[/**] @property detector_description A description of the detector. */
detector_description?: string
pass:[/**] @property detector_index A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */
detector_index?: <<integer>>
pass:[/**] @property exclude_frequent If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */
exclude_frequent?: <<MlExcludeFrequent>>
pass:[/**] @property field_name The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain double quotes or backslashes. */
field_name?: <<Field>>
pass:[/**] @property function The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */
function?: string
pass:[/**] @property over_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */
over_field_name?: <<Field>>
pass:[/**] @property partition_field_name The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */
partition_field_name?: <<Field>>
pass:[/**] @property use_null Defines whether a new series is used as the null series when there is no value for the by or partition fields. */
use_null?: boolean
}
[pass]
++++
</pre>
++++
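
A hedged sketch of a detector with one custom rule, assuming hypothetical `responsetime` and `airline` fields; the rule skips results when the actual value stays below 100:

[source,ts]
----
import type * as estypes from '@elastic/elasticsearch/lib/api/types'

// Track the mean response time per airline, but skip results for
// buckets where the actual value is below 100.
const detector: estypes.MlDetector = {
  function: 'mean',
  field_name: 'responsetime', // hypothetical metric field
  by_field_name: 'airline',   // hypothetical split field
  custom_rules: [{
    actions: ['skip_result'],
    conditions: [{ applies_to: 'actual', operator: 'lt', value: 100 }]
  }]
}
----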
[discrete]
[[MlDetectorRead]]
=== MlDetectorRead
[pass]
++++
<pre>
++++
interface MlDetectorRead {
pass:[/**] @property by_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */
by_field_name?: <<Field>>
pass:[/**] @property custom_rules An array of custom rule objects, which enable you to customize the way detectors operate. For example, a rule may dictate to the detector conditions under which results should be skipped. Kibana refers to custom rules as job rules. */
custom_rules?: <<MlDetectionRule>>[]
pass:[/**] @property detector_description A description of the detector. */
detector_description?: string
pass:[/**] @property detector_index A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */
detector_index?: <<integer>>
pass:[/**] @property exclude_frequent Contains one of the following values: `all`, `none`, `by`, or `over`. If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */
exclude_frequent?: <<MlExcludeFrequent>>
pass:[/**] @property field_name The field that the detector uses in the function. If you use an event rate function such as `count` or `rare`, do not specify this field. */
field_name?: <<Field>>
pass:[/**] @property function The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */
function: string
pass:[/**] @property over_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */
over_field_name?: <<Field>>
pass:[/**] @property partition_field_name The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */
partition_field_name?: <<Field>>
pass:[/**] @property use_null Defines whether a new series is used as the null series when there is no value for the by or partition fields. */
use_null?: boolean
}
[pass]
++++
</pre>
++++
[discrete]
[[MlDiscoveryNode]]
=== MlDiscoveryNode
[pass]
++++
<pre>
++++
interface MlDiscoveryNode {
attributes: Record<string, string>
ephemeral_id: <<Id>>
id: <<Id>>
name: <<Name>>
transport_address: <<TransportAddress>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlExcludeFrequent]]
=== MlExcludeFrequent
[pass]
++++
<pre>
++++
type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
[pass]
++++
</pre>
++++
[discrete]
[[MlFillMaskInferenceOptions]]
=== MlFillMaskInferenceOptions
[pass]
++++
<pre>
++++
interface MlFillMaskInferenceOptions {
pass:[/**] @property mask_token The string/token which will be removed from incoming documents and replaced with the inference prediction(s). In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, otherwise the request will fail. */
mask_token?: string
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlFillMaskInferenceUpdateOptions]]
=== MlFillMaskInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlFillMaskInferenceUpdateOptions {
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlFilter]]
=== MlFilter
[pass]
++++
<pre>
++++
interface MlFilter {
pass:[/**] @property description A description of the filter. */
description?: string
pass:[/**] @property filter_id A string that uniquely identifies a filter. */
filter_id: <<Id>>
pass:[/**] @property items An array of strings which is the filter item list. */
items: string[]
}
[pass]
++++
</pre>
++++
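
Filters are created with the client's `ml.putFilter` API and can then be referenced from detection rules through `MlFilterRef`. A minimal sketch with a hypothetical filter id and hosts, assuming a local node:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Create a filter; a detection rule can then scope a field to it with
// { filter_id: 'safe-hosts', filter_type: 'include' }.
await client.ml.putFilter({
  filter_id: 'safe-hosts', // hypothetical filter id
  description: 'Hosts that should never trigger anomalies',
  items: ['host-1.example.com', 'host-2.example.com']
})
----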
[discrete]
[[MlFilterRef]]
=== MlFilterRef
[pass]
++++
<pre>
++++
interface MlFilterRef {
pass:[/**] @property filter_id The identifier for the filter. */
filter_id: <<Id>>
pass:[/**] @property filter_type If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */
filter_type?: <<MlFilterType>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlFilterType]]
=== MlFilterType
[pass]
++++
<pre>
++++
type MlFilterType = 'include' | 'exclude'
[pass]
++++
</pre>
++++
[discrete]
[[MlGeoResults]]
=== MlGeoResults
[pass]
++++
<pre>
++++
interface MlGeoResults {
pass:[/**] @property actual_point The actual value for the bucket formatted as a `geo_point`. */
actual_point: string
pass:[/**] @property typical_point The typical value for the bucket formatted as a `geo_point`. */
typical_point: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlHyperparameter]]
=== MlHyperparameter
[pass]
++++
<pre>
++++
interface MlHyperparameter {
pass:[/**] @property absolute_importance A positive number showing how much the parameter influences the variation of the loss function. It is reported only for hyperparameters whose values are not specified by the user but tuned during hyperparameter optimization. */
absolute_importance?: <<double>>
pass:[/**] @property name <<Name>> of the hyperparameter. */
name: <<Name>>
pass:[/**] @property relative_importance A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters. It is reported only for hyperparameters whose values are not specified by the user but tuned during hyperparameter optimization. */
relative_importance?: <<double>>
pass:[/**] @property supplied Indicates if the hyperparameter is specified by the user (true) or optimized (false). */
supplied: boolean
pass:[/**] @property value The value of the hyperparameter, either optimized or specified by the user. */
value: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlHyperparameters]]
=== MlHyperparameters
[pass]
++++
<pre>
++++
interface MlHyperparameters {
pass:[/**] @property alpha Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */
alpha?: <<double>>
pass:[/**] @property lambda Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */
lambda?: <<double>>
pass:[/**] @property gamma Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */
gamma?: <<double>>
pass:[/**] @property eta Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between `0.001` and `1`. */
eta?: <<double>>
pass:[/**] @property eta_growth_rate_per_tree Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between `0.5` and `2`. */
eta_growth_rate_per_tree?: <<double>>
pass:[/**] @property feature_bag_fraction Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */
feature_bag_fraction?: <<double>>
pass:[/**] @property downsample_factor Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */
downsample_factor?: <<double>>
pass:[/**] @property max_attempts_to_add_tree If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many such consecutive failures are tolerated. Once the number of attempts exceeds the threshold, the forest training stops. */
max_attempts_to_add_tree?: <<integer>>
pass:[/**] @property max_optimization_rounds_per_hyperparameter Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */
max_optimization_rounds_per_hyperparameter?: <<integer>>
pass:[/**] @property max_trees Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */
max_trees?: <<integer>>
pass:[/**] @property num_folds The maximum number of folds for the cross-validation procedure. */
num_folds?: <<integer>>
pass:[/**] @property num_splits_per_feature Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */
num_splits_per_feature?: <<integer>>
pass:[/**] @property soft_tree_depth_limit Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */
soft_tree_depth_limit?: <<integer>>
pass:[/**] @property soft_tree_depth_tolerance Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */
soft_tree_depth_tolerance?: <<double>>
}
[pass]
++++
</pre>
++++
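
The optimized values land in the job's analysis stats. A minimal sketch of reading them back, assuming a local node and the hypothetical classification job from the earlier example:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Read the hyperparameters chosen for a classification job.
const stats = await client.ml.getDataFrameAnalyticsStats({ id: 'loan-classification' })
const hyperparameters =
  stats.data_frame_analytics[0]?.analysis_stats?.classification_stats?.hyperparameters
console.log(hyperparameters?.eta, hyperparameters?.max_trees)
----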
[discrete]
[[MlInclude]]
=== MlInclude
[pass]
++++
<pre>
++++
type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status'
[pass]
++++
</pre>
++++
[discrete]
[[MlInferenceConfigCreateContainer]]
=== MlInferenceConfigCreateContainer
[pass]
++++
<pre>
++++
interface MlInferenceConfigCreateContainer {
pass:[/**] @property regression Regression configuration for inference. */
regression?: <<MlRegressionInferenceOptions>>
pass:[/**] @property classification Classification configuration for inference. */
classification?: <<MlClassificationInferenceOptions>>
pass:[/**] @property text_classification Text classification configuration for inference. */
text_classification?: <<MlTextClassificationInferenceOptions>>
pass:[/**] @property zero_shot_classification Zero-shot classification configuration for inference. */
zero_shot_classification?: <<MlZeroShotClassificationInferenceOptions>>
pass:[/**] @property fill_mask Fill mask configuration for inference. */
fill_mask?: <<MlFillMaskInferenceOptions>>
pass:[/**] @property ner Named entity recognition configuration for inference. */
ner?: <<MlNerInferenceOptions>>
pass:[/**] @property pass_through Pass through configuration for inference. */
pass_through?: <<MlPassThroughInferenceOptions>>
pass:[/**] @property text_embedding Text embedding configuration for inference. */
text_embedding?: <<MlTextEmbeddingInferenceOptions>>
pass:[/**] @property text_expansion Text expansion configuration for inference. */
text_expansion?: <<MlTextExpansionInferenceOptions>>
pass:[/**] @property question_answering Question answering configuration for inference. */
question_answering?: <<MlQuestionAnsweringInferenceOptions>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlInferenceConfigUpdateContainer]]
=== MlInferenceConfigUpdateContainer
[pass]
++++
<pre>
++++
interface MlInferenceConfigUpdateContainer {
pass:[/**] @property regression Regression configuration for inference. */
regression?: <<MlRegressionInferenceOptions>>
pass:[/**] @property classification Classification configuration for inference. */
classification?: <<MlClassificationInferenceOptions>>
pass:[/**] @property text_classification Text classification configuration for inference. */
text_classification?: <<MlTextClassificationInferenceUpdateOptions>>
pass:[/**] @property zero_shot_classification Zero-shot classification configuration for inference. */
zero_shot_classification?: <<MlZeroShotClassificationInferenceUpdateOptions>>
pass:[/**] @property fill_mask Fill mask configuration for inference. */
fill_mask?: <<MlFillMaskInferenceUpdateOptions>>
pass:[/**] @property ner Named entity recognition configuration for inference. */
ner?: <<MlNerInferenceUpdateOptions>>
pass:[/**] @property pass_through Pass through configuration for inference. */
pass_through?: <<MlPassThroughInferenceUpdateOptions>>
pass:[/**] @property text_embedding Text embedding configuration for inference. */
text_embedding?: <<MlTextEmbeddingInferenceUpdateOptions>>
pass:[/**] @property text_expansion Text expansion configuration for inference. */
text_expansion?: <<MlTextExpansionInferenceUpdateOptions>>
pass:[/**] @property question_answering Question answering configuration for inference */
question_answering?: <<MlQuestionAnsweringInferenceUpdateOptions>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlInferenceResponseResult]]
=== MlInferenceResponseResult
[pass]
++++
<pre>
++++
interface MlInferenceResponseResult {
pass:[/**] @property entities If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */
entities?: <<MlTrainedModelEntities>>[]
pass:[/**] @property is_truncated Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property is present only when it is true. */
is_truncated?: boolean
pass:[/**] @property predicted_value If the model is trained for a text classification or zero shot classification task, the response is the predicted class. For named entity recognition (NER) tasks, it contains the annotated text output. For fill mask tasks, it contains the top prediction for replacing the mask token. For text embedding tasks, it contains the raw numerical text embedding values. For regression models, it's a numerical value. For classification models, it may be an <<integer>>, <<double>>, boolean, or string, depending on the prediction type. */
predicted_value?: <<MlPredictedValue>> | <<MlPredictedValue>>[]
pass:[/**] @property predicted_value_sequence For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted value. */
predicted_value_sequence?: string
pass:[/**] @property prediction_probability Specifies a probability for the predicted value. */
prediction_probability?: <<double>>
pass:[/**] @property prediction_score Specifies a confidence score for the predicted value. */
prediction_score?: <<double>>
pass:[/**] @property top_classes For fill mask, text classification, and zero shot classification tasks, the response contains a list of top class entries. */
top_classes?: <<MlTopClassEntry>>[]
pass:[/**] @property warning If the request failed, the response contains the reason for the failure. */
warning?: string
pass:[/**] @property feature_importance The feature importance for the inference results. Relevant only for classification or regression models. */
feature_importance?: <<MlTrainedModelInferenceFeatureImportance>>[]
}
[pass]
++++
</pre>
++++
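
A hedged sketch of producing and reading one of these results with the client's `ml.inferTrainedModel` API, assuming a local node and a hypothetical deployed text classification model:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Run inference and read the MlInferenceResponseResult fields.
const response = await client.ml.inferTrainedModel({
  model_id: 'my-text-classifier', // hypothetical deployed model
  docs: [{ text_field: 'Elasticsearch is quite fast' }],
  inference_config: { text_classification: { num_top_classes: 3 } }
})
const result = response.inference_results[0]
console.log(result.predicted_value, result.prediction_probability)
for (const entry of result.top_classes ?? []) {
  console.log(entry.class_name, entry.class_probability)
}
----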
[discrete]
[[MlInfluence]]
=== MlInfluence
[pass]
++++
<pre>
++++
interface MlInfluence {
influencer_field_name: string
influencer_field_values: string[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlInfluencer]]
=== MlInfluencer
[pass]
++++
<pre>
++++
interface MlInfluencer {
pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */
bucket_span: <<DurationValue>><<<UnitSeconds>>>
pass:[/**] @property influencer_score A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated across detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new data is analyzed. */
influencer_score: <<double>>
pass:[/**] @property influencer_field_name The field name of the influencer. */
influencer_field_name: <<Field>>
pass:[/**] @property influencer_field_value The entity that influenced, contributed to, or was to blame for the anomaly. */
influencer_field_value: string
pass:[/**] @property initial_influencer_score A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors. This is the initial value that was calculated at the time the bucket was processed. */
initial_influencer_score: <<double>>
pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */
is_interim: boolean
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: <<Id>>
pass:[/**] @property probability The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly interpretation of this value. */
probability: <<double>>
pass:[/**] @property result_type Internal. This value is always set to `influencer`. */
result_type: string
pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property foo Additional influencer properties are added, depending on the fields being analyzed. For example, if it's analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This information enables you to filter the anomaly results more easily. */
foo?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlJob]]
=== MlJob
[pass]
++++
<pre>
++++
interface MlJob {
pass:[/**] @property allow_lazy_open Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */
allow_lazy_open: boolean
pass:[/**] @property analysis_config The analysis configuration, which specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. */
analysis_config: <<MlAnalysisConfig>>
pass:[/**] @property analysis_limits Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */
analysis_limits?: <<MlAnalysisLimits>>
pass:[/**] @property background_persist_interval Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. */
background_persist_interval?: <<Duration>>
blocked?: <<MlJobBlocked>>
create_time?: <<DateTime>>
pass:[/**] @property custom_settings Advanced configuration option. Contains custom metadata about the job. */
custom_settings?: <<MlCustomSettings>>
pass:[/**] @property daily_model_snapshot_retention_after_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. */
daily_model_snapshot_retention_after_days?: <<long>>
pass:[/**] @property data_description The data description defines the format of the input data when you send data to the job by using the post data API. Note that when configuring a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. */
data_description: <<MlDataDescription>>
pass:[/**] @property datafeed_config The datafeed, which retrieves data from Elasticsearch for analysis by the job. You can associate only one datafeed with each anomaly detection job. */
datafeed_config?: <<MlDatafeed>>
pass:[/**] @property deleting Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */
deleting?: boolean
pass:[/**] @property description A description of the job. */
description?: string
pass:[/**] @property finished_time If the job closed or failed, this is the time the job finished, otherwise it is `null`. This property is informational; you cannot change its value. */
finished_time?: <<DateTime>>
pass:[/**] @property groups A list of job groups. A job can belong to no groups or many. */
groups?: string[]
pass:[/**] @property job_id Identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */
job_id: <<Id>>
pass:[/**] @property job_type Reserved for future use, currently set to `anomaly_detector`. */
job_type?: string
pass:[/**] @property job_version The machine learning configuration version number at which the job was created. */
job_version?: <<VersionString>>
pass:[/**] @property model_plot_config This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. Model plot provides a simplified and indicative view of the model and its bounds. */
model_plot_config?: <<MlModelPlotConfig>>
model_snapshot_id?: <<Id>>
pass:[/**] @property model_snapshot_retention_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. */
model_snapshot_retention_days: <<long>>
pass:[/**] @property renormalization_window_days Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 `bucket_spans`. */
renormalization_window_days?: <<long>>
pass:[/**] @property results_index_name A text string that affects the name of the machine learning results index. The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */
results_index_name: <<IndexName>>
pass:[/**] @property results_retention_days Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */
results_retention_days?: <<long>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlJobBlocked]]
=== MlJobBlocked
[pass]
++++
<pre>
++++
interface MlJobBlocked {
reason: <<MlJobBlockedReason>>
task_id?: <<TaskId>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlJobBlockedReason]]
=== MlJobBlockedReason
[pass]
++++
<pre>
++++
type MlJobBlockedReason = 'delete' | 'reset' | 'revert'
[pass]
++++
</pre>
++++
[discrete]
[[MlJobConfig]]
=== MlJobConfig
[pass]
++++
<pre>
++++
interface MlJobConfig {
pass:[/**] @property allow_lazy_open Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */
allow_lazy_open?: boolean
pass:[/**] @property analysis_config The analysis configuration, which specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. */
analysis_config: <<MlAnalysisConfig>>
pass:[/**] @property analysis_limits Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */
analysis_limits?: <<MlAnalysisLimits>>
pass:[/**] @property background_persist_interval Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. */
background_persist_interval?: <<Duration>>
pass:[/**] @property custom_settings Advanced configuration option. Contains custom metadata about the job. */
custom_settings?: <<MlCustomSettings>>
pass:[/**] @property daily_model_snapshot_retention_after_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. */
daily_model_snapshot_retention_after_days?: <<long>>
pass:[/**] @property data_description The data description defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. */
data_description: <<MlDataDescription>>
pass:[/**] @property datafeed_config The datafeed, which retrieves data from Elasticsearch for analysis by the job. You can associate only one datafeed with each anomaly detection job. */
datafeed_config?: <<MlDatafeedConfig>>
pass:[/**] @property description A description of the job. */
description?: string
pass:[/**] @property groups A list of job groups. A job can belong to no groups or many. */
groups?: string[]
pass:[/**] @property job_id Identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */
job_id?: <<Id>>
pass:[/**] @property job_type Reserved for future use, currently set to `anomaly_detector`. */
job_type?: string
pass:[/**] @property model_plot_config This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. Model plot provides a simplified and indicative view of the model and its bounds. */
model_plot_config?: <<MlModelPlotConfig>>
pass:[/**] @property model_snapshot_retention_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */
model_snapshot_retention_days?: <<long>>
pass:[/**] @property renormalization_window_days Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 `bucket_spans`. */
renormalization_window_days?: <<long>>
pass:[/**] @property results_index_name A text string that affects the name of the machine learning results index. The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */
results_index_name?: <<IndexName>>
pass:[/**] @property results_retention_days Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */
results_retention_days?: <<long>>
}
[pass]
++++
</pre>
++++
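
For orientation, a minimal sketch of creating a job whose request body follows this interface, assuming a locally running cluster; the node URL, job id, detector, and field names are placeholders, not prescribed values:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Create an anomaly detection job; the body mirrors MlJobConfig.
// The job id, bucket span, detector, and time field are illustrative only.
await client.ml.putJob({
  job_id: 'example-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'bytes' }]
  },
  data_description: { time_field: 'timestamp' },
  results_retention_days: 60
})
----
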
[discrete]
[[MlJobForecastStatistics]]
=== MlJobForecastStatistics
[pass]
++++
<pre>
++++
interface MlJobForecastStatistics {
memory_bytes?: <<MlJobStatistics>>
processing_time_ms?: <<MlJobStatistics>>
records?: <<MlJobStatistics>>
status?: Record<string, <<long>>>
total: <<long>>
forecasted_jobs: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlJobState]]
=== MlJobState
[pass]
++++
<pre>
++++
type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening'
[pass]
++++
</pre>
++++
[discrete]
[[MlJobStatistics]]
=== MlJobStatistics
[pass]
++++
<pre>
++++
interface MlJobStatistics {
avg: <<double>>
max: <<double>>
min: <<double>>
total: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlJobStats]]
=== MlJobStats
[pass]
++++
<pre>
++++
interface MlJobStats {
pass:[/**] @property assignment_explanation For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */
assignment_explanation?: string
pass:[/**] @property data_counts An object that describes the quantity of input to the job and any related error counts. The `data_count` values are cumulative for the lifetime of a job. If a model snapshot is reverted or old results are deleted, the job counts are not reset. */
data_counts: <<MlDataCounts>>
pass:[/**] @property forecasts_stats An object that provides statistical information about forecasts belonging to this job. Some statistics are omitted if no forecasts have been made. */
forecasts_stats: <<MlJobForecastStatistics>>
pass:[/**] @property job_id Identifier for the anomaly detection job. */
job_id: string
pass:[/**] @property model_size_stats An object that provides information about the size and contents of the model. */
model_size_stats: <<MlModelSizeStats>>
pass:[/**] @property node Contains properties for the node that runs the job. This information is available only for open jobs. */
node?: <<MlDiscoveryNode>>
pass:[/**] @property open_time For open jobs only, the elapsed time for which the job has been open. */
open_time?: <<DateTime>>
pass:[/**] @property state The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */
state: <<MlJobState>>
pass:[/**] @property timing_stats An object that provides statistical information about timing aspect of this job. */
timing_stats: <<MlJobTimingStats>>
pass:[/**] @property deleting Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */
deleting?: boolean
}
[pass]
++++
</pre>
++++
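
A short sketch of reading these statistics back, under the same local-cluster assumption; the job id is a placeholder:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Each element of `jobs` in the response matches MlJobStats.
const stats = await client.ml.getJobStats({ job_id: 'example-job' })
for (const job of stats.jobs) {
  console.log(job.job_id, job.state, job.data_counts.processed_record_count)
}
----
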
[discrete]
[[MlJobTimingStats]]
=== MlJobTimingStats
[pass]
++++
<pre>
++++
interface MlJobTimingStats {
average_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>>
bucket_count: <<long>>
exponential_average_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>>
exponential_average_bucket_processing_time_per_hour_ms: <<DurationValue>><<<UnitFloatMillis>>>
job_id: <<Id>>
total_bucket_processing_time_ms: <<DurationValue>><<<UnitFloatMillis>>>
maximum_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>>
minimum_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlMemoryStatus]]
=== MlMemoryStatus
[pass]
++++
<pre>
++++
type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
[pass]
++++
</pre>
++++
[discrete]
[[MlModelPlotConfig]]
=== MlModelPlotConfig
[pass]
++++
<pre>
++++
interface MlModelPlotConfig {
pass:[/**] @property annotations_enabled If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */
annotations_enabled?: boolean
pass:[/**] @property enabled If true, enables calculation and storage of the model bounds for each entity that is being analyzed. */
enabled?: boolean
pass:[/**] @property terms Limits data collection to this comma-separated list of partition or by field values. If terms are not specified or the value is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */
terms?: <<Field>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlModelSizeStats]]
=== MlModelSizeStats
[pass]
++++
<pre>
++++
interface MlModelSizeStats {
bucket_allocation_failures_count: <<long>>
job_id: <<Id>>
log_time: <<DateTime>>
memory_status: <<MlMemoryStatus>>
model_bytes: <<ByteSize>>
model_bytes_exceeded?: <<ByteSize>>
model_bytes_memory_limit?: <<ByteSize>>
peak_model_bytes?: <<ByteSize>>
assignment_memory_basis?: string
result_type: string
total_by_field_count: <<long>>
total_over_field_count: <<long>>
total_partition_field_count: <<long>>
categorization_status: <<MlCategorizationStatus>>
categorized_doc_count: <<integer>>
dead_category_count: <<integer>>
failed_category_count: <<integer>>
frequent_category_count: <<integer>>
rare_category_count: <<integer>>
total_category_count: <<integer>>
timestamp?: <<long>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlModelSnapshot]]
=== MlModelSnapshot
[pass]
++++
<pre>
++++
interface MlModelSnapshot {
pass:[/**] @property description An optional description of the job. */
description?: string
pass:[/**] @property job_id A numerical character string that uniquely identifies the job that the snapshot was created for. */
job_id: <<Id>>
pass:[/**] @property latest_record_time_stamp The timestamp of the latest processed record. */
latest_record_time_stamp?: <<integer>>
pass:[/**] @property latest_result_time_stamp The timestamp of the latest bucket result. */
latest_result_time_stamp?: <<integer>>
pass:[/**] @property min_version The minimum version required to be able to restore the model snapshot. */
min_version: <<VersionString>>
pass:[/**] @property model_size_stats Summary information describing the model. */
model_size_stats?: <<MlModelSizeStats>>
pass:[/**] @property retain If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */
retain: boolean
pass:[/**] @property snapshot_doc_count For internal use only. */
snapshot_doc_count: <<long>>
pass:[/**] @property snapshot_id A numerical character string that uniquely identifies the model snapshot. */
snapshot_id: <<Id>>
pass:[/**] @property timestamp The creation timestamp for the snapshot. */
timestamp: <<long>>
}
[pass]
++++
</pre>
++++
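
A sketch of listing recent snapshots for a job, assuming a local cluster; the job id and page size are placeholders:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// List the five most recent snapshots for a job; each element of
// `model_snapshots` matches MlModelSnapshot.
const res = await client.ml.getModelSnapshots({
  job_id: 'example-job',
  sort: 'timestamp',
  desc: true,
  size: 5
})
for (const snapshot of res.model_snapshots) {
  console.log(snapshot.snapshot_id, new Date(snapshot.timestamp), snapshot.retain)
}
----
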
[discrete]
[[MlModelSnapshotUpgrade]]
=== MlModelSnapshotUpgrade
[pass]
++++
<pre>
++++
interface MlModelSnapshotUpgrade {
job_id: <<Id>>
snapshot_id: <<Id>>
state: <<MlSnapshotUpgradeState>>
node: <<MlDiscoveryNode>>
assignment_explanation: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlNerInferenceOptions]]
=== MlNerInferenceOptions
[pass]
++++
<pre>
++++
interface MlNerInferenceOptions {
pass:[/**] @property tokenization The tokenization options */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property classification_labels The token classification labels. Must be IOB-formatted tags. */
classification_labels?: string[]
vocabulary?: <<MlVocabulary>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlNerInferenceUpdateOptions]]
=== MlNerInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlNerInferenceUpdateOptions {
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlNlpBertTokenizationConfig]]
=== MlNlpBertTokenizationConfig
[pass]
++++
<pre>
++++
interface MlNlpBertTokenizationConfig {
pass:[/**] @property do_lower_case Should the tokenizer lower case the text */
do_lower_case?: boolean
pass:[/**] @property with_special_tokens Is tokenization completed with special tokens */
with_special_tokens?: boolean
pass:[/**] @property max_sequence_length Maximum input sequence length for the model */
max_sequence_length?: <<integer>>
pass:[/**] @property truncate Should tokenization input be automatically truncated before sending to the model for inference */
truncate?: <<MlTokenizationTruncate>>
pass:[/**] @property span Tokenization spanning options. Special value of -1 indicates no spanning takes place */
span?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlNlpRobertaTokenizationConfig]]
=== MlNlpRobertaTokenizationConfig
[pass]
++++
<pre>
++++
interface MlNlpRobertaTokenizationConfig {
pass:[/**] @property add_prefix_space Should the tokenizer prefix input with a space character */
add_prefix_space?: boolean
pass:[/**] @property with_special_tokens Is tokenization completed with special tokens */
with_special_tokens?: boolean
pass:[/**] @property max_sequence_length Maximum input sequence length for the model */
max_sequence_length?: <<integer>>
pass:[/**] @property truncate Should tokenization input be automatically truncated before sending to the model for inference */
truncate?: <<MlTokenizationTruncate>>
pass:[/**] @property span Tokenization spanning options. Special value of -1 indicates no spanning takes place */
span?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlNlpTokenizationUpdateOptions]]
=== MlNlpTokenizationUpdateOptions
[pass]
++++
<pre>
++++
interface MlNlpTokenizationUpdateOptions {
pass:[/**] @property truncate Truncate options to apply */
truncate?: <<MlTokenizationTruncate>>
pass:[/**] @property span Span options to apply */
span?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlOutlierDetectionParameters]]
=== MlOutlierDetectionParameters
[pass]
++++
<pre>
++++
interface MlOutlierDetectionParameters {
pass:[/**] @property compute_feature_influence Specifies whether the feature influence calculation is enabled. */
compute_feature_influence?: boolean
pass:[/**] @property feature_influence_threshold The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1 */
feature_influence_threshold?: <<double>>
pass:[/**] @property method The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is `ensemble`, which means that outlier detection uses an ensemble of different methods and normalizes and combines their individual outlier scores to obtain the overall outlier score. */
method?: string
pass:[/**] @property n_neighbors Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */
n_neighbors?: <<integer>>
pass:[/**] @property outlier_fraction The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */
outlier_fraction?: <<double>>
pass:[/**] @property standardization_enabled If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */
standardization_enabled?: boolean
}
[pass]
++++
</pre>
++++
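
These parameters also appear, under the same names, in the `outlier_detection` analysis of a data frame analytics job. A sketch, assuming a local cluster, with placeholder ids and index names:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// The fields under `outlier_detection` carry the same parameters
// documented above; the values chosen here are illustrative.
await client.ml.putDataFrameAnalytics({
  id: 'example-outliers',
  source: { index: 'source-index' },
  dest: { index: 'dest-index' },
  analysis: {
    outlier_detection: {
      method: 'lof',
      n_neighbors: 10,
      feature_influence_threshold: 0.1
    }
  }
})
----
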
[discrete]
[[MlOverallBucket]]
=== MlOverallBucket
[pass]
++++
<pre>
++++
interface MlOverallBucket {
pass:[/**] @property bucket_span The length of the bucket in seconds. Matches the job with the longest bucket_span value. */
bucket_span: <<DurationValue>><<<UnitSeconds>>>
pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */
is_interim: boolean
pass:[/**] @property jobs An array of objects that contain the max_anomaly_score per job_id. */
jobs: <<MlOverallBucketJob>>[]
pass:[/**] @property overall_score The top_n average of the maximum bucket anomaly_score per job. */
overall_score: <<double>>
pass:[/**] @property result_type Internal. This is always set to overall_bucket. */
result_type: string
pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */
timestamp: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property timestamp_string The start time of the bucket for which these results were calculated. */
timestamp_string: <<DateTime>>
}
[pass]
++++
</pre>
++++
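
A sketch of retrieving overall buckets across a group of jobs, assuming a local cluster; the job id pattern and score threshold are placeholders:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Summarize anomaly results across several jobs; each element of
// `overall_buckets` matches MlOverallBucket.
const res = await client.ml.getOverallBuckets({
  job_id: 'example-*',
  top_n: 2,
  overall_score: 50
})
for (const bucket of res.overall_buckets) {
  console.log(bucket.timestamp_string, bucket.overall_score, bucket.is_interim)
}
----
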
[discrete]
[[MlOverallBucketJob]]
=== MlOverallBucketJob
[pass]
++++
<pre>
++++
interface MlOverallBucketJob {
job_id: <<Id>>
max_anomaly_score: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlPage]]
=== MlPage
[pass]
++++
<pre>
++++
interface MlPage {
pass:[/**] @property from Skips the specified number of items. */
from?: <<integer>>
pass:[/**] @property size Specifies the maximum number of items to obtain. */
size?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlPassThroughInferenceOptions]]
=== MlPassThroughInferenceOptions
[pass]
++++
<pre>
++++
interface MlPassThroughInferenceOptions {
pass:[/**] @property tokenization The tokenization options */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
vocabulary?: <<MlVocabulary>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlPassThroughInferenceUpdateOptions]]
=== MlPassThroughInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlPassThroughInferenceUpdateOptions {
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlPerPartitionCategorization]]
=== MlPerPartitionCategorization
[pass]
++++
<pre>
++++
interface MlPerPartitionCategorization {
pass:[/**] @property enabled To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */
enabled?: boolean
pass:[/**] @property stop_on_warn This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. */
stop_on_warn?: boolean
}
[pass]
++++
</pre>
++++
[discrete]
[[MlPredictedValue]]
=== MlPredictedValue
[pass]
++++
<pre>
++++
type MlPredictedValue = <<ScalarValue>> | <<ScalarValue>>[]
[pass]
++++
</pre>
++++
[discrete]
[[MlQuestionAnsweringInferenceOptions]]
=== MlQuestionAnsweringInferenceOptions
[pass]
++++
<pre>
++++
interface MlQuestionAnsweringInferenceOptions {
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property max_answer_length The maximum answer length to consider */
max_answer_length?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlQuestionAnsweringInferenceUpdateOptions]]
=== MlQuestionAnsweringInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlQuestionAnsweringInferenceUpdateOptions {
pass:[/**] @property question The question to answer given the inference context */
question: string
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property max_answer_length The maximum answer length to consider for extraction */
max_answer_length?: <<integer>>
}
[pass]
++++
</pre>
++++
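
A sketch of supplying these update options at inference time, assuming a deployed question answering model; the model id, input field value, and question are placeholders:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Ask a deployed question answering model about a passage; the shape of
// `question_answering` follows the update options above.
const res = await client.ml.inferTrainedModel({
  model_id: 'example-qa-model',
  docs: [{ text_field: 'Elasticsearch was created by Shay Banon.' }],
  inference_config: {
    question_answering: { question: 'Who created Elasticsearch?' }
  }
})
console.log(res.inference_results)
----
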
[discrete]
[[MlRegressionInferenceOptions]]
=== MlRegressionInferenceOptions
[pass]
++++
<pre>
++++
interface MlRegressionInferenceOptions {
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: <<Field>>
pass:[/**] @property num_top_feature_importance_values Specifies the maximum number of feature importance values per document. */
num_top_feature_importance_values?: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlRoutingState]]
=== MlRoutingState
[pass]
++++
<pre>
++++
type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping'
[pass]
++++
</pre>
++++
[discrete]
[[MlRuleAction]]
=== MlRuleAction
[pass]
++++
<pre>
++++
type MlRuleAction = 'skip_result' | 'skip_model_update'
[pass]
++++
</pre>
++++
[discrete]
[[MlRuleCondition]]
=== MlRuleCondition
[pass]
++++
<pre>
++++
interface MlRuleCondition {
pass:[/**] @property applies_to Specifies the result property to which the condition applies. If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */
applies_to: <<MlAppliesTo>>
pass:[/**] @property operator Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */
operator: <<MlConditionOperator>>
pass:[/**] @property value The value that is compared against the `applies_to` field using the operator. */
value: <<double>>
}
[pass]
++++
</pre>
++++
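
Rule conditions are used inside a detector's custom rules. A typed sketch of a rule that skips results below an illustrative threshold:

[source,ts]
----
import type { MlDetectionRule } from '@elastic/elasticsearch/lib/api/types'

// Skip results when the actual value is below 100; `applies_to`,
// `operator`, and `value` follow MlRuleCondition.
const rule: MlDetectionRule = {
  actions: ['skip_result'],
  conditions: [{ applies_to: 'actual', operator: 'lt', value: 100 }]
}
----
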
[discrete]
[[MlRunningStateSearchInterval]]
=== MlRunningStateSearchInterval
[pass]
++++
<pre>
++++
interface MlRunningStateSearchInterval {
pass:[/**] @property end The end time. */
end?: <<Duration>>
pass:[/**] @property end_ms The end time as an epoch in milliseconds. */
end_ms: <<DurationValue>><<<UnitMillis>>>
pass:[/**] @property start The start time. */
start?: <<Duration>>
pass:[/**] @property start_ms The start time as an epoch in milliseconds. */
start_ms: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlSnapshotUpgradeState]]
=== MlSnapshotUpgradeState
[pass]
++++
<pre>
++++
type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed'
[pass]
++++
</pre>
++++
[discrete]
[[MlTextClassificationInferenceOptions]]
=== MlTextClassificationInferenceOptions
[pass]
++++
<pre>
++++
interface MlTextClassificationInferenceOptions {
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property tokenization The tokenization options */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property classification_labels Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels. */
classification_labels?: string[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTextClassificationInferenceUpdateOptions]]
=== MlTextClassificationInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlTextClassificationInferenceUpdateOptions {
pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */
num_top_classes?: <<integer>>
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property classification_labels Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels. */
classification_labels?: string[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTextEmbeddingInferenceOptions]]
=== MlTextEmbeddingInferenceOptions
[pass]
++++
<pre>
++++
interface MlTextEmbeddingInferenceOptions {
pass:[/**] @property embedding_size The number of dimensions in the embedding output */
embedding_size?: <<integer>>
pass:[/**] @property tokenization The tokenization options */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTextEmbeddingInferenceUpdateOptions]]
=== MlTextEmbeddingInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlTextEmbeddingInferenceUpdateOptions {
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTextExpansionInferenceOptions]]
=== MlTextExpansionInferenceOptions
[pass]
++++
<pre>
++++
interface MlTextExpansionInferenceOptions {
pass:[/**] @property tokenization The tokenization options */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTextExpansionInferenceUpdateOptions]]
=== MlTextExpansionInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlTextExpansionInferenceUpdateOptions {
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTimingStats]]
=== MlTimingStats
[pass]
++++
<pre>
++++
interface MlTimingStats {
pass:[/**] @property elapsed_time Runtime of the analysis in milliseconds. */
elapsed_time: <<DurationValue>><<<UnitMillis>>>
pass:[/**] @property iteration_time Runtime of the latest iteration of the analysis in milliseconds. */
iteration_time?: <<DurationValue>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTokenizationConfigContainer]]
=== MlTokenizationConfigContainer
[pass]
++++
<pre>
++++
interface MlTokenizationConfigContainer {
pass:[/**] @property bert Indicates BERT tokenization and its options */
bert?: <<MlNlpBertTokenizationConfig>>
pass:[/**] @property mpnet Indicates MPNET tokenization and its options */
mpnet?: <<MlNlpBertTokenizationConfig>>
pass:[/**] @property roberta Indicates RoBERTa tokenization and its options */
roberta?: <<MlNlpRobertaTokenizationConfig>>
}
[pass]
++++
</pre>
++++
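
A typed sketch of a container that selects BERT tokenization; the option values are illustrative, not defaults:

[source,ts]
----
import type { MlTokenizationConfigContainer } from '@elastic/elasticsearch/lib/api/types'

// Exactly one variant is set per container; this sketch selects BERT
// tokenization with illustrative option values.
const tokenization: MlTokenizationConfigContainer = {
  bert: {
    do_lower_case: true,
    with_special_tokens: true,
    max_sequence_length: 512,
    truncate: 'first'
  }
}
----
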
[discrete]
[[MlTokenizationTruncate]]
=== MlTokenizationTruncate
[pass]
++++
<pre>
++++
type MlTokenizationTruncate = 'first' | 'second' | 'none'
[pass]
++++
</pre>
++++
[discrete]
[[MlTopClassEntry]]
=== MlTopClassEntry
[pass]
++++
<pre>
++++
interface MlTopClassEntry {
class_name: string
class_probability: <<double>>
class_score: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTotalFeatureImportance]]
=== MlTotalFeatureImportance
[pass]
++++
<pre>
++++
interface MlTotalFeatureImportance {
pass:[/**] @property feature_name The feature for which this importance was calculated. */
feature_name: <<Name>>
pass:[/**] @property importance A collection of feature importance statistics related to the training data set for this particular feature. */
importance: <<MlTotalFeatureImportanceStatistics>>[]
pass:[/**] @property classes If the trained model is a classification model, feature importance statistics are gathered per target class value. */
classes: <<MlTotalFeatureImportanceClass>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTotalFeatureImportanceClass]]
=== MlTotalFeatureImportanceClass
[pass]
++++
<pre>
++++
interface MlTotalFeatureImportanceClass {
pass:[/**] @property class_name The target class value. Could be a string, boolean, or number. */
class_name: <<Name>>
pass:[/**] @property importance A collection of feature importance statistics related to the training data set for this particular feature. */
importance: <<MlTotalFeatureImportanceStatistics>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTotalFeatureImportanceStatistics]]
=== MlTotalFeatureImportanceStatistics
[pass]
++++
<pre>
++++
interface MlTotalFeatureImportanceStatistics {
pass:[/**] @property mean_magnitude The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */
mean_magnitude: <<double>>
pass:[/**] @property max The maximum importance value across all the training data for this feature. */
max: <<integer>>
pass:[/**] @property min The minimum importance value across all the training data for this feature. */
min: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelAssignment]]
=== MlTrainedModelAssignment
[pass]
++++
<pre>
++++
interface MlTrainedModelAssignment {
pass:[/**] @property assignment_state The overall assignment state. */
assignment_state: <<MlDeploymentAssignmentState>>
max_assigned_allocations?: <<integer>>
pass:[/**] @property routing_table The allocation state for each node. */
routing_table: Record<string, <<MlTrainedModelAssignmentRoutingTable>>>
pass:[/**] @property start_time The timestamp when the deployment started. */
start_time: <<DateTime>>
task_parameters: <<MlTrainedModelAssignmentTaskParameters>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelAssignmentRoutingTable]]
=== MlTrainedModelAssignmentRoutingTable
[pass]
++++
<pre>
++++
interface MlTrainedModelAssignmentRoutingTable {
pass:[/**] @property reason The reason for the current state. It is usually populated only when the `routing_state` is `failed`. */
reason: string
pass:[/**] @property routing_state The current routing state. */
routing_state: <<MlRoutingState>>
pass:[/**] @property current_allocations Current number of allocations. */
current_allocations: <<integer>>
pass:[/**] @property target_allocations Target number of allocations. */
target_allocations: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelAssignmentTaskParameters]]
=== MlTrainedModelAssignmentTaskParameters
[pass]
++++
<pre>
++++
interface MlTrainedModelAssignmentTaskParameters {
pass:[/**] @property model_bytes The size of the trained model in bytes. */
model_bytes: <<integer>>
pass:[/**] @property model_id The unique identifier for the trained model. */
model_id: <<Id>>
pass:[/**] @property deployment_id The unique identifier for the trained model deployment. */
deployment_id: <<Id>>
pass:[/**] @property cache_size The size of the trained model cache. */
cache_size: <<ByteSize>>
pass:[/**] @property number_of_allocations The total number of allocations this model is assigned across ML nodes. */
number_of_allocations: <<integer>>
priority: <<MlTrainingPriority>>
pass:[/**] @property queue_capacity The number of inference requests allowed in the queue at a time. */
queue_capacity: <<integer>>
pass:[/**] @property threads_per_allocation Number of threads per allocation. */
threads_per_allocation: <<integer>>
}
[pass]
++++
</pre>
++++
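
Most of these task parameters are fixed when the deployment is started. A sketch, assuming a local cluster and a placeholder model id:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// The sizing chosen here surfaces in the assignment's task parameters;
// the values are illustrative.
await client.ml.startTrainedModelDeployment({
  model_id: 'example-model',
  number_of_allocations: 1,
  threads_per_allocation: 2,
  queue_capacity: 1024,
  priority: 'normal'
})
----
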
[discrete]
[[MlTrainedModelConfig]]
=== MlTrainedModelConfig
[pass]
++++
<pre>
++++
interface MlTrainedModelConfig {
pass:[/**] @property model_id Identifier for the trained model. */
model_id: <<Id>>
pass:[/**] @property model_type The model type */
model_type?: <<MlTrainedModelType>>
pass:[/**] @property tags An array of tags. A trained model can have many tags, or none. */
tags: string[]
pass:[/**] @property version The Elasticsearch version number in which the trained model was created. */
version?: <<VersionString>>
compressed_definition?: string
pass:[/**] @property created_by Information on the creator of the trained model. */
created_by?: string
pass:[/**] @property create_time The time when the trained model was created. */
create_time?: <<DateTime>>
pass:[/**] @property default_field_map Any field map described in the inference configuration takes precedence. */
default_field_map?: Record<string, string>
pass:[/**] @property description The free-text description of the trained model. */
description?: string
pass:[/**] @property estimated_heap_memory_usage_bytes The estimated heap usage in bytes to keep the trained model in memory. */
estimated_heap_memory_usage_bytes?: <<integer>>
pass:[/**] @property estimated_operations The estimated number of operations to use the trained model. */
estimated_operations?: <<integer>>
pass:[/**] @property fully_defined True if the full model definition is present. */
fully_defined?: boolean
pass:[/**] @property inference_config The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */
inference_config?: <<MlInferenceConfigCreateContainer>>
pass:[/**] @property input The input field names for the model definition. */
input: <<MlTrainedModelConfigInput>>
pass:[/**] @property license_level The license level of the trained model. */
license_level?: string
pass:[/**] @property metadata An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. */
metadata?: <<MlTrainedModelConfigMetadata>>
model_size_bytes?: <<ByteSize>>
location?: <<MlTrainedModelLocation>>
prefix_strings?: <<MlTrainedModelPrefixStrings>>
}
[pass]
++++
</pre>
++++
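
A sketch of retrieving trained model configurations, assuming a local cluster; the model id is a placeholder:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Each element of `trained_model_configs` matches MlTrainedModelConfig.
const res = await client.ml.getTrainedModels({ model_id: 'example-model' })
for (const model of res.trained_model_configs) {
  console.log(model.model_id, model.model_type, model.description)
}
----
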
[discrete]
[[MlTrainedModelConfigInput]]
=== MlTrainedModelConfigInput
[pass]
++++
<pre>
++++
interface MlTrainedModelConfigInput {
pass:[/**] @property field_names An array of input field names for the model. */
field_names: <<Field>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelConfigMetadata]]
=== MlTrainedModelConfigMetadata
[pass]
++++
<pre>
++++
interface MlTrainedModelConfigMetadata {
model_aliases?: string[]
pass:[/**] @property feature_importance_baseline An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */
feature_importance_baseline?: Record<string, string>
pass:[/**] @property hyperparameters List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */
hyperparameters?: <<MlHyperparameter>>[]
pass:[/**] @property total_feature_importance An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. */
total_feature_importance?: <<MlTotalFeatureImportance>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelDeploymentAllocationStatus]]
=== MlTrainedModelDeploymentAllocationStatus
[pass]
++++
<pre>
++++
interface MlTrainedModelDeploymentAllocationStatus {
pass:[/**] @property allocation_count The current number of nodes where the model is allocated. */
allocation_count: <<integer>>
pass:[/**] @property state The detailed allocation state related to the nodes. */
state: <<MlDeploymentAllocationState>>
pass:[/**] @property target_allocation_count The desired number of nodes for model allocation. */
target_allocation_count: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelDeploymentNodesStats]]
=== MlTrainedModelDeploymentNodesStats
[pass]
++++
<pre>
++++
interface MlTrainedModelDeploymentNodesStats {
pass:[/**] @property average_inference_time_ms The average time for each inference call to complete on this node. */
average_inference_time_ms: <<DurationValue>><<<UnitFloatMillis>>>
pass:[/**] @property error_count The number of errors when evaluating the trained model. */
error_count: <<integer>>
pass:[/**] @property inference_count The total number of inference calls made against this node for this model. */
inference_count: <<integer>>
pass:[/**] @property last_access The epoch time stamp of the last inference call for the model on this node. */
last_access: <<long>>
pass:[/**] @property node Information pertaining to the node. */
node: <<MlDiscoveryNode>>
pass:[/**] @property number_of_allocations The number of allocations assigned to this node. */
number_of_allocations: <<integer>>
pass:[/**] @property number_of_pending_requests The number of inference requests queued to be processed. */
number_of_pending_requests: <<integer>>
pass:[/**] @property rejection_execution_count The number of inference requests that were not processed because the queue was full. */
rejection_execution_count: <<integer>>
pass:[/**] @property routing_state The current routing state and reason for the current routing state for this allocation. */
routing_state: <<MlTrainedModelAssignmentRoutingTable>>
pass:[/**] @property start_time The epoch timestamp when the allocation started. */
start_time: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property threads_per_allocation The number of threads used by each allocation during inference. */
threads_per_allocation: <<integer>>
pass:[/**] @property timeout_count The number of inference requests that timed out before being processed. */
timeout_count: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelDeploymentStats]]
=== MlTrainedModelDeploymentStats
[pass]
++++
<pre>
++++
interface MlTrainedModelDeploymentStats {
pass:[/**] @property allocation_status The detailed allocation status for the deployment. */
allocation_status: <<MlTrainedModelDeploymentAllocationStatus>>
cache_size?: <<ByteSize>>
pass:[/**] @property deployment_id The unique identifier for the trained model deployment. */
deployment_id: <<Id>>
pass:[/**] @property error_count The sum of `error_count` for all nodes in the deployment. */
error_count: <<integer>>
pass:[/**] @property inference_count The sum of `inference_count` for all nodes in the deployment. */
inference_count: <<integer>>
pass:[/**] @property model_id The unique identifier for the trained model. */
model_id: <<Id>>
pass:[/**] @property nodes The deployment stats for each node that currently has the model allocated. In serverless, stats are reported for a single unnamed virtual node. */
nodes: <<MlTrainedModelDeploymentNodesStats>>[]
pass:[/**] @property number_of_allocations The number of allocations requested. */
number_of_allocations: <<integer>>
pass:[/**] @property queue_capacity The number of inference requests that can be queued before new requests are rejected. */
queue_capacity: <<integer>>
pass:[/**] @property rejected_execution_count The sum of `rejected_execution_count` for all nodes in the deployment. Individual nodes reject an inference request if the inference queue is full. The queue size is controlled by the `queue_capacity` setting in the start trained model deployment API. */
rejected_execution_count: <<integer>>
pass:[/**] @property reason The reason for the current deployment state. Usually only populated when the model is not deployed to a node. */
reason: string
pass:[/**] @property start_time The epoch timestamp when the deployment started. */
start_time: <<EpochTime>><<<UnitMillis>>>
pass:[/**] @property state The overall state of the deployment. */
state: <<MlDeploymentAssignmentState>>
pass:[/**] @property threads_per_allocation The number of threads used by each allocation during inference. */
threads_per_allocation: <<integer>>
pass:[/**] @property timeout_count The sum of `timeout_count` for all nodes in the deployment. */
timeout_count: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelEntities]]
=== MlTrainedModelEntities
[pass]
++++
<pre>
++++
interface MlTrainedModelEntities {
class_name: string
class_probability: <<double>>
entity: string
start_pos: <<integer>>
end_pos: <<integer>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelInferenceClassImportance]]
=== MlTrainedModelInferenceClassImportance
[pass]
++++
<pre>
++++
interface MlTrainedModelInferenceClassImportance {
class_name: string
importance: <<double>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelInferenceFeatureImportance]]
=== MlTrainedModelInferenceFeatureImportance
[pass]
++++
<pre>
++++
interface MlTrainedModelInferenceFeatureImportance {
feature_name: string
importance?: <<double>>
classes?: <<MlTrainedModelInferenceClassImportance>>[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelInferenceStats]]
=== MlTrainedModelInferenceStats
[pass]
++++
<pre>
++++
interface MlTrainedModelInferenceStats {
pass:[/**] @property cache_miss_count The number of times the model was loaded for inference and was not retrieved from the cache. If this number is close to the `inference_count`, the cache is not being appropriately used. This can be solved by increasing the cache size or its time-to-live (TTL). Refer to general machine learning settings for the appropriate settings. */
cache_miss_count: <<integer>>
pass:[/**] @property failure_count The number of failures when using the model for inference. */
failure_count: <<integer>>
pass:[/**] @property inference_count The total number of times the model has been called for inference. This is across all inference contexts, including all pipelines. */
inference_count: <<integer>>
pass:[/**] @property missing_all_fields_count The number of inference calls where all the training features for the model were missing. */
missing_all_fields_count: <<integer>>
pass:[/**] @property timestamp The time when the statistics were last updated. */
timestamp: <<EpochTime>><<<UnitMillis>>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelLocation]]
=== MlTrainedModelLocation
[pass]
++++
<pre>
++++
interface MlTrainedModelLocation {
index: <<MlTrainedModelLocationIndex>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelLocationIndex]]
=== MlTrainedModelLocationIndex
[pass]
++++
<pre>
++++
interface MlTrainedModelLocationIndex {
name: <<IndexName>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelPrefixStrings]]
=== MlTrainedModelPrefixStrings
[pass]
++++
<pre>
++++
interface MlTrainedModelPrefixStrings {
pass:[/**] @property ingest String prepended to input at ingest */
ingest?: string
pass:[/**] @property search String prepended to input at search */
search?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelSizeStats]]
=== MlTrainedModelSizeStats
[pass]
++++
<pre>
++++
interface MlTrainedModelSizeStats {
pass:[/**] @property model_size_bytes The size of the model in bytes. */
model_size_bytes: <<ByteSize>>
pass:[/**] @property required_native_memory_bytes The amount of memory required to load the model in bytes. */
required_native_memory_bytes: <<ByteSize>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainedModelStats]]
=== MlTrainedModelStats
[pass]
++++
<pre>
++++
interface MlTrainedModelStats {
pass:[/**] @property deployment_stats A collection of deployment stats, which is present when the models are deployed. */
deployment_stats?: <<MlTrainedModelDeploymentStats>>
pass:[/**] @property inference_stats A collection of inference stats fields. */
inference_stats?: <<MlTrainedModelInferenceStats>>
pass:[/**] @property ingest A collection of ingest stats for the model across all nodes. The values are summations of the individual node statistics. The format matches the ingest section in the nodes stats API. */
ingest?: Record<string, any>
pass:[/**] @property model_id The unique identifier of the trained model. */
model_id: <<Id>>
pass:[/**] @property model_size_stats A collection of model size stats. */
model_size_stats: <<MlTrainedModelSizeStats>>
pass:[/**] @property pipeline_count The number of ingest pipelines that currently refer to the model. */
pipeline_count: <<integer>>
}
[pass]
++++
</pre>
++++
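
A sketch of reading these statistics, assuming a local cluster; the model id is a placeholder:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Each element of `trained_model_stats` matches MlTrainedModelStats;
// `deployment_stats` is present only for deployed models.
const res = await client.ml.getTrainedModelsStats({ model_id: 'example-model' })
for (const stats of res.trained_model_stats) {
  console.log(stats.model_id, stats.pipeline_count, stats.deployment_stats?.state)
}
----
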
[discrete]
[[MlTrainedModelType]]
=== MlTrainedModelType
[pass]
++++
<pre>
++++
type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch'
[pass]
++++
</pre>
++++
[discrete]
[[MlTrainingPriority]]
=== MlTrainingPriority
[pass]
++++
<pre>
++++
type MlTrainingPriority = 'normal' | 'low'
[pass]
++++
</pre>
++++
[discrete]
[[MlTransformAuthorization]]
=== MlTransformAuthorization
[pass]
++++
<pre>
++++
interface MlTransformAuthorization {
pass:[/**] @property api_key If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */
api_key?: <<MlApiKeyAuthorization>>
pass:[/**] @property roles If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response. */
roles?: string[]
pass:[/**] @property service_account If a service account was used for the most recent update to the transform, the account name is listed in the response. */
service_account?: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlValidationLoss]]
=== MlValidationLoss
[pass]
++++
<pre>
++++
interface MlValidationLoss {
pass:[/**] @property fold_values Validation loss values for every added decision tree during the forest growing procedure. */
fold_values: string[]
pass:[/**] @property loss_type The type of the loss metric. For example, binomial_logistic. */
loss_type: string
}
[pass]
++++
</pre>
++++
[discrete]
[[MlVocabulary]]
=== MlVocabulary
[pass]
++++
<pre>
++++
interface MlVocabulary {
index: <<IndexName>>
}
[pass]
++++
</pre>
++++
[discrete]
[[MlZeroShotClassificationInferenceOptions]]
=== MlZeroShotClassificationInferenceOptions
[pass]
++++
<pre>
++++
interface MlZeroShotClassificationInferenceOptions {
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlTokenizationConfigContainer>>
pass:[/**] @property hypothesis_template Hypothesis template used when tokenizing labels for prediction */
hypothesis_template?: string
pass:[/**] @property classification_labels The zero-shot classification labels indicating entailment, neutral, and contradiction. Must contain exactly and only entailment, neutral, and contradiction. */
classification_labels: string[]
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property multi_label Indicates if more than one true label exists. */
multi_label?: boolean
pass:[/**] @property labels The labels to predict. */
labels?: string[]
}
[pass]
++++
</pre>
++++
[discrete]
[[MlZeroShotClassificationInferenceUpdateOptions]]
=== MlZeroShotClassificationInferenceUpdateOptions
[pass]
++++
<pre>
++++
interface MlZeroShotClassificationInferenceUpdateOptions {
pass:[/**] @property tokenization The tokenization options to update when inferring */
tokenization?: <<MlNlpTokenizationUpdateOptions>>
pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
results_field?: string
pass:[/**] @property multi_label Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */
multi_label?: boolean
pass:[/**] @property labels The labels to predict. */
labels: string[]
}
[pass]
++++
</pre>
++++
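
Finally, a sketch of overriding the predicted labels at inference time, assuming a deployed zero-shot model; the model id, input field value, and labels are placeholders:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder endpoint

// Override the labels to predict at inference time; the shape of
// `zero_shot_classification` follows the update options above.
const res = await client.ml.inferTrainedModel({
  model_id: 'example-zero-shot-model',
  docs: [{ text_field: 'The stock market dropped sharply today.' }],
  inference_config: {
    zero_shot_classification: {
      labels: ['finance', 'sports', 'weather'],
      multi_label: false
    }
  }
})
console.log(res.inference_results)
----
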