/**
 * We are still working on this type, it will arrive soon.
 * If it's critical for you, please open an issue.
 * https://github.com/elastic/elasticsearch-js
 */
export type TODO = Record<string, any>;

export interface BulkCreateOperation extends BulkWriteOperation {
}

export interface BulkDeleteOperation extends BulkOperationBase {
}

export type BulkFailureStoreStatus = 'not_applicable_or_unknown' | 'used' | 'not_enabled' | 'failed';

export interface BulkIndexOperation extends BulkWriteOperation {
}

export interface BulkOperationBase {
  /** The document ID. */
  _id?: Id;
  /** The name of the index or index alias to perform the action on. */
  _index?: IndexName;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  if_primary_term?: long;
  if_seq_no?: SequenceNumber;
  version?: VersionNumber;
  version_type?: VersionType;
}

export interface BulkOperationContainer {
  /** Index the specified document.
   * If the document exists, it replaces the document and increments the version.
   * The following line must contain the source data to be indexed. */
  index?: BulkIndexOperation;
  /** Index the specified document if it does not already exist.
   * The following line must contain the source data to be indexed. */
  create?: BulkCreateOperation;
  /** Perform a partial document update.
   * The following line must contain the partial document and update options. */
  update?: BulkUpdateOperation;
  /** Remove the specified document from the index. */
  delete?: BulkDeleteOperation;
}

export type BulkOperationType = 'index' | 'create' | 'update' | 'delete';
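// Illustrative sketch (not part of the generated types): a bulk payload is an
// array that interleaves action containers with document sources. The index
// name and document shape below are assumptions made for this example only.
interface ExampleLogDoc {
  message: string;
  level: string;
}

const exampleBulkOperations: (BulkOperationContainer | ExampleLogDoc)[] = [
  { index: { _index: 'logs-example' } },            // action line
  { message: 'hello world', level: 'info' },        // source line for the action above
  { delete: { _index: 'logs-example', _id: '1' } }, // delete takes no source line
];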
export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
  /** The name of the data stream, index, or index alias to perform bulk actions on. */
  index?: IndexName;
  /** If `true`, the document source is included in the error message in case of parsing errors. */
  include_source_on_error?: boolean;
  /** If `true`, the response will include the ingest pipelines that were run for each index or create. */
  list_executed_pipelines?: boolean;
  /** The pipeline identifier to use to preprocess incoming documents.
   * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.
   * If a final pipeline is configured, it will always run regardless of the value of this parameter. */
  pipeline?: string;
  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
   * If `wait_for`, wait for a refresh to make this operation visible to search.
   * If `false`, do nothing with refreshes.
   * Valid values: `true`, `false`, `wait_for`. */
  refresh?: Refresh;
  /** A custom value that is used to route operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude from the response.
   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response.
   * If this parameter is specified, only these source fields are returned.
   * You can exclude fields from this subset using the `_source_excludes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_includes?: Fields;
  /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards.
   * The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing.
   * The actual wait time could be longer, particularly when multiple waits occur. */
  timeout?: Duration;
  /** The number of shard copies that must be active before proceeding with the operation.
   * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
   * The default is `1`, which waits for each primary shard to be active. */
  wait_for_active_shards?: WaitForActiveShards;
  /** If `true`, the request's actions must target an index alias. */
  require_alias?: boolean;
  /** If `true`, the request's actions must target a data stream (existing or to be created). */
  require_data_stream?: boolean;
  operations?: (BulkOperationContainer | BulkUpdateAction<TDocument, TPartialDocument> | TDocument)[];
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; include_source_on_error?: never; list_executed_pipelines?: never; pipeline?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; timeout?: never; wait_for_active_shards?: never; require_alias?: never; require_data_stream?: never; operations?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; include_source_on_error?: never; list_executed_pipelines?: never; pipeline?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; timeout?: never; wait_for_active_shards?: never; require_alias?: never; require_data_stream?: never; operations?: never; };
}

export interface BulkResponse {
  /** If `true`, one or more of the operations in the bulk request did not complete successfully. */
  errors: boolean;
  /** The result of each operation in the bulk request, in the order they were submitted. */
  items: Partial<Record<BulkOperationType, BulkResponseItem>>[];
  /** The length of time, in milliseconds, it took to process the bulk request. */
  took: long;
  ingest_took?: long;
}
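// Usage sketch under stated assumptions: `client` is typed structurally here
// rather than imported from @elastic/elasticsearch, and the index, documents,
// and IDs are illustrative. Shows inspecting per-item results when `errors` is true.
async function exampleBulk(
  client: { bulk: (req: BulkRequest) => Promise<BulkResponse> }
): Promise<void> {
  const resp = await client.bulk({
    refresh: 'wait_for',
    operations: [
      { index: { _index: 'logs-example' } },
      { message: 'hello', level: 'info' },
      { update: { _index: 'logs-example', _id: '42' } },
      { doc: { level: 'warn' }, doc_as_upsert: true }, // a BulkUpdateAction follows an update action
    ],
  });
  if (resp.errors) {
    for (const item of resp.items) {
      const op = item.index ?? item.create ?? item.update ?? item.delete;
      if (op?.error) console.log(op.status, op.error.type);
    }
  }
}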
export interface BulkResponseItem {
  /** The document ID associated with the operation. */
  _id?: string | null;
  /** The name of the index associated with the operation.
   * If the operation targeted a data stream, this is the backing index into which the document was written. */
  _index: string;
  /** The HTTP status code returned for the operation. */
  status: integer;
  failure_store?: BulkFailureStoreStatus;
  /** Additional information about the failed operation.
   * The property is returned only for failed operations. */
  error?: ErrorCause;
  /** The primary term assigned to the document for the operation.
   * This property is returned only for successful operations. */
  _primary_term?: long;
  /** The result of the operation.
   * Successful values are `created`, `deleted`, and `updated`. */
  result?: string;
  /** The sequence number assigned to the document for the operation.
   * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */
  _seq_no?: SequenceNumber;
  /** Shard information for the operation. */
  _shards?: ShardStatistics;
  /** The document version associated with the operation.
   * The document version is incremented each time the document is updated.
   * This property is returned only for successful actions. */
  _version?: VersionNumber;
  forced_refresh?: boolean;
  get?: InlineGet<Record<string, any>>;
}

export interface BulkUpdateAction<TDocument = unknown, TPartialDocument = unknown> {
  /** If true, the `result` in the response is set to `noop` when no changes to the document occur. */
  detect_noop?: boolean;
  /** A partial update to an existing document. */
  doc?: TPartialDocument;
  /** Set to `true` to use the contents of `doc` as the value of `upsert`. */
  doc_as_upsert?: boolean;
  /** The script to run to update the document. */
  script?: Script | ScriptSource;
  /** Set to `true` to run the script whether or not the document exists. */
  scripted_upsert?: boolean;
  /** If `false`, source retrieval is turned off.
   * You can also specify a comma-separated list of the fields you want to retrieve. */
  _source?: SearchSourceConfig;
  /** If the document does not already exist, the contents of `upsert` are inserted as a new document.
   * If the document exists, the `script` is run. */
  upsert?: TDocument;
}

export interface BulkUpdateOperation extends BulkOperationBase {
  /** If `true`, the request's actions must target an index alias. */
  require_alias?: boolean;
  /** The number of times an update should be retried in the case of a version conflict. */
  retry_on_conflict?: integer;
}

export interface BulkWriteOperation extends BulkOperationBase {
  /** A map from the full name of fields to the name of dynamic templates.
   * It defaults to an empty map.
   * If a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template.
   * If a field is already defined in the mapping, then this parameter won't be used. */
  dynamic_templates?: Record<string, string>;
  /** The ID of the pipeline to use to preprocess incoming documents.
   * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.
   * If a final pipeline is configured, it will always run regardless of the value of this parameter. */
  pipeline?: string;
  /** If `true`, the request's actions must target an index alias. */
  require_alias?: boolean;
}

export interface ClearScrollRequest extends RequestBase {
  /** A comma-separated list of scroll IDs to clear.
   * To clear all scroll IDs, use `_all`.
   * IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */
  scroll_id?: ScrollIds;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { scroll_id?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { scroll_id?: never; };
}

export interface ClearScrollResponse {
  /** If `true`, the request succeeded.
   * This does not indicate whether any scrolling search requests were cleared. */
  succeeded: boolean;
  /** The number of scrolling search requests cleared. */
  num_freed: integer;
}

export interface ClosePointInTimeRequest extends RequestBase {
  /** The ID of the point-in-time. */
  id: Id;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; };
}
export interface ClosePointInTimeResponse {
  /** If `true`, all search contexts associated with the point-in-time ID were successfully closed. */
  succeeded: boolean;
  /** The number of search contexts that were successfully closed. */
  num_freed: integer;
}
export interface CountRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and aliases to search.
   * It supports wildcards (`*`).
   * To search all data streams and indices, omit this parameter or use `*` or `_all`. */
  index?: Indices;
  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
   * This behavior applies even if the request targets other open indices.
   * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** The analyzer to use for the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyzer?: string;
  /** If `true`, wildcard and prefix queries are analyzed.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyze_wildcard?: boolean;
  /** The default operator for query string query: `AND` or `OR`.
   * This parameter can be used only when the `q` query string parameter is specified. */
  default_operator?: QueryDslOperator;
  /** The field to use as a default when no field prefix is given in the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  df?: string;
  /** The type of index that wildcard patterns can match.
   * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
   * It supports comma-separated values, such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** If `true`, concrete, expanded, or aliased indices are ignored when frozen. */
  ignore_throttled?: boolean;
  /** If `false`, the request returns an error if it targets a missing or closed index. */
  ignore_unavailable?: boolean;
  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
   * This parameter can be used only when the `q` query string parameter is specified. */
  lenient?: boolean;
  /** The minimum `_score` value that documents must have to be included in the result. */
  min_score?: double;
  /** The node or shard the operation should be performed on.
   * By default, it is random. */
  preference?: string;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** The maximum number of documents to collect for each shard.
   * If a query reaches this limit, Elasticsearch terminates the query early.
   * Elasticsearch collects documents before sorting.
   *
   * IMPORTANT: Use with caution.
   * Elasticsearch applies this parameter to each shard handling the request.
   * When possible, let Elasticsearch perform early termination automatically.
   * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */
  terminate_after?: long;
  /** The query in Lucene query string syntax. This parameter cannot be used with a request body. */
  q?: string;
  /** Defines the search query using Query DSL. A request body query cannot be used
   * with the `q` query string parameter. */
  query?: QueryDslQueryContainer;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; lenient?: never; min_score?: never; preference?: never; routing?: never; terminate_after?: never; q?: never; query?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; lenient?: never; min_score?: never; preference?: never; routing?: never; terminate_after?: never; q?: never; query?: never; };
}

export interface CountResponse {
  count: long;
  _shards: ShardStatistics;
}
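// Usage sketch (assumptions: `client` is typed structurally rather than
// imported from @elastic/elasticsearch; the index and query are illustrative).
async function exampleCount(
  client: { count: (req: CountRequest) => Promise<CountResponse> }
): Promise<long> {
  const resp = await client.count({
    index: 'logs-example',
    query: { match: { level: 'error' } },
  });
  return resp.count;
}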
export interface CreateRequest<TDocument = unknown> extends RequestBase {
  /** A unique identifier for the document.
   * To automatically generate a document ID, use the `POST /<target>/_doc/` request format. */
  id: Id;
  /** The name of the data stream or index to target.
   * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream.
   * If the target doesn't exist and doesn't match a data stream template, this request creates the index. */
  index: IndexName;
  /** Only perform the operation if the document has this primary term. */
  if_primary_term?: long;
  /** Only perform the operation if the document has this sequence number. */
  if_seq_no?: SequenceNumber;
  /** If `true`, the document source is included in the error message in case of parsing errors. */
  include_source_on_error?: boolean;
  /** Set to `create` to only index the document if it does not already exist (put if absent).
   * If a document with the specified `_id` already exists, the indexing operation will fail.
   * The behavior is the same as using the `<index>/_create/<_id>` endpoint.
   * If a document ID is specified, this parameter defaults to `index`.
   * Otherwise, it defaults to `create`.
   * If the request targets a data stream, an `op_type` of `create` is required. */
  op_type?: OpType;
  /** The ID of the pipeline to use to preprocess incoming documents.
   * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.
   * If a final pipeline is configured, it will always run regardless of the value of this parameter. */
  pipeline?: string;
  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
   * If `wait_for`, it waits for a refresh to make this operation visible to search.
   * If `false`, it does nothing with refreshes. */
  refresh?: Refresh;
  /** If `true`, the destination must be an index alias. */
  require_alias?: boolean;
  /** If `true`, the request's actions must target a data stream (existing or to be created). */
  require_data_stream?: boolean;
  /** A custom value that is used to route operations to a specific shard. */
  routing?: Routing;
  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
   * Elasticsearch waits for at least the specified timeout period before failing.
   * The actual wait time could be longer, particularly when multiple waits occur.
   *
   * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs.
   * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation.
   * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error.
   * The actual wait time could be longer, particularly when multiple waits occur. */
  timeout?: Duration;
  /** The explicit version number for concurrency control.
   * It must be a non-negative long number. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** The number of shard copies that must be active before proceeding with the operation.
   * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
   * The default value of `1` means it waits for each primary shard to be active. */
  wait_for_active_shards?: WaitForActiveShards;
  document?: TDocument;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; include_source_on_error?: never; op_type?: never; pipeline?: never; refresh?: never; require_alias?: never; require_data_stream?: never; routing?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; document?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; include_source_on_error?: never; op_type?: never; pipeline?: never; refresh?: never; require_alias?: never; require_data_stream?: never; routing?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; document?: never; };
}

export type CreateResponse = WriteResponseBase;
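// Usage sketch (structural client typing; index, id, and document shape are
// illustrative). `create` is "put if absent": it fails if the `_id` already exists.
async function exampleCreate(
  client: { create: (req: CreateRequest<{ title: string }>) => Promise<CreateResponse> }
): Promise<void> {
  await client.create({
    index: 'books-example',
    id: '1',
    document: { title: 'Snow Crash' },
  });
}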
export interface DeleteRequest extends RequestBase {
  /** A unique identifier for the document. */
  id: Id;
  /** The name of the target index. */
  index: IndexName;
  /** Only perform the operation if the document has this primary term. */
  if_primary_term?: long;
  /** Only perform the operation if the document has this sequence number. */
  if_seq_no?: SequenceNumber;
  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
   * If `wait_for`, it waits for a refresh to make this operation visible to search.
   * If `false`, it does nothing with refreshes. */
  refresh?: Refresh;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** The period to wait for active shards.
   *
   * This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs.
   * Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation.
   * By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */
  timeout?: Duration;
  /** An explicit version number for concurrency control.
   * It must match the current version of the document for the request to succeed. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** The minimum number of shard copies that must be active before proceeding with the operation.
   * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
   * The default value of `1` means it waits for each primary shard to be active. */
  wait_for_active_shards?: WaitForActiveShards;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; refresh?: never; routing?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; refresh?: never; routing?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; };
}

export type DeleteResponse = WriteResponseBase;
export interface DeleteByQueryRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and aliases to search.
   * It supports wildcards (`*`).
   * To search all data streams or indices, omit this parameter or use `*` or `_all`. */
  index: Indices;
  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
   * This behavior applies even if the request targets other open indices.
   * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** Analyzer to use for the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyzer?: string;
  /** If `true`, wildcard and prefix queries are analyzed.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyze_wildcard?: boolean;
  /** What to do if delete by query hits version conflicts: `abort` or `proceed`. */
  conflicts?: Conflicts;
  /** The default operator for query string query: `AND` or `OR`.
   * This parameter can be used only when the `q` query string parameter is specified. */
  default_operator?: QueryDslOperator;
  /** The field to use as default where no field prefix is given in the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  df?: string;
  /** The type of index that wildcard patterns can match.
   * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
   * It supports comma-separated values, such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** Skips the specified number of documents. */
  from?: long;
  /** If `false`, the request returns an error if it targets a missing or closed index. */
  ignore_unavailable?: boolean;
  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
   * This parameter can be used only when the `q` query string parameter is specified. */
  lenient?: boolean;
  /** The node or shard the operation should be performed on.
   * It is random by default. */
  preference?: string;
  /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes.
   * This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed.
   * Unlike the delete API, it does not support `wait_for`. */
  refresh?: boolean;
  /** If `true`, the request cache is used for this request.
   * Defaults to the index-level setting. */
  request_cache?: boolean;
  /** The throttle for this request in sub-requests per second. */
  requests_per_second?: float;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** A query in the Lucene query string syntax. */
  q?: string;
  /** The period to retain the search context for scrolling. */
  scroll?: Duration;
  /** The size of the scroll request that powers the operation. */
  scroll_size?: long;
  /** The explicit timeout for each search request.
   * It defaults to no timeout. */
  search_timeout?: Duration;
  /** The type of the search operation.
   * Available options include `query_then_fetch` and `dfs_query_then_fetch`. */
  search_type?: SearchType;
  /** The number of slices this task should be divided into. */
  slices?: Slices;
  /** A comma-separated list of `<field>:<direction>` pairs. */
  sort?: string[];
  /** The specific `tag` of the request for logging and statistical purposes. */
  stats?: string[];
  /** The maximum number of documents to collect for each shard.
   * If a query reaches this limit, Elasticsearch terminates the query early.
   * Elasticsearch collects documents before sorting.
   *
   * Use with caution.
   * Elasticsearch applies this parameter to each shard handling the request.
   * When possible, let Elasticsearch perform early termination automatically.
   * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */
  terminate_after?: long;
  /** The period each deletion request waits for active shards. */
  timeout?: Duration;
  /** If `true`, returns the document version as part of a hit. */
  version?: boolean;
  /** The number of shard copies that must be active before proceeding with the operation.
   * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
   * The `timeout` value controls how long each write request waits for unavailable shards to become available. */
  wait_for_active_shards?: WaitForActiveShards;
  /** If `true`, the request blocks until the operation is complete.
   * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */
  wait_for_completion?: boolean;
  /** The maximum number of documents to delete. */
  max_docs?: long;
  /** The documents to delete specified with Query DSL. */
  query?: QueryDslQueryContainer;
  /** Slice the request manually using the provided slice ID and total number of slices. */
  slice?: SlicedScroll;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; conflicts?: never; default_operator?: never; df?: never; expand_wildcards?: never; from?: never; ignore_unavailable?: never; lenient?: never; preference?: never; refresh?: never; request_cache?: never; requests_per_second?: never; routing?: never; q?: never; scroll?: never; scroll_size?: never; search_timeout?: never; search_type?: never; slices?: never; sort?: never; stats?: never; terminate_after?: never; timeout?: never; version?: never; wait_for_active_shards?: never; wait_for_completion?: never; max_docs?: never; query?: never; slice?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; conflicts?: never; default_operator?: never; df?: never; expand_wildcards?: never; from?: never; ignore_unavailable?: never; lenient?: never; preference?: never; refresh?: never; request_cache?: never; requests_per_second?: never; routing?: never; q?: never; scroll?: never; scroll_size?: never; search_timeout?: never; search_type?: never; slices?: never; sort?: never; stats?: never; terminate_after?: never; timeout?: never; version?: never; wait_for_active_shards?: never; wait_for_completion?: never; max_docs?: never; query?: never; slice?: never; };
}
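// Usage sketch (structural client typing; index and query are illustrative).
// `conflicts: 'proceed'` counts version conflicts instead of aborting;
// `wait_for_completion: false` would instead return a task to poll.
async function exampleDeleteByQuery(
  client: { deleteByQuery: (req: DeleteByQueryRequest) => Promise<DeleteByQueryResponse> }
): Promise<void> {
  const resp = await client.deleteByQuery({
    index: 'logs-example',
    conflicts: 'proceed',
    query: { range: { '@timestamp': { lt: 'now-30d' } } },
  });
  console.log(`deleted ${resp.deleted ?? 0} of ${resp.total ?? 0}`);
}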
export interface DeleteByQueryResponse {
  /** The number of scroll responses pulled back by the delete by query. */
  batches?: long;
  /** The number of documents that were successfully deleted. */
  deleted?: long;
  /** An array of failures if there were any unrecoverable errors during the process.
   * If this array is not empty, the request ended abnormally because of those failures.
   * Delete by query is implemented using batches and any failures cause the entire process to end but all failures in the current batch are collected into the array.
   * You can use the `conflicts` option to prevent the operation from ending on version conflicts. */
  failures?: BulkIndexByScrollFailure[];
  /** This field is always equal to zero for delete by query.
   * It exists only so that delete by query, update by query, and reindex APIs return responses with the same structure. */
  noops?: long;
  /** The number of requests per second effectively run during the delete by query. */
  requests_per_second?: float;
  /** The number of retries attempted by delete by query.
   * `bulk` is the number of bulk actions retried.
   * `search` is the number of search actions retried. */
  retries?: Retries;
  slice_id?: integer;
  task?: TaskId;
  throttled?: Duration;
  /** The number of milliseconds the request slept to conform to `requests_per_second`. */
  throttled_millis?: DurationValue<UnitMillis>;
  throttled_until?: Duration;
  /** This field should always be equal to zero in a `_delete_by_query` response.
   * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */
  throttled_until_millis?: DurationValue<UnitMillis>;
  /** If `true`, some requests run during the delete by query operation timed out. */
  timed_out?: boolean;
  /** The number of milliseconds from start to end of the whole operation. */
  took?: DurationValue<UnitMillis>;
  /** The number of documents that were successfully processed. */
  total?: long;
  /** The number of version conflicts that the delete by query hit. */
  version_conflicts?: long;
}

export interface DeleteByQueryRethrottleRequest extends RequestBase {
  /** The ID for the task. */
  task_id: TaskId;
  /** The throttle for this request in sub-requests per second.
   * To disable throttling, set it to `-1`. */
  requests_per_second?: float;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { task_id?: never; requests_per_second?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { task_id?: never; requests_per_second?: never; };
}

export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase;
export interface DeleteScriptRequest extends RequestBase {
  /** The identifier for the stored script or search template. */
  id: Id;
  /** The period to wait for a connection to the master node.
   * If no response is received before the timeout expires, the request fails and returns an error.
   * It can also be set to `-1` to indicate that the request should never time out. */
  master_timeout?: Duration;
  /** The period to wait for a response.
   * If no response is received before the timeout expires, the request fails and returns an error.
   * It can also be set to `-1` to indicate that the request should never time out. */
  timeout?: Duration;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; };
}

export type DeleteScriptResponse = AcknowledgedResponseBase;
export interface ExistsRequest extends RequestBase {
  /** A unique document identifier. */
  id: Id;
  /** A comma-separated list of data streams, indices, and aliases.
   * It supports wildcards (`*`). */
  index: IndexName;
  /** The node or shard the operation should be performed on.
   * By default, the operation is randomized between the shard replicas.
   *
   * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible.
   * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value.
   * This can help with "jumping values" when hitting different shards in different refresh states.
   * A sample value can be something like the web session ID or the user name. */
  preference?: string;
  /** If `true`, the request is real-time as opposed to near-real-time. */
  realtime?: boolean;
  /** If `true`, the request refreshes the relevant shards before retrieving the document.
   * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */
  refresh?: boolean;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude from the response.
   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response.
   * If this parameter is specified, only these source fields are returned.
   * You can exclude fields from this subset using the `_source_excludes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_includes?: Fields;
  /** A comma-separated list of stored fields to return as part of a hit.
   * If no fields are specified, no stored fields are included in the response.
   * If this field is specified, the `_source` parameter defaults to `false`. */
  stored_fields?: Fields;
  /** Explicit version number for concurrency control.
   * The specified version must match the current version of the document for the request to succeed. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; version?: never; version_type?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; version?: never; version_type?: never; };
}

export type ExistsResponse = boolean;

export interface ExistsSourceRequest extends RequestBase {
  /** A unique identifier for the document. */
  id: Id;
  /** A comma-separated list of data streams, indices, and aliases.
   * It supports wildcards (`*`). */
  index: IndexName;
  /** The node or shard the operation should be performed on.
   * By default, the operation is randomized between the shard replicas. */
  preference?: string;
  /** If `true`, the request is real-time as opposed to near-real-time. */
  realtime?: boolean;
  /** If `true`, the request refreshes the relevant shards before retrieving the document.
   * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */
  refresh?: boolean;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude in the response. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response. */
  _source_includes?: Fields;
  /** The version number for concurrency control.
   * It must match the current version of the document for the request to succeed. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; version?: never; version_type?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; version?: never; version_type?: never; };
}

export type ExistsSourceResponse = boolean;
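// Usage sketch (structural client typing; index and id are illustrative).
// The exists API maps an HTTP 200/404 to a boolean.
async function exampleExists(
  client: { exists: (req: ExistsRequest) => Promise<ExistsResponse> }
): Promise<boolean> {
  return client.exists({ index: 'books-example', id: '1' });
}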
export interface ExplainExplanation {
  description: string;
  details: ExplainExplanationDetail[];
  value: float;
}

export interface ExplainExplanationDetail {
  description: string;
  details?: ExplainExplanationDetail[];
  value: float;
}

export interface ExplainRequest extends RequestBase {
  /** The document identifier. */
  id: Id;
  /** Index names that are used to limit the request.
   * Only a single index name can be provided to this parameter. */
  index: IndexName;
  /** The analyzer to use for the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyzer?: string;
  /** If `true`, wildcard and prefix queries are analyzed.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyze_wildcard?: boolean;
  /** The default operator for query string query: `AND` or `OR`.
   * This parameter can be used only when the `q` query string parameter is specified. */
  default_operator?: QueryDslOperator;
  /** The field to use as default where no field prefix is given in the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  df?: string;
  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
   * This parameter can be used only when the `q` query string parameter is specified. */
  lenient?: boolean;
  /** The node or shard the operation should be performed on.
   * It is random by default. */
  preference?: string;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether to return the `_source` field (`true` or `false`) or a list of fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude from the response.
   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response.
   * If this parameter is specified, only these source fields are returned.
   * You can exclude fields from this subset using the `_source_excludes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_includes?: Fields;
  /** A comma-separated list of stored fields to return in the response. */
  stored_fields?: Fields;
  /** The query in the Lucene query string syntax. */
  q?: string;
  /** Defines the search definition using the Query DSL. */
  query?: QueryDslQueryContainer;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; lenient?: never; preference?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; q?: never; query?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; lenient?: never; preference?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; q?: never; query?: never; };
}

export interface ExplainResponse<TDocument = unknown> {
  _index: IndexName;
  _id: Id;
  matched: boolean;
  explanation?: ExplainExplanationDetail;
  get?: InlineGet<TDocument>;
}
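// Usage sketch (structural client typing; names are illustrative). Explains
// how a single document scores against a query.
async function exampleExplain(
  client: { explain: (req: ExplainRequest) => Promise<ExplainResponse> }
): Promise<void> {
  const resp = await client.explain({
    index: 'books-example',
    id: '1',
    query: { match: { title: 'snow' } },
  });
  console.log(resp.matched, resp.explanation?.value);
}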
export interface FieldCapsFieldCapability {
  /** Whether this field can be aggregated on all indices. */
  aggregatable: boolean;
  /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */
  indices?: Indices;
  /** Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key. */
  meta?: Metadata;
  /** The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field. */
  non_aggregatable_indices?: Indices;
  /** The list of indices where this field is not searchable, or null if all indices have the same definition for the field. */
  non_searchable_indices?: Indices;
  /** Whether this field is indexed for search on all indices. */
  searchable: boolean;
  type: string;
  /** Whether this field is registered as a metadata field. */
  metadata_field?: boolean;
  /** Whether this field is used as a time series dimension.
   * @experimental */
  time_series_dimension?: boolean;
  /** Contains the metric type if this field is used as a time series
   * metric, absent if the field is not used as a metric.
   * @experimental */
  time_series_metric?: MappingTimeSeriesMetricType;
  /** If this list is present in the response, then some indices have the
   * field marked as a dimension and other indices, the ones in this list, do not.
   * @experimental */
  non_dimension_indices?: IndexName[];
  /** The list of indices where this field is present, if these indices
   * don't have the same `time_series_metric` value for this field.
   * @experimental */
  metric_conflicts_indices?: IndexName[];
}
export interface FieldCapsRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */
  index?: Indices;
  /** If false, the request returns an error if any wildcard expression, index alias,
   * or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request
   * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** If `true`, missing or closed indices are not included in the response. */
  ignore_unavailable?: boolean;
  /** If true, unmapped fields are included in the response. */
  include_unmapped?: boolean;
  /** A comma-separated list of filters to apply to the response. */
  filters?: string;
  /** A comma-separated list of field types to include.
   * Any fields that do not match one of these types will be excluded from the results.
   * It defaults to empty, meaning that all field types are returned. */
  types?: string[];
  /** If false, empty fields are not included in the response. */
  include_empty_fields?: boolean;
  /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */
  fields?: Fields;
  /** Filter indices if the provided query rewrites to `match_none` on every shard.
   *
   * IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request.
   * For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range.
   * However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */
  index_filter?: QueryDslQueryContainer;
  /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests.
   * These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */
  runtime_mappings?: MappingRuntimeFields;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; include_unmapped?: never; filters?: never; types?: never; include_empty_fields?: never; fields?: never; index_filter?: never; runtime_mappings?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; include_unmapped?: never; filters?: never; types?: never; include_empty_fields?: never; fields?: never; index_filter?: never; runtime_mappings?: never; };
}

export interface FieldCapsResponse {
  /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */
  indices: Indices;
  fields: Record<Field, Record<string, FieldCapsFieldCapability>>;
}
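// Usage sketch (structural client typing; the index pattern and field are
// illustrative). Checks searchability and aggregatability per field and type.
async function exampleFieldCaps(
  client: { fieldCaps: (req: FieldCapsRequest) => Promise<FieldCapsResponse> }
): Promise<void> {
  const resp = await client.fieldCaps({ index: 'logs-*', fields: '@timestamp' });
  for (const [field, byType] of Object.entries(resp.fields)) {
    for (const [type, caps] of Object.entries(byType)) {
      console.log(field, type, caps.searchable, caps.aggregatable);
    }
  }
}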
export interface GetGetResult<TDocument = unknown> {
  /** The name of the index the document belongs to. */
  _index: IndexName;
  /** If the `stored_fields` parameter is set to `true` and `found` is `true`, it contains the document fields stored in the index. */
  fields?: Record<string, any>;
  _ignored?: string[];
  /** Indicates whether the document exists. */
  found: boolean;
  /** The unique identifier for the document. */
  _id: Id;
  /** The primary term assigned to the document for the indexing operation. */
  _primary_term?: long;
  /** The explicit routing, if set. */
  _routing?: string;
  /** The sequence number assigned to the document for the indexing operation.
   * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */
  _seq_no?: SequenceNumber;
  /** If `found` is `true`, it contains the document data formatted in JSON.
   * If the `_source` parameter is set to `false` or the `stored_fields` parameter is set to `true`, it is excluded. */
  _source?: TDocument;
  /** The document version, which is incremented each time the document is updated. */
  _version?: VersionNumber;
}

export interface GetRequest extends RequestBase {
  /** A unique document identifier. */
  id: Id;
  /** The name of the index that contains the document. */
  index: IndexName;
  /** Indicates whether the request forces synthetic `_source`.
   * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance.
   * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */
  force_synthetic_source?: boolean;
  /** The node or shard the operation should be performed on.
   * By default, the operation is randomized between the shard replicas.
   *
   * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible.
   * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value.
   * This can help with "jumping values" when hitting different shards in different refresh states.
   * A sample value can be something like the web session ID or the user name. */
  preference?: string;
  /** If `true`, the request is real-time as opposed to near-real-time. */
  realtime?: boolean;
  /** If `true`, the request refreshes the relevant shards before retrieving the document.
   * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */
  refresh?: boolean;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude from the response.
   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response.
   * If this parameter is specified, only these source fields are returned.
   * You can exclude fields from this subset using the `_source_excludes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_includes?: Fields;
  /** A comma-separated list of stored fields to return as part of a hit.
   * If no fields are specified, no stored fields are included in the response.
   * If this field is specified, the `_source` parameter defaults to `false`.
   * Only leaf fields can be retrieved with the `stored_fields` option.
   * Object fields can't be returned; if specified, the request fails. */
  stored_fields?: Fields;
  /** The version number for concurrency control.
   * It must match the current version of the document for the request to succeed. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; force_synthetic_source?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; version?: never; version_type?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; force_synthetic_source?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; version?: never; version_type?: never; };
}

export type GetResponse<TDocument = unknown> = GetGetResult<TDocument>;
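// Usage sketch (structural client typing; the document type and names are
// illustrative). The TDocument generic types the `_source` of the result.
interface ExampleBook {
  title: string;
  pages: number;
}

async function exampleGet(
  client: { get: <T>(req: GetRequest) => Promise<GetResponse<T>> }
): Promise<string | undefined> {
  const resp = await client.get<ExampleBook>({ index: 'books-example', id: '1' });
  return resp.found ? resp._source?.title : undefined;
}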
export interface GetScriptRequest extends RequestBase {
  /** The identifier for the stored script or search template. */
  id: Id;
  /** The period to wait for the master node.
   * If the master node is not available before the timeout expires, the request fails and returns an error.
   * It can also be set to `-1` to indicate that the request should never time out. */
  master_timeout?: Duration;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; };
}

export interface GetScriptResponse {
  _id: Id;
  found: boolean;
  script?: StoredScript;
}

export interface GetScriptContextContext {
  methods: GetScriptContextContextMethod[];
  name: Name;
}

export interface GetScriptContextContextMethod {
  name: Name;
  return_type: string;
  params: GetScriptContextContextMethodParam[];
}

export interface GetScriptContextContextMethodParam {
  name: Name;
  type: string;
}

export interface GetScriptContextRequest extends RequestBase {
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any; };
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; };
}

export interface GetScriptContextResponse {
  contexts: GetScriptContextContext[];
}

export interface GetScriptLanguagesLanguageContext {
  contexts: string[];
  language: ScriptLanguage;
}

export interface GetScriptLanguagesRequest extends RequestBase {
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any; };
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; };
}

export interface GetScriptLanguagesResponse {
  language_contexts: GetScriptLanguagesLanguageContext[];
  types_allowed: string[];
}
export interface GetSourceRequest extends RequestBase {
  /** A unique document identifier. */
  id: Id;
  /** The name of the index that contains the document. */
  index: IndexName;
  /** The node or shard the operation should be performed on.
   * By default, the operation is randomized between the shard replicas. */
  preference?: string;
  /** If `true`, the request is real-time as opposed to near-real-time. */
  realtime?: boolean;
  /** If `true`, the request refreshes the relevant shards before retrieving the document.
   * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */
  refresh?: boolean;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude in the response. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response. */
  _source_includes?: Fields;
  /** A comma-separated list of stored fields to return as part of a hit. */
  stored_fields?: Fields;
  /** The version number for concurrency control.
   * It must match the current version of the document for the request to succeed. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; version?: never; version_type?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; version?: never; version_type?: never; };
}

export type GetSourceResponse<TDocument = unknown> = TDocument;

export interface HealthReportBaseIndicator {
  status: HealthReportIndicatorHealthStatus;
  symptom: string;
  impacts?: HealthReportImpact[];
  diagnosis?: HealthReportDiagnosis[];
}

export interface HealthReportDataStreamLifecycleDetails {
  stagnating_backing_indices_count: integer;
  total_backing_indices_in_error: integer;
  stagnating_backing_indices?: HealthReportStagnatingBackingIndices[];
}

export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator {
  details?: HealthReportDataStreamLifecycleDetails;
}

export interface HealthReportDiagnosis {
  id: string;
  action: string;
  affected_resources: HealthReportDiagnosisAffectedResources;
  cause: string;
  help_url: string;
}

export interface HealthReportDiagnosisAffectedResources {
  indices?: Indices;
  nodes?: HealthReportIndicatorNode[];
  slm_policies?: string[];
  feature_states?: string[];
  snapshot_repositories?: string[];
}

export interface HealthReportDiskIndicator extends HealthReportBaseIndicator {
  details?: HealthReportDiskIndicatorDetails;
}

export interface HealthReportDiskIndicatorDetails {
  indices_with_readonly_block: long;
  nodes_with_enough_disk_space: long;
  nodes_over_high_watermark: long;
  nodes_over_flood_stage_watermark: long;
  nodes_with_unknown_disk_status: long;
}

export interface HealthReportFileSettingsIndicator extends HealthReportBaseIndicator {
  details?: HealthReportFileSettingsIndicatorDetails;
}

export interface HealthReportFileSettingsIndicatorDetails {
  failure_streak: long;
  most_recent_failure: string;
}

export interface HealthReportIlmIndicator extends HealthReportBaseIndicator {
  details?: HealthReportIlmIndicatorDetails;
}

export interface HealthReportIlmIndicatorDetails {
  ilm_status: LifecycleOperationMode;
  policies: long;
  stagnating_indices: integer;
}

export interface HealthReportImpact {
  description: string;
  id: string;
  impact_areas: HealthReportImpactArea[];
  severity: integer;
}

export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management';

export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown';

export interface HealthReportIndicatorNode {
  name: string | null;
  node_id: string | null;
}

export interface HealthReportIndicators {
  master_is_stable?: HealthReportMasterIsStableIndicator;
  shards_availability?: HealthReportShardsAvailabilityIndicator;
  disk?: HealthReportDiskIndicator;
  repository_integrity?: HealthReportRepositoryIntegrityIndicator;
  data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator;
  ilm?: HealthReportIlmIndicator;
  slm?: HealthReportSlmIndicator;
  shards_capacity?: HealthReportShardsCapacityIndicator;
  file_settings?: HealthReportFileSettingsIndicator;
}

export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator {
  details?: HealthReportMasterIsStableIndicatorDetails;
}

export interface HealthReportMasterIsStableIndicatorClusterFormationNode {
  name?: string;
  node_id: string;
  cluster_formation_message: string;
}
HealthReportMasterIsStableIndicatorClusterFormationNode[];
}

export interface HealthReportMasterIsStableIndicatorExceptionFetchingHistory {
  message: string;
  stack_trace: string;
}

export interface HealthReportRepositoryIntegrityIndicator extends HealthReportBaseIndicator {
  details?: HealthReportRepositoryIntegrityIndicatorDetails;
}

export interface HealthReportRepositoryIntegrityIndicatorDetails {
  total_repositories?: long;
  corrupted_repositories?: long;
  corrupted?: string[];
}

export interface HealthReportRequest extends RequestBase {
  /** A feature of the cluster, as returned by the top-level health report API. */
  feature?: string | string[];
  /** Explicit operation timeout. */
  timeout?: Duration;
  /** Opt-in for more information about the health of the system. */
  verbose?: boolean;
  /** Limit the number of affected resources the health report API returns. */
  size?: integer;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { feature?: never; timeout?: never; verbose?: never; size?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { feature?: never; timeout?: never; verbose?: never; size?: never; };
}

export interface HealthReportResponse {
  cluster_name: string;
  indicators: HealthReportIndicators;
  status?: HealthReportIndicatorHealthStatus;
}

export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator {
  details?: HealthReportShardsAvailabilityIndicatorDetails;
}

export interface HealthReportShardsAvailabilityIndicatorDetails {
  creating_primaries: long;
  creating_replicas: long;
  initializing_primaries: long;
  initializing_replicas: long;
  restarting_primaries: long;
  restarting_replicas: long;
  started_primaries: long;
  started_replicas: long;
  unassigned_primaries: long;
  unassigned_replicas: long;
}

export interface HealthReportShardsCapacityIndicator extends HealthReportBaseIndicator {
  details?: HealthReportShardsCapacityIndicatorDetails;
}

export interface HealthReportShardsCapacityIndicatorDetails {
  data: HealthReportShardsCapacityIndicatorTierDetail;
  frozen: HealthReportShardsCapacityIndicatorTierDetail;
}

export interface HealthReportShardsCapacityIndicatorTierDetail {
  max_shards_in_cluster: integer;
  current_used_shards?: integer;
}

export interface HealthReportSlmIndicator extends HealthReportBaseIndicator {
  details?: HealthReportSlmIndicatorDetails;
}

export interface HealthReportSlmIndicatorDetails {
  slm_status: LifecycleOperationMode;
  policies: long;
  unhealthy_policies?: HealthReportSlmIndicatorUnhealthyPolicies;
}

export interface HealthReportSlmIndicatorUnhealthyPolicies {
  count: long;
  invocations_since_last_success?: Record<string, long>;
}

export interface HealthReportStagnatingBackingIndices {
  index_name: IndexName;
  first_occurrence_timestamp: long;
  retry_count: integer;
}

export interface IndexRequest<TDocument = unknown> extends RequestBase {
  /** A unique identifier for the document.
   * To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter. */
  id?: Id;
  /** The name of the data stream or index to target.
   * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream.
   * If the target doesn't exist and doesn't match a data stream template, this request creates the index.
   * You can check for existing targets with the resolve index API.
   */
  index: IndexName;
  /** Only perform the operation if the document has this primary term. */
  if_primary_term?: long;
  /** Only perform the operation if the document has this sequence number. */
  if_seq_no?: SequenceNumber;
  /** True or false to include the document source in the error message in case of parsing errors. */
  include_source_on_error?: boolean;
  /** Set to `create` to only index the document if it does not already exist (put if absent).
   * If a document with the specified `_id` already exists, the indexing operation will fail.
   * The behavior is the same as using the `<index>/_create` endpoint.
   * If a document ID is specified, this parameter defaults to `index`.
   * Otherwise, it defaults to `create`.
   * If the request targets a data stream, an `op_type` of `create` is required. */
  op_type?: OpType;
  /** The ID of the pipeline to use to preprocess incoming documents.
   * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
   * If a final pipeline is configured it will always run, regardless of the value of this parameter. */
  pipeline?: string;
  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
   * If `wait_for`, it waits for a refresh to make this operation visible to search.
   * If `false`, it does nothing with refreshes. */
  refresh?: Refresh;
  /** A custom value that is used to route operations to a specific shard. */
  routing?: Routing;
  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
   *
   * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs.
   * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation.
   * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error.
   * The actual wait time could be longer, particularly when multiple waits occur. */
  timeout?: Duration;
  /** An explicit version number for concurrency control.
   * It must be a non-negative long number. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** The number of shard copies that must be active before proceeding with the operation.
   * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
   * The default value of `1` means it waits for each primary shard to be active. */
  wait_for_active_shards?: WaitForActiveShards;
  /** If `true`, the destination must be an index alias. */
  require_alias?: boolean;
  document?: TDocument;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; include_source_on_error?: never; op_type?: never; pipeline?: never; refresh?: never; routing?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; require_alias?: never; document?: never; });
  /** All values in `querystring` will be added to the request querystring.
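   *
   * For reference, a minimal typed indexing sketch (illustrative only, not part of the
   * generated types; assumes a configured `client` and a hypothetical `my-index` index):
   *
   *     interface MyDoc { title: string; views: number }
   *     const res = await client.index<MyDoc>({
   *       index: 'my-index',
   *       id: '1',
   *       op_type: 'create',   // fail instead of overwriting an existing document
   *       refresh: 'wait_for', // wait until the document is visible to search
   *       document: { title: 'hello', views: 0 },
   *     })
   *     console.log(res.result) // 'created'
   *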
   */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; include_source_on_error?: never; op_type?: never; pipeline?: never; refresh?: never; routing?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; require_alias?: never; document?: never; };
}

export type IndexResponse = WriteResponseBase;

export interface InfoRequest extends RequestBase {
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any; };
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; };
}

export interface InfoResponse {
  /** The responding cluster's name. */
  cluster_name: Name;
  cluster_uuid: Uuid;
  /** The responding node's name. */
  name: Name;
  tagline: string;
  /** The running version of Elasticsearch. */
  version: ElasticsearchVersionInfo;
}

export interface MgetMultiGetError {
  error: ErrorCause;
  _id: Id;
  _index: IndexName;
}

export interface MgetOperation {
  /** The unique document ID. */
  _id: Id;
  /** The index that contains the document. */
  _index?: IndexName;
  /** The key for the primary shard the document resides on. Required if routing is used during indexing. */
  routing?: Routing;
  /** If `false`, excludes all _source fields. */
  _source?: SearchSourceConfig;
  /** The stored fields you want to retrieve. */
  stored_fields?: Fields;
  version?: VersionNumber;
  version_type?: VersionType;
}

export interface MgetRequest extends RequestBase {
  /** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */
  index?: IndexName;
  /** Should this request force synthetic _source?
   * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
   * Fetches with this enabled will be slower than enabling synthetic source natively in the index. */
  force_synthetic_source?: boolean;
  /** Specifies the node or shard the operation should be performed on. Random by default. */
  preference?: string;
  /** If `true`, the request is real-time as opposed to near-real-time. */
  realtime?: boolean;
  /** If `true`, the request refreshes relevant shards before retrieving documents. */
  refresh?: boolean;
  /** Custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** True or false to return the `_source` field or not, or a list of fields to return. */
  _source?: SearchSourceConfigParam;
  /** A comma-separated list of source fields to exclude from the response.
   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response.
   * If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_includes?: Fields;
  /** If `true`, retrieves the document fields stored in the index rather than the document `_source`. */
  stored_fields?: Fields;
  /** The documents you want to retrieve. Required if no index is specified in the request URI. */
  docs?: MgetOperation[];
  /** The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. */
  ids?: Ids;
  /** All values in `body` will be added to the request body.
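   *
   * For reference, a minimal multi-get sketch (illustrative only, not part of the generated
   * types; it reuses the hypothetical `MyDoc` shape from the sketches above):
   *
   *     const { docs } = await client.mget<MyDoc>({
   *       docs: [
   *         { _index: 'my-index', _id: '1' },
   *         { _index: 'my-index', _id: '2', _source: false },
   *       ],
   *     })
   *     for (const item of docs) {
   *       // MgetResponseItem is GetGetResult<MyDoc> | MgetMultiGetError
   *       if ('error' in item) console.error(item.error)
   *     }
   *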
   */
  body?: string | ({ [key: string]: any; } & { index?: never; force_synthetic_source?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; docs?: never; ids?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; force_synthetic_source?: never; preference?: never; realtime?: never; refresh?: never; routing?: never; _source?: never; _source_excludes?: never; _source_includes?: never; stored_fields?: never; docs?: never; ids?: never; };
}

export interface MgetResponse<TDocument = unknown> {
  /** The response includes a docs array that contains the documents in the order specified in the request.
   * The structure of the returned documents is similar to that returned by the get API.
   * If there is a failure getting a particular document, the error is included in place of the document. */
  docs: MgetResponseItem<TDocument>[];
}

export type MgetResponseItem<TDocument = unknown> = GetGetResult<TDocument> | MgetMultiGetError;

export interface MsearchMultiSearchItem<TDocument = unknown> extends SearchResponseBody<TDocument> {
  status?: integer;
}

export interface MsearchMultiSearchResult<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> {
  took: long;
  responses: MsearchResponseItem<TDocument>[];
}

export interface MsearchMultisearchHeader {
  allow_no_indices?: boolean;
  expand_wildcards?: ExpandWildcards;
  ignore_unavailable?: boolean;
  index?: Indices;
  preference?: string;
  request_cache?: boolean;
  routing?: Routing;
  search_type?: SearchType;
  ccs_minimize_roundtrips?: boolean;
  allow_partial_search_results?: boolean;
  ignore_throttled?: boolean;
}

export interface MsearchRequest extends RequestBase {
  /** Comma-separated list of data streams, indices, and index aliases to search. */
  index?: Indices;
  /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */
  allow_no_indices?: boolean;
  /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */
  ccs_minimize_roundtrips?: boolean;
  /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */
  expand_wildcards?: ExpandWildcards;
  /** If true, concrete, expanded or aliased indices are ignored when frozen. */
  ignore_throttled?: boolean;
  /** If true, missing or closed indices are not included in the response. */
  ignore_unavailable?: boolean;
  /** Indicates whether hit.matched_queries should be rendered as a map that includes
   * the name of the matched query associated with its score (true)
   * or as an array containing the name of the matched queries (false).
   * This functionality reruns each named query on every hit in a search response.
   * Typically, this adds a small overhead to a request.
   * However, using computationally expensive named queries on a large number of hits may add significant overhead. */
  include_named_queries_score?: boolean;
  /** Maximum number of concurrent searches the multi search API can execute.
   * Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. */
  max_concurrent_searches?: integer;
  /** Maximum number of concurrent shard requests that each sub-search request executes per node.
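   *
   * Because a multi search request interleaves header objects and search-body objects in the
   * `searches` array, a usage sketch may help (illustrative only, not part of the generated
   * types; index names and the `MyDoc` shape are hypothetical):
   *
   *     const result = await client.msearch<MyDoc>({
   *       index: 'my-index', // default target for searches whose header omits `index`
   *       searches: [
   *         {},                                       // header for the first search
   *         { query: { match: { title: 'hello' } } }, // body for the first search
   *         { index: 'other-index' },                 // header for the second search
   *         { query: { match_all: {} }, size: 5 },    // body for the second search
   *       ],
   *     })
   *     // result.responses holds one MsearchResponseItem per search, in request order.
   *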
   */
  max_concurrent_shard_requests?: integer;
  /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */
  pre_filter_shard_size?: long;
  /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */
  rest_total_hits_as_int?: boolean;
  /** Custom routing value used to route search operations to a specific shard. */
  routing?: Routing;
  /** Indicates whether global term and document frequencies should be used when scoring returned documents. */
  search_type?: SearchType;
  /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */
  typed_keys?: boolean;
  searches?: MsearchRequestItem[];
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; ccs_minimize_roundtrips?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; include_named_queries_score?: never; max_concurrent_searches?: never; max_concurrent_shard_requests?: never; pre_filter_shard_size?: never; rest_total_hits_as_int?: never; routing?: never; search_type?: never; typed_keys?: never; searches?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; ccs_minimize_roundtrips?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; include_named_queries_score?: never; max_concurrent_searches?: never; max_concurrent_shard_requests?: never; pre_filter_shard_size?: never; rest_total_hits_as_int?: never; routing?: never; search_type?: never; typed_keys?: never; searches?: never; };
}

export type MsearchRequestItem = MsearchMultisearchHeader | SearchSearchRequestBody;

export type MsearchResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations>;

export type MsearchResponseItem<TDocument = unknown> = MsearchMultiSearchItem<TDocument> | ErrorResponseBase;

export interface MsearchTemplateRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and aliases to search.
   * It supports wildcards (`*`).
   * To search all data streams and indices, omit this parameter or use `*`. */
  index?: Indices;
  /** If `true`, network round-trips are minimized for cross-cluster search requests. */
  ccs_minimize_roundtrips?: boolean;
  /** The maximum number of concurrent searches the API can run. */
  max_concurrent_searches?: long;
  /** The type of the search operation. */
  search_type?: SearchType;
  /** If `true`, the response returns `hits.total` as an integer.
   * If `false`, it returns `hits.total` as an object. */
  rest_total_hits_as_int?: boolean;
  /** If `true`, the response prefixes aggregation and suggester names with their respective types. */
  typed_keys?: boolean;
  search_templates?: MsearchTemplateRequestItem[];
  /** All values in `body` will be added to the request body.
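   *
   * For reference, a minimal multi search template sketch (illustrative only, not part of
   * the generated types; the template ID and parameters are hypothetical):
   *
   *     const result = await client.msearchTemplate({
   *       index: 'my-index',
   *       search_templates: [
   *         {},                                                    // header
   *         { id: 'my-search-template', params: { q: 'hello' } },  // template config
   *       ],
   *     })
   *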
   */
  body?: string | ({ [key: string]: any; } & { index?: never; ccs_minimize_roundtrips?: never; max_concurrent_searches?: never; search_type?: never; rest_total_hits_as_int?: never; typed_keys?: never; search_templates?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; ccs_minimize_roundtrips?: never; max_concurrent_searches?: never; search_type?: never; rest_total_hits_as_int?: never; typed_keys?: never; search_templates?: never; };
}

export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig;

export type MsearchTemplateResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations>;

export interface MsearchTemplateTemplateConfig {
  /** If `true`, returns detailed information about score calculation as part of each hit. */
  explain?: boolean;
  /** The ID of the search template to use. If no `source` is specified,
   * this parameter is required. */
  id?: Id;
  /** Key-value pairs used to replace Mustache variables in the template.
   * The key is the variable name.
   * The value is the variable value. */
  params?: Record<string, any>;
  /** If `true`, the query execution is profiled. */
  profile?: boolean;
  /** An inline search template. Supports the same parameters as the search API's
   * request body. It also supports Mustache variables. If no `id` is specified, this
   * parameter is required. */
  source?: ScriptSource;
}

export interface MtermvectorsOperation {
  /** The ID of the document. */
  _id?: Id;
  /** The index of the document. */
  _index?: IndexName;
  /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */
  doc?: any;
  /** Comma-separated list or wildcard expressions of fields to include in the statistics.
   * Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */
  fields?: Fields;
  /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */
  field_statistics?: boolean;
  /** Filter terms based on their tf-idf scores. */
  filter?: TermvectorsFilter;
  /** If `true`, the response includes term offsets. */
  offsets?: boolean;
  /** If `true`, the response includes term payloads. */
  payloads?: boolean;
  /** If `true`, the response includes term positions. */
  positions?: boolean;
  /** Custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** If true, the response includes term frequency and document frequency. */
  term_statistics?: boolean;
  /** If `true`, returns the document version as part of a hit. */
  version?: VersionNumber;
  /** Specific version type. */
  version_type?: VersionType;
}

export interface MtermvectorsRequest extends RequestBase {
  /** The name of the index that contains the documents. */
  index?: IndexName;
  /** A comma-separated list or wildcard expressions of fields to include in the statistics.
   * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */
  fields?: Fields;
  /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */
  field_statistics?: boolean;
  /** If `true`, the response includes term offsets. */
  offsets?: boolean;
  /** If `true`, the response includes term payloads. */
  payloads?: boolean;
  /** If `true`, the response includes term positions. */
  positions?: boolean;
  /** The node or shard the operation should be performed on.
   * It is random by default. */
  preference?: string;
  /** If true, the request is real-time as opposed to near-real-time. */
  realtime?: boolean;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** If true, the response includes term frequency and document frequency. */
  term_statistics?: boolean;
  /** If `true`, returns the document version as part of a hit. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** An array of existing or artificial documents. */
  docs?: MtermvectorsOperation[];
  /** A simplified syntax to specify documents by their ID if they're in the same index. */
  ids?: Id[];
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; fields?: never; field_statistics?: never; offsets?: never; payloads?: never; positions?: never; preference?: never; realtime?: never; routing?: never; term_statistics?: never; version?: never; version_type?: never; docs?: never; ids?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; fields?: never; field_statistics?: never; offsets?: never; payloads?: never; positions?: never; preference?: never; realtime?: never; routing?: never; term_statistics?: never; version?: never; version_type?: never; docs?: never; ids?: never; };
}

export interface MtermvectorsResponse {
  docs: MtermvectorsTermVectorsResult[];
}

export interface MtermvectorsTermVectorsResult {
  _id?: Id;
  _index: IndexName;
  _version?: VersionNumber;
  took?: long;
  found?: boolean;
  term_vectors?: Record<Field, TermvectorsTermVector>;
  error?: ErrorCause;
}

export interface OpenPointInTimeRequest extends RequestBase {
  /** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */
  index: Indices;
  /** Extend the length of time that the point in time persists. */
  keep_alive: Duration;
  /** If `false`, the request returns an error if it targets a missing or closed index. */
  ignore_unavailable?: boolean;
  /** The node or shard the operation should be performed on.
   * By default, it is random. */
  preference?: string;
  /** A custom value that is used to route operations to a specific shard. */
  routing?: Routing;
  /** The type of index that wildcard patterns can match.
   * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
   * It supports comma-separated values, such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT.
   * If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception.
   * If `true`, the point in time will contain all the shards that are available at the time of the request. */
  allow_partial_search_results?: boolean;
  /** Maximum number of concurrent shard requests that each sub-search request executes per node. */
  max_concurrent_shard_requests?: integer;
  /** Filter indices if the provided query rewrites to `match_none` on every shard. */
  index_filter?: QueryDslQueryContainer;
  /** All values in `body` will be added to the request body.
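   *
   * For reference, a typical point-in-time lifecycle (illustrative only, not part of the
   * generated types; the index name and `MyDoc` shape are hypothetical):
   *
   *     const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
   *     try {
   *       const page = await client.search<MyDoc>({
   *         pit: { id: pit.id, keep_alive: '1m' }, // note: no index in the search request
   *         size: 100,
   *       })
   *     } finally {
   *       await client.closePointInTime({ id: pit.id })
   *     }
   *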
   */
  body?: string | ({ [key: string]: any; } & { index?: never; keep_alive?: never; ignore_unavailable?: never; preference?: never; routing?: never; expand_wildcards?: never; allow_partial_search_results?: never; max_concurrent_shard_requests?: never; index_filter?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; keep_alive?: never; ignore_unavailable?: never; preference?: never; routing?: never; expand_wildcards?: never; allow_partial_search_results?: never; max_concurrent_shard_requests?: never; index_filter?: never; };
}

export interface OpenPointInTimeResponse {
  /** Shards used to create the PIT */
  _shards: ShardStatistics;
  id: Id;
}

export interface PingRequest extends RequestBase {
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any; };
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; };
}

export type PingResponse = boolean;

export interface PutScriptRequest extends RequestBase {
  /** The identifier for the stored script or search template.
   * It must be unique within the cluster. */
  id: Id;
  /** The context in which the script or search template should run.
   * To prevent errors, the API immediately compiles the script or template in this context. */
  context?: Name;
  /** The period to wait for a connection to the master node.
   * If no response is received before the timeout expires, the request fails and returns an error.
   * It can also be set to `-1` to indicate that the request should never timeout. */
  master_timeout?: Duration;
  /** The period to wait for a response.
   * If no response is received before the timeout expires, the request fails and returns an error.
   * It can also be set to `-1` to indicate that the request should never timeout. */
  timeout?: Duration;
  /** The script or search template, its parameters, and its language. */
  script: StoredScript;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; context?: never; master_timeout?: never; timeout?: never; script?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; context?: never; master_timeout?: never; timeout?: never; script?: never; };
}

export type PutScriptResponse = AcknowledgedResponseBase;

export interface RankEvalDocumentRating {
  /** The document ID. */
  _id: Id;
  /** The document’s index. For data streams, this should be the document’s backing index. */
  _index: IndexName;
  /** The document’s relevance with regard to this search request. */
  rating: integer;
}

export interface RankEvalRankEvalHit {
  _id: Id;
  _index: IndexName;
  _score: double;
}

export interface RankEvalRankEvalHitItem {
  hit: RankEvalRankEvalHit;
  rating?: double | null;
}

export interface RankEvalRankEvalMetric {
  precision?: RankEvalRankEvalMetricPrecision;
  recall?: RankEvalRankEvalMetricRecall;
  mean_reciprocal_rank?: RankEvalRankEvalMetricMeanReciprocalRank;
  dcg?: RankEvalRankEvalMetricDiscountedCumulativeGain;
  expected_reciprocal_rank?: RankEvalRankEvalMetricExpectedReciprocalRank;
}

export interface RankEvalRankEvalMetricBase {
  /** Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query.
   */
  k?: integer;
}

export interface RankEvalRankEvalMetricDetail {
  /** The metric_score in the details section shows the contribution of this query to the global quality metric score */
  metric_score: double;
  /** The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a ratings value. This can be used to ask the user to supply ratings for these documents */
  unrated_docs: RankEvalUnratedDocument[];
  /** The hits section shows a grouping of the search results with their supplied ratings */
  hits: RankEvalRankEvalHitItem[];
  /** The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). The content varies for each metric but allows for better interpretation of the results */
  metric_details: Record<string, Record<string, any>>;
}

export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase {
  /** If set to true, this metric will calculate the Normalized DCG. */
  normalize?: boolean;
}

export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase {
  /** The highest relevance grade used in the user-supplied relevance judgments. */
  maximum_relevance: integer;
}

export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEvalMetricRatingTreshold {
}

export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold {
  /** Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and neither count as relevant or irrelevant. Set to false (the default), they are treated as irrelevant. */
  ignore_unlabeled?: boolean;
}

export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase {
  /** Sets the rating threshold above which documents are considered to be "relevant". */
  relevant_rating_threshold?: integer;
}

export interface RankEvalRankEvalMetricRecall extends RankEvalRankEvalMetricRatingTreshold {
}

export interface RankEvalRankEvalQuery {
  query: QueryDslQueryContainer;
  size?: integer;
}

export interface RankEvalRankEvalRequestItem {
  /** The search request’s ID, used to group result details later. */
  id: Id;
  /** The query being evaluated. */
  request?: RankEvalRankEvalQuery | QueryDslQueryContainer;
  /** List of document ratings */
  ratings: RankEvalDocumentRating[];
  /** The search template Id */
  template_id?: Id;
  /** The search template parameters. */
  params?: Record<string, any>;
}

export interface RankEvalRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and index aliases used to limit the request.
   * Wildcard (`*`) expressions are supported.
   * To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */
  index?: Indices;
  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */
  expand_wildcards?: ExpandWildcards;
  /** If `true`, missing or closed indices are not included in the response.
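   *
   * For reference, a minimal ranking evaluation sketch (illustrative only, not part of the
   * generated types; index, query, and ratings are hypothetical):
   *
   *     const evaluation = await client.rankEval({
   *       index: 'my-index',
   *       requests: [{
   *         id: 'query_1',
   *         request: { query: { match: { title: 'hello' } } },
   *         ratings: [
   *           { _index: 'my-index', _id: '1', rating: 3 }, // relevant
   *           { _index: 'my-index', _id: '2', rating: 0 }, // irrelevant
   *         ],
   *       }],
   *       metric: { precision: { k: 10, relevant_rating_threshold: 1 } },
   *     })
   *     console.log(evaluation.metric_score)
   *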
   */
  ignore_unavailable?: boolean;
  /** Search operation type */
  search_type?: string;
  /** A set of typical search requests, together with their provided ratings. */
  requests: RankEvalRankEvalRequestItem[];
  /** Definition of the evaluation metric to calculate. */
  metric?: RankEvalRankEvalMetric;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; search_type?: never; requests?: never; metric?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; search_type?: never; requests?: never; metric?: never; };
}

export interface RankEvalResponse {
  /** The overall evaluation quality calculated by the defined metric */
  metric_score: double;
  /** The details section contains one entry for every query in the original requests section, keyed by the search request id */
  details: Record<Id, RankEvalRankEvalMetricDetail>;
  failures: Record<string, any>;
}

export interface RankEvalUnratedDocument {
  _id: Id;
  _index: IndexName;
}

export interface ReindexDestination {
  /** The name of the data stream, index, or index alias you are copying to. */
  index: IndexName;
  /** If it is `create`, the operation will only index documents that do not already exist (also known as "put if absent").
   *
   * IMPORTANT: To reindex to a data stream destination, this argument must be `create`. */
  op_type?: OpType;
  /** The name of the pipeline to use. */
  pipeline?: string;
  /** By default, a document's routing is preserved unless it's changed by the script.
   * If it is `keep`, the routing on the bulk request sent for each match is set to the routing on the match.
   * If it is `discard`, the routing on the bulk request sent for each match is set to `null`.
   * If it is `=value`, the routing on the bulk request sent for each match is set to the value specified after the equals sign (`=`). */
  routing?: Routing;
  /** The versioning to use for the indexing operation. */
  version_type?: VersionType;
}

export interface ReindexRemoteSource {
  /** The remote connection timeout. */
  connect_timeout?: Duration;
  /** An object containing the headers of the request. */
  headers?: Record<string, string>;
  /** The URL for the remote instance of Elasticsearch that you want to index from.
   * This information is required when you're indexing from remote. */
  host: Host;
  /** The username to use for authentication with the remote host. */
  username?: Username;
  /** The password to use for authentication with the remote host. */
  password?: Password;
  /** The remote socket read timeout. */
  socket_timeout?: Duration;
}

export interface ReindexRequest extends RequestBase {
  /** If `true`, the request refreshes affected shards to make this operation visible to search. */
  refresh?: boolean;
  /** The throttle for this request in sub-requests per second.
   * By default, there is no throttle. */
  requests_per_second?: float;
  /** The period of time that a consistent view of the index should be maintained for scrolled search. */
  scroll?: Duration;
  /** The number of slices this task should be divided into.
   * It defaults to one slice, which means the task isn't sliced into subtasks.
   *
   * Reindex supports sliced scroll to parallelize the reindexing process.
   * This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.
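   *
   * For instance, a minimal sliced reindex call might look like this (illustrative sketch,
   * not part of the generated types; index names are hypothetical):
   *
   *     const resp = await client.reindex({
   *       source: { index: 'my-index' },
   *       dest: { index: 'my-index-v2' },
   *       slices: 'auto',              // one slice per shard, up to a limit
   *       conflicts: 'proceed',        // keep going on version conflicts
   *       wait_for_completion: false,  // run as a background task
   *     })
   *     // resp.task can be polled with the tasks API or rethrottled later.
   *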
   * NOTE: Reindexing from remote clusters does not support manual or automatic slicing.
   *
   * If set to `auto`, Elasticsearch chooses the number of slices to use.
   * This setting will use one slice per shard, up to a certain limit.
   * If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. */
  slices?: Slices;
  /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards.
   * By default, Elasticsearch waits for at least one minute before failing.
   * The actual wait time could be longer, particularly when multiple waits occur. */
  timeout?: Duration;
  /** The number of shard copies that must be active before proceeding with the operation.
   * Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
   * The default value is one, which means it waits for each primary shard to be active. */
  wait_for_active_shards?: WaitForActiveShards;
  /** If `true`, the request blocks until the operation is complete. */
  wait_for_completion?: boolean;
  /** If `true`, the destination must be an index alias. */
  require_alias?: boolean;
  /** Indicates whether to continue reindexing even when there are conflicts. */
  conflicts?: Conflicts;
  /** The destination you are copying to. */
  dest: ReindexDestination;
  /** The maximum number of documents to reindex.
   * By default, all documents are reindexed.
   * If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation.
   *
   * If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */
  max_docs?: long;
  /** The script to run to update the document source or metadata when reindexing. */
  script?: Script | ScriptSource;
  size?: long;
  /** The source you are copying from. */
  source: ReindexSource;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { refresh?: never; requests_per_second?: never; scroll?: never; slices?: never; timeout?: never; wait_for_active_shards?: never; wait_for_completion?: never; require_alias?: never; conflicts?: never; dest?: never; max_docs?: never; script?: never; size?: never; source?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { refresh?: never; requests_per_second?: never; scroll?: never; slices?: never; timeout?: never; wait_for_active_shards?: never; wait_for_completion?: never; require_alias?: never; conflicts?: never; dest?: never; max_docs?: never; script?: never; size?: never; source?: never; };
}

export interface ReindexResponse {
  /** The number of scroll responses that were pulled back by the reindex. */
  batches?: long;
  /** The number of documents that were successfully created. */
  created?: long;
  /** The number of documents that were successfully deleted. */
  deleted?: long;
  /** If there were any unrecoverable errors during the process, it is an array of those failures.
   * If this array is not empty, the request ended because of those failures.
   * Reindex is implemented using batches and any failure causes the entire process to end but all failures in the current batch are collected into the array.
   * You can use the `conflicts` option to prevent the reindex from ending on version conflicts. */
  failures?: BulkIndexByScrollFailure[];
  /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */
  noops?: long;
  /** The number of retries attempted by reindex. */
  retries?: Retries;
  /** The number of requests per second effectively run during the reindex. */
  requests_per_second?: float;
  slice_id?: integer;
  task?: TaskId;
  /** The number of milliseconds the request slept to conform to `requests_per_second`. */
  throttled_millis?: EpochTime<UnitMillis>;
  /** This field should always be equal to zero in a reindex response.
   * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) that a throttled request will be run again in order to conform to `requests_per_second`. */
  throttled_until_millis?: EpochTime<UnitMillis>;
  /** If any of the requests that ran during the reindex timed out, it is `true`. */
  timed_out?: boolean;
  /** The total milliseconds the entire operation took. */
  took?: DurationValue<UnitMillis>;
  /** The number of documents that were successfully processed. */
  total?: long;
  /** The number of documents that were successfully updated.
   * That is to say, a document with the same ID already existed before the reindex updated it. */
  updated?: long;
  /** The number of version conflicts that occurred. */
  version_conflicts?: long;
}

export interface ReindexSource {
  /** The name of the data stream, index, or alias you are copying from.
   * It accepts a comma-separated list to reindex from multiple sources. */
  index: Indices;
  /** The documents to reindex, which is defined with Query DSL. */
  query?: QueryDslQueryContainer;
  /** A remote instance of Elasticsearch that you want to index from. */
  remote?: ReindexRemoteSource;
  /** The number of documents to index per batch.
   * Use it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB. */
  size?: integer;
  /** Slice the reindex request manually using the provided slice ID and total number of slices. */
  slice?: SlicedScroll;
  /** A comma-separated list of `<field>:<direction>` pairs to sort by before indexing.
   * Use it in conjunction with `max_docs` to control what documents are reindexed.
   *
   * WARNING: Sort in reindex is deprecated.
   * Sorting in reindex was never guaranteed to index documents in order and prevents further development of reindex such as resilience and performance improvements.
   * If used in combination with `max_docs`, consider using a query filter instead. */
  sort?: Sort;
  /** If `true`, reindex all source fields.
   * Set it to a list to reindex select fields. */
  _source?: Fields;
  runtime_mappings?: MappingRuntimeFields;
}

export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode {
  tasks: Record<string, ReindexRethrottleReindexTask>;
}

export interface ReindexRethrottleReindexStatus {
  /** The number of scroll responses pulled back by the reindex. */
  batches: long;
  /** The number of documents that were successfully created. */
  created: long;
  /** The number of documents that were successfully deleted. */
  deleted: long;
  /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */
  noops: long;
  /** The number of requests per second effectively executed during the reindex. */
  requests_per_second: float;
  /** The number of retries attempted by reindex. `bulk` is the number of bulk actions retried and `search` is the number of search actions retried.
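   *
   * For reference, a throttled reindex can be rethrottled while it runs (illustrative only,
   * not part of the generated types; index names are hypothetical):
   *
   *     const resp = await client.reindex({
   *       source: { index: 'my-index' },
   *       dest: { index: 'my-index-v2' },
   *       requests_per_second: 100,
   *       wait_for_completion: false,
   *     })
   *     // Remove the throttle on the running task (`-1` disables throttling).
   *     await client.reindexRethrottle({ task_id: String(resp.task), requests_per_second: -1 })
   *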
   */
  retries: Retries;
  throttled?: Duration;
  /** Number of milliseconds the request slept to conform to `requests_per_second`. */
  throttled_millis: DurationValue<UnitMillis>;
  throttled_until?: Duration;
  /** This field should always be equal to zero in a `_reindex` response.
   * It only has meaning when using the Task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. */
  throttled_until_millis: DurationValue<UnitMillis>;
  /** The number of documents that were successfully processed. */
  total: long;
  /** The number of documents that were successfully updated, for example, a document with same ID already existed prior to reindex updating it. */
  updated: long;
  /** The number of version conflicts that reindex hits. */
  version_conflicts: long;
}

export interface ReindexRethrottleReindexTask {
  action: string;
  cancellable: boolean;
  description: string;
  id: long;
  node: Name;
  running_time_in_nanos: DurationValue<UnitNanos>;
  start_time_in_millis: EpochTime<UnitMillis>;
  status: ReindexRethrottleReindexStatus;
  type: string;
  headers: HttpHeaders;
}

export interface ReindexRethrottleRequest extends RequestBase {
  /** The task identifier, which can be found by using the tasks API. */
  task_id: Id;
  /** The throttle for this request in sub-requests per second.
   * It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */
  requests_per_second?: float;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { task_id?: never; requests_per_second?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { task_id?: never; requests_per_second?: never; };
}

export interface ReindexRethrottleResponse {
  nodes: Record<string, ReindexRethrottleReindexNode>;
}

export interface RenderSearchTemplateRequest extends RequestBase {
  /** The ID of the search template to render.
   * If no `source` is specified, this or the `id` request body parameter is required. */
  id?: Id;
  file?: string;
  /** Key-value pairs used to replace Mustache variables in the template.
   * The key is the variable name.
   * The value is the variable value. */
  params?: Record<string, any>;
  /** An inline search template.
   * It supports the same parameters as the search API's request body.
   * These parameters also support Mustache variables.
   * If no `id` or `<templated-id>` is specified, this parameter is required. */
  source?: ScriptSource;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; file?: never; params?: never; source?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; file?: never; params?: never; source?: never; };
}

export interface RenderSearchTemplateResponse {
  template_output: Record<string, any>;
}

export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field';

export interface ScriptsPainlessExecutePainlessContextSetup {
  /** Document that's temporarily indexed in-memory and accessible from the script. */
  document: any;
  /** Index containing a mapping that's compatible with the indexed document.
   * You may specify a remote index by prefixing the index with the remote cluster alias.
   * For example, `remote1:my_index` indicates that you want to run the painless script against the "my_index" index on the "remote1" cluster.
   * This request will be forwarded to the "remote1" cluster if you have configured a connection to that remote cluster.
   *
   * NOTE: Wildcards are not accepted in the index expression for this endpoint.
   * The expression `*:myindex` will return the error "No such remote cluster" and the expression `logs*` or `remote1:logs*` will return the error "index not found". */
  index: IndexName;
  /** Use this parameter to specify a query for computing a score. */
  query?: QueryDslQueryContainer;
}

export interface ScriptsPainlessExecuteRequest extends RequestBase {
  /** The context that the script should run in.
   * NOTE: Result ordering in the field contexts is not guaranteed. */
  context?: ScriptsPainlessExecutePainlessContext;
  /** Additional parameters for the `context`.
   * NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */
  context_setup?: ScriptsPainlessExecutePainlessContextSetup;
  /** The Painless script to run. */
  script?: Script | ScriptSource;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { context?: never; context_setup?: never; script?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { context?: never; context_setup?: never; script?: never; };
}

export interface ScriptsPainlessExecuteResponse<TResult = unknown> {
  result: TResult;
}

export interface ScrollRequest extends RequestBase {
  /** The scroll ID */
  scroll_id?: ScrollId;
  /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */
  rest_total_hits_as_int?: boolean;
  /** The period to retain the search context for scrolling. */
  scroll?: Duration;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { scroll_id?: never; rest_total_hits_as_int?: never; scroll?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { scroll_id?: never; rest_total_hits_as_int?: never; scroll?: never; };
}

export type ScrollResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations>;

export interface SearchRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and aliases to search.
   * It supports wildcards (`*`).
   * To search all data streams and indices, omit this parameter or use `*` or `_all`. */
  index?: Indices;
  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
   * This behavior applies even if the request targets other open indices.
   * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** If `true` and there are shard request timeouts or shard failures, the request returns partial results.
   * If `false`, it returns an error with no partial results.
   *
   * To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */
  allow_partial_search_results?: boolean;
  /** The analyzer to use for the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyzer?: string;
  /** If `true`, wildcard and prefix queries are analyzed.
   * This parameter can be used only when the `q` query string parameter is specified. */
  analyze_wildcard?: boolean;
  /** The number of shard results that should be reduced at once on the coordinating node.
   * If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */
  batched_reduce_size?: long;
  /** If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. */
  ccs_minimize_roundtrips?: boolean;
  /** The default operator for the query string query: `AND` or `OR`.
   * This parameter can be used only when the `q` query string parameter is specified. */
  default_operator?: QueryDslOperator;
  /** The field to use as a default when no field prefix is given in the query string.
   * This parameter can be used only when the `q` query string parameter is specified. */
  df?: string;
  /** The type of index that wildcard patterns can match.
   * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
   * It supports comma-separated values such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** If `true`, concrete, expanded or aliased indices will be ignored when frozen. */
  ignore_throttled?: boolean;
  /** If `false`, the request returns an error if it targets a missing or closed index. */
  ignore_unavailable?: boolean;
  /** If `true`, the response includes the score contribution from any named queries.
   *
   * This functionality reruns each named query on every hit in a search response.
   * Typically, this adds a small overhead to a request.
   * However, using computationally expensive named queries on a large number of hits may add significant overhead. */
  include_named_queries_score?: boolean;
  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
   * This parameter can be used only when the `q` query string parameter is specified. */
  lenient?: boolean;
  /** The number of concurrent shard requests per node that the search runs concurrently.
   * This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */
  max_concurrent_shard_requests?: integer;
  /** The nodes and shards used for the search.
   * By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness.
   * Valid values are:
   *
   * * `_only_local` to run the search only on shards on the local node.
   * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method.
   * * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method.
   * * `_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified node IDs. If not, select shards using the default method.
   * * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`.
   * * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
   */
  preference?: string;
  /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.
   * This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).
   * When unspecified, the pre-filter phase is executed if any of these conditions is met:
   *
   * * The request targets more than 128 shards.
   * * The request targets one or more read-only indices.
   * * The primary sort of the query targets an indexed field. */
  pre_filter_shard_size?: long;
  /** If `true`, the caching of search results is enabled for requests where `size` is `0`.
   * It defaults to index level settings. */
  request_cache?: boolean;
  /** A custom value that is used to route operations to a specific shard. */
  routing?: Routing;
  /** The period to retain the search context for scrolling.
   * By default, this value cannot exceed `1d` (24 hours).
   * You can change this limit by using the `search.max_keep_alive` cluster-level setting. */
  scroll?: Duration;
  /** Indicates how distributed term frequencies are calculated for relevance scoring. */
  search_type?: SearchType;
  /** The field to use for suggestions. */
  suggest_field?: Field;
  /** The suggest mode.
   * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */
  suggest_mode?: SuggestMode;
  /** The number of suggestions to return.
   * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */
  suggest_size?: long;
  /** The source text for which the suggestions should be returned.
   * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */
  suggest_text?: string;
  /** If `true`, aggregation and suggester names are prefixed by their respective types in the response. */
  typed_keys?: boolean;
  /** Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. */
  rest_total_hits_as_int?: boolean;
  /** A comma-separated list of source fields to exclude from the response.
   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_excludes?: Fields;
  /** A comma-separated list of source fields to include in the response.
   * If this parameter is specified, only these source fields are returned.
   * You can exclude fields from this subset using the `_source_excludes` query parameter.
   * If the `_source` parameter is `false`, this parameter is ignored. */
  _source_includes?: Fields;
  /** A query in the Lucene query string syntax.
   * Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.
   *
   * IMPORTANT: This parameter overrides the query parameter in the request body.
   * If both parameters are specified, documents matching the query request body parameter are not returned. */
  q?: string;
  /** Should this request force synthetic _source?
   * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
   * Fetches with this enabled will be slower than enabling synthetic source natively in the index.
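   *
   * For reference, a minimal typed search sketch (illustrative only, not part of the
   * generated types; the index, fields, and `MyDoc` shape are hypothetical):
   *
   *     const result = await client.search<MyDoc>({
   *       index: 'my-index',
   *       query: { match: { title: 'hello' } },
   *       size: 10,
   *       sort: [{ views: 'desc' }],
   *     })
   *     for (const hit of result.hits.hits) {
   *       console.log(hit._id, hit._source?.title)
   *     }
   *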
   */
  force_synthetic_source?: boolean;
  /** Defines the aggregations that are run as part of the search request. */
  aggregations?: Record<string, AggregationsAggregationContainer>;
  /** Defines the aggregations that are run as part of the search request.
   * @alias aggregations */
  aggs?: Record<string, AggregationsAggregationContainer>;
  /** Collapses search results by the values of the specified field. */
  collapse?: SearchFieldCollapse;
  /** If `true`, the request returns detailed information about score computation as part of a hit. */
  explain?: boolean;
  /** Configuration of search extensions defined by Elasticsearch plugins. */
  ext?: Record<string, any>;
  /** The starting document offset, which must be non-negative.
   * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
   * To page through more hits, use the `search_after` parameter. */
  from?: integer;
  /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */
  highlight?: SearchHighlight;
  /** Number of hits matching the query to count accurately.
   * If `true`, the exact number of hits is returned at the cost of some performance.
   * If `false`, the response does not include the total number of hits matching the query. */
  track_total_hits?: SearchTrackHits;
  /** Boost the `_score` of documents from specified indices.
   * The boost value is the factor by which scores are multiplied.
   * A boost value greater than `1.0` increases the score.
   * A boost value between `0` and `1.0` decreases the score. */
  indices_boost?: Partial<Record<IndexName, double>>[];
  /** An array of wildcard (`*`) field patterns.
   * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */
  docvalue_fields?: (QueryDslFieldAndFormat | Field)[];
  /** The approximate kNN search to run. */
  knn?: KnnSearch | KnnSearch[];
  /** The Reciprocal Rank Fusion (RRF) to use.
   * @remarks This property is not supported on Elastic Cloud Serverless. */
  rank?: RankContainer;
  /** The minimum `_score` for matching documents.
   * Documents with a lower `_score` are not included in search results and results collected by aggregations. */
  min_score?: double;
  /** Use the `post_filter` parameter to filter search results.
   * The search hits are filtered after the aggregations are calculated.
   * A post filter has no impact on the aggregation results. */
  post_filter?: QueryDslQueryContainer;
  /** Set to `true` to return detailed timing information about the execution of individual components in a search request.
   * NOTE: This is a debugging tool and adds significant overhead to search execution. */
  profile?: boolean;
  /** The search definition using the Query DSL. */
  query?: QueryDslQueryContainer;
  /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */
  rescore?: SearchRescore | SearchRescore[];
  /** A retriever is a specification to describe top documents returned from a search.
   * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */
  retriever?: RetrieverContainer;
  /** Retrieve a script evaluation (based on different fields) for each hit. */
  script_fields?: Record<string, ScriptField>;
  /** Used to retrieve the next page of hits using a set of sort values from the previous page. */
  search_after?: SortResults;
  /** The number of hits to return, which must not be negative.
   * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
   * To page through more hits, use the `search_after` property.
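   *
   * For reference, deep pagination with `search_after` looks like this (illustrative only,
   * not part of the generated types; `created_at` and `id` are hypothetical sort fields and
   * `id` is assumed to be a unique tiebreaker):
   *
   *     let after: SortResults | undefined
   *     do {
   *       const page = await client.search<MyDoc>({
   *         index: 'my-index',
   *         size: 1000,
   *         sort: [{ created_at: 'asc' }, { id: 'asc' }],
   *         search_after: after,
   *       })
   *       after = page.hits.hits.at(-1)?.sort
   *     } while (after)
   *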
*/ size?: integer; /** Split a scrolled search into multiple slices that can be consumed independently. */ slice?: SlicedScroll; /** A comma-separated list of `<field>:<direction>` pairs. */ sort?: Sort; /** The source fields that are returned for matching documents. * These fields are returned in the `hits._source` property of the search response. * If the `stored_fields` property is specified, the `_source` property defaults to `false`. * Otherwise, it defaults to `true`. */ _source?: SearchSourceConfig; /** An array of wildcard (`*`) field patterns. * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[]; /** Defines a suggester that provides similar looking terms based on a provided text. */ suggest?: SearchSuggester; /** The maximum number of documents to collect for each shard. * If a query reaches this limit, Elasticsearch terminates the query early. * Elasticsearch collects documents before sorting. * * IMPORTANT: Use with caution. * Elasticsearch applies this property to each shard handling the request. * When possible, let Elasticsearch perform early termination automatically. * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. * * If set to `0` (default), the query does not terminate early. */ terminate_after?: long; /** The period of time to wait for a response from each shard. * If no response is received before the timeout expires, the request fails and returns an error. * Defaults to no timeout. */ timeout?: string; /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean; /** If `true`, the request returns the document version as part of a hit. */ version?: boolean; /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean; /** A comma-separated list of stored fields to return as part of a hit. * If no fields are specified, no stored fields are included in the response. * If this field is specified, the `_source` property defaults to `false`. * You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields; /** Limit the search to a point in time (PIT). * If you provide a PIT, you cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference; /** One or more runtime fields in the search request. * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields; /** The stats groups to associate with the search. * Each group maintains a statistics aggregation for its associated searches. * You can retrieve these stats using the indices stats API. */ stats?: string[]; /** All values in `body` will be added to the request body.
*/ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; allow_partial_search_results?: never; analyzer?: never; analyze_wildcard?: never; batched_reduce_size?: never; ccs_minimize_roundtrips?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; include_named_queries_score?: never; lenient?: never; max_concurrent_shard_requests?: never; preference?: never; pre_filter_shard_size?: never; request_cache?: never; routing?: never; scroll?: never; search_type?: never; suggest_field?: never; suggest_mode?: never; suggest_size?: never; suggest_text?: never; typed_keys?: never; rest_total_hits_as_int?: never; _source_excludes?: never; _source_includes?: never; q?: never; force_synthetic_source?: never; aggregations?: never; aggs?: never; collapse?: never; explain?: never; ext?: never; from?: never; highlight?: never; track_total_hits?: never; indices_boost?: never; docvalue_fields?: never; knn?: never; rank?: never; min_score?: never; post_filter?: never; profile?: never; query?: never; rescore?: never; retriever?: never; script_fields?: never; search_after?: never; size?: never; slice?: never; sort?: never; _source?: never; fields?: never; suggest?: never; terminate_after?: never; timeout?: never; track_scores?: never; version?: never; seq_no_primary_term?: never; stored_fields?: never; pit?: never; runtime_mappings?: never; stats?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; allow_partial_search_results?: never; analyzer?: never; analyze_wildcard?: never; batched_reduce_size?: never; ccs_minimize_roundtrips?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; include_named_queries_score?: never; lenient?: never; max_concurrent_shard_requests?: never; preference?: never; pre_filter_shard_size?: never; request_cache?: never; routing?: never; scroll?: never; search_type?: never; suggest_field?: never; suggest_mode?: never; suggest_size?: never; suggest_text?: never; typed_keys?: never; rest_total_hits_as_int?: never; _source_excludes?: never; _source_includes?: never; q?: never; force_synthetic_source?: never; aggregations?: never; aggs?: never; collapse?: never; explain?: never; ext?: never; from?: never; highlight?: never; track_total_hits?: never; indices_boost?: never; docvalue_fields?: never; knn?: never; rank?: never; min_score?: never; post_filter?: never; profile?: never; query?: never; rescore?: never; retriever?: never; script_fields?: never; search_after?: never; size?: never; slice?: never; sort?: never; _source?: never; fields?: never; suggest?: never; terminate_after?: never; timeout?: never; track_scores?: never; version?: never; seq_no_primary_term?: never; stored_fields?: never; pit?: never; runtime_mappings?: never; stats?: never; }; } export type SearchResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations>; export interface SearchResponseBody<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> { /** The number of milliseconds it took Elasticsearch to run the request. * This value is calculated by measuring the time elapsed between receipt of a request on the coordinating node and the time at which the coordinating node is ready to send the response.
* It includes: * * * Communication time between the coordinating node and data nodes * * Time the request spends in the search thread pool, queued for execution * * Actual run time * * It does not include: * * * Time needed to send the request to Elasticsearch * * Time needed to serialize the JSON response * * Time needed to send the response to a client */ took: long; /** If `true`, the request timed out before completion; returned results may be partial or empty. */ timed_out: boolean; /** A count of shards used for the request. */ _shards: ShardStatistics; /** The returned documents and metadata. */ hits: SearchHitsMetadata<TDocument>; aggregations?: TAggregations; _clusters?: ClusterStatistics; fields?: Record<string, any>; max_score?: double; num_reduce_phases?: long; profile?: SearchProfile; pit_id?: Id; /** The identifier for the search and its search context. * You can use this scroll ID with the scroll API to retrieve the next batch of search results for the request. * This property is returned only if the `scroll` query parameter is specified in the request. */ _scroll_id?: ScrollId; suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>; terminated_early?: boolean; } export interface SearchAggregationBreakdown { build_aggregation: long; build_aggregation_count: long; build_leaf_collector: long; build_leaf_collector_count: long; collect: long; collect_count: long; initialize: long; initialize_count: long; post_collection?: long; post_collection_count?: long; reduce: long; reduce_count: long; } export interface SearchAggregationProfile { breakdown: SearchAggregationBreakdown; description: string; time_in_nanos: DurationValue<UnitNanos>; type: string; debug?: SearchAggregationProfileDebug; children?: SearchAggregationProfile[]; } export interface SearchAggregationProfileDebug { segments_with_multi_valued_ords?: integer; collection_strategy?: string; segments_with_single_valued_ords?: integer; total_buckets?: integer; built_buckets?: integer; result_strategy?: string; has_filter?: boolean; delegate?: string; delegate_debug?: SearchAggregationProfileDebug; chars_fetched?: integer; extract_count?: integer; extract_ns?: integer; values_fetched?: integer; collect_analyzed_ns?: integer; collect_analyzed_count?: integer; surviving_buckets?: integer; ordinals_collectors_used?: integer; ordinals_collectors_overhead_too_high?: integer; string_hashing_collectors_used?: integer; numeric_collectors_used?: integer; empty_collectors_used?: integer; deferred_aggregators?: string[]; segments_with_doc_count_field?: integer; segments_with_deleted_docs?: integer; filters?: SearchAggregationProfileDelegateDebugFilter[]; segments_counted?: integer; segments_collected?: integer; map_reducer?: string; brute_force_used?: integer; dynamic_pruning_attempted?: integer; dynamic_pruning_used?: integer; skipped_due_to_no_data?: integer; } export interface SearchAggregationProfileDelegateDebugFilter { results_from_metadata?: integer; query?: string; specialized_for?: string; segments_counted_in_constant_time?: integer; } export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word'; export interface SearchCollector { name: string; reason: string; time_in_nanos: DurationValue<UnitNanos>; children?: SearchCollector[]; } export interface SearchCompletionContext { /** The factor by which the score of the suggestion should be boosted. * The score is computed by multiplying the boost with the suggestion weight. */ boost?: double; /** The value of the category to filter/boost on.
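 *
 * @example
 * A context-filtered completion suggestion sketch (hypothetical index, completion field, and context name; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.search({
 *   index: 'my-index',      // hypothetical index with a context-enabled completion field
 *   suggest: {
 *     place_suggest: {
 *       prefix: 'tim',
 *       completion: {
 *         field: 'suggest', // hypothetical completion field
 *         contexts: {
 *           place_type: [{ context: 'cafe', boost: 2 }] // boost suggestions tagged 'cafe'
 *         }
 *       }
 *     }
 *   }
 * })
 * ```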
*/ context: SearchContext; /** An array of precision values at which neighboring geohashes should be taken into account. * Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash precision (`1`..`12`). * Defaults to generating neighbors for index time precision level. */ neighbours?: GeoHashPrecision[]; /** The precision of the geohash to encode the query geo point. * Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw geohash precision (`1`..`12`). * Defaults to index time precision level. */ precision?: GeoHashPrecision; /** Whether the category value should be treated as a prefix or not. */ prefix?: boolean; } export interface SearchCompletionSuggest<TDocument = unknown> extends SearchSuggestBase { options: SearchCompletionSuggestOption<TDocument> | SearchCompletionSuggestOption<TDocument>[]; } export interface SearchCompletionSuggestOption<TDocument = unknown> { collate_match?: boolean; contexts?: Record<string, SearchContext[]>; fields?: Record<string, any>; _id?: string; _index?: IndexName; _routing?: Routing; _score?: double; _source?: TDocument; text: string; score?: double; } export interface SearchCompletionSuggester extends SearchSuggesterBase { /** A value, geo point object, or a geo hash string to filter or boost the suggestion on. */ contexts?: Record<Field, SearchCompletionContext | SearchCompletionContext[]>; /** Enables fuzziness, meaning you can have a typo in your search and still get results back. */ fuzzy?: SearchSuggestFuzziness; /** A regex query that expresses a prefix as a regular expression. */ regex?: SearchRegexOptions; /** Whether duplicate suggestions should be filtered out. */ skip_duplicates?: boolean; } export type SearchContext = string | GeoLocation; export interface SearchDfsKnnProfile { vector_operations_count?: long; query: SearchKnnQueryProfileResult[]; rewrite_time: long; collector: SearchKnnCollectorResult[]; } export interface SearchDfsProfile { statistics?: SearchDfsStatisticsProfile; knn?: SearchDfsKnnProfile[]; } export interface SearchDfsStatisticsBreakdown { collection_statistics: long; collection_statistics_count: long; create_weight: long; create_weight_count: long; rewrite: long; rewrite_count: long; term_statistics: long; term_statistics_count: long; } export interface SearchDfsStatisticsProfile { type: string; description: string; time?: Duration; time_in_nanos: DurationValue<UnitNanos>; breakdown: SearchDfsStatisticsBreakdown; debug?: Record<string, any>; children?: SearchDfsStatisticsProfile[]; } export interface SearchDirectGenerator { /** The field to fetch the candidate suggestions from. * Needs to be set globally or per suggestion. */ field: Field; /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. * Can only be `1` or `2`. */ max_edits?: integer; /** A factor that is used to multiply with the `shard_size` in order to inspect more candidate spelling corrections on the shard level. * Can improve accuracy at the cost of performance. */ max_inspections?: float; /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. * This can be used to exclude high frequency terms (which are usually spelled correctly) from being spellchecked. * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. * If a value higher than `1` is specified, the value cannot be fractional. */ max_term_freq?: float; /** The minimal threshold in number of documents a suggestion should appear in. * This can improve quality by only suggesting high frequency terms.
* Can be specified as an absolute number or as a relative percentage of number of documents. * If a value higher than `1` is specified, the number cannot be fractional. */ min_doc_freq?: float; /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer; /** A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. */ post_filter?: string; /** A filter (analyzer) that is applied to each of the tokens passed to this candidate generator. * This filter is applied to the original token before candidates are generated. */ pre_filter?: string; /** The number of minimal prefix characters that must match in order to be a candidate suggestion. * Increasing this number improves spellcheck performance. */ prefix_length?: integer; /** The maximum corrections to be returned per suggest text token. */ size?: integer; /** Controls what suggestions are included on the suggestions generated on each shard. */ suggest_mode?: SuggestMode; } export interface SearchFetchProfile { type: string; description: string; time_in_nanos: DurationValue<UnitNanos>; breakdown: SearchFetchProfileBreakdown; debug?: SearchFetchProfileDebug; children?: SearchFetchProfile[]; } export interface SearchFetchProfileBreakdown { load_source?: integer; load_source_count?: integer; load_stored_fields?: integer; load_stored_fields_count?: integer; next_reader?: integer; next_reader_count?: integer; process_count?: integer; process?: integer; } export interface SearchFetchProfileDebug { stored_fields?: string[]; fast_path?: integer; } export interface SearchFieldCollapse { /** The field to collapse the result set on. */ field: Field; /** The number of inner hits and their sort order. */ inner_hits?: SearchInnerHits | SearchInnerHits[]; /** The number of concurrent requests allowed to retrieve the `inner_hits` per group. */ max_concurrent_group_searches?: integer; collapse?: SearchFieldCollapse; } export interface SearchFieldSuggester { /** Provides auto-complete/search-as-you-type functionality. */ completion?: SearchCompletionSuggester; /** Provides access to word alternatives on a per token basis within a certain string distance. */ phrase?: SearchPhraseSuggester; /** Suggests terms based on edit distance. */ term?: SearchTermSuggester; /** Prefix used to search for suggestions. */ prefix?: string; /** A prefix expressed as a regular expression. */ regex?: string; /** The text to use as input for the suggester. * Needs to be set globally or per suggestion. */ text?: string; } export interface SearchHighlight extends SearchHighlightBase { encoder?: SearchHighlighterEncoder; fields: Record<Field, SearchHighlightField>; } export interface SearchHighlightBase { type?: SearchHighlighterType; /** A string that contains each boundary character. */ boundary_chars?: string; /** How far to scan for boundary characters. */ boundary_max_scan?: integer; /** Specifies how to break the highlighted fragments: `chars`, `sentence`, or `word`. * Only valid for the unified and fvh highlighters. * Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. */ boundary_scanner?: SearchBoundaryScanner; /** Controls which locale is used to search for sentence and word boundaries. * This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. */ boundary_scanner_locale?: string; force_source?: boolean; /** Specifies how text should be broken up in highlight snippets: `simple` or `span`. * Only valid for the `plain` highlighter.
*/ fragmenter?: SearchHighlighterFragmenter; /** The size of the highlighted fragment in characters. */ fragment_size?: integer; highlight_filter?: boolean; /** Highlight matches for a query other than the search query. * This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. */ highlight_query?: QueryDslQueryContainer; max_fragment_length?: integer; /** If set to a non-negative value, highlighting stops at this defined maximum limit. * The rest of the text is not processed, thus not highlighted, and no error is returned. * The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it is set to a lower value than the query setting. */ max_analyzed_offset?: integer; /** The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. */ no_match_size?: integer; /** The maximum number of fragments to return. * If the number of fragments is set to `0`, no fragments are returned. * Instead, the entire field contents are highlighted and returned. * This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required. * If `number_of_fragments` is `0`, `fragment_size` is ignored. */ number_of_fragments?: integer; options?: Record<string, any>; /** Sorts highlighted fragments by score when set to `score`. * By default, fragments will be output in the order they appear in the field (order: `none`). * Setting this option to `score` will output the most relevant fragments first. * Each highlighter applies its own logic to compute relevancy scores. */ order?: SearchHighlighterOrder; /** Controls the number of matching phrases in a document that are considered. * Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. * When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. * Only supported by the `fvh` highlighter. */ phrase_limit?: integer; /** Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. * By default, highlighted text is wrapped in `<em>` and `</em>` tags. */ post_tags?: string[]; /** Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. * By default, highlighted text is wrapped in `<em>` and `</em>` tags. */ pre_tags?: string[]; /** By default, only fields that contain a query match are highlighted. * Set to `false` to highlight all fields. */ require_field_match?: boolean; /** Set to `styled` to use the built-in tag schema.
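 *
 * @example
 * A highlighting sketch with custom tags (hypothetical index and field names; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.search({
 *   index: 'my-index',                 // hypothetical index
 *   query: { match: { body: 'fox' } }, // hypothetical text field
 *   highlight: {
 *     pre_tags: ['<mark>'],            // overrides the default <em> tag
 *     post_tags: ['</mark>'],
 *     fields: { body: { number_of_fragments: 2, fragment_size: 120 } }
 *   }
 * })
 * console.log(res.hits.hits[0]?.highlight?.body)
 * ```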
*/ tags_schema?: SearchHighlighterTagsSchema; } export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer; matched_fields?: Fields; } export type SearchHighlighterEncoder = 'default' | 'html'; export type SearchHighlighterFragmenter = 'simple' | 'span'; export type SearchHighlighterOrder = 'score'; export type SearchHighlighterTagsSchema = 'styled'; export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string; export interface SearchHit<TDocument = unknown> { _index: IndexName; _id?: Id; _score?: double | null; _explanation?: ExplainExplanation; fields?: Record<string, any>; highlight?: Record<string, string[]>; inner_hits?: Record<string, SearchInnerHitsResult>; matched_queries?: string[] | Record<string, double>; _nested?: SearchNestedIdentity; _ignored?: string[]; ignored_field_values?: Record<string, FieldValue[]>; _shard?: string; _node?: string; _routing?: string; _source?: TDocument; _rank?: integer; _seq_no?: SequenceNumber; _primary_term?: long; _version?: VersionNumber; sort?: SortResults; } export interface SearchHitsMetadata<T = unknown> { /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ total?: SearchTotalHits | long; hits: SearchHit<T>[]; max_score?: double | null; } export interface SearchInnerHits { /** The name for the particular inner hit definition in the response. * Useful when a search request contains multiple inner hits. */ name?: Name; /** The maximum number of hits to return per `inner_hits`. */ size?: integer; /** Inner hit starting document offset. */ from?: integer; collapse?: SearchFieldCollapse; docvalue_fields?: (QueryDslFieldAndFormat | Field)[]; explain?: boolean; highlight?: SearchHighlight; ignore_unmapped?: boolean; script_fields?: Record<Field, ScriptField>; seq_no_primary_term?: boolean; fields?: Field[]; /** How the inner hits should be sorted per `inner_hits`. * By default, inner hits are sorted by score. */ sort?: Sort; _source?: SearchSourceConfig; stored_fields?: Fields; track_scores?: boolean; version?: boolean; } export interface SearchInnerHitsResult { hits: SearchHitsMetadata; } export interface SearchKnnCollectorResult { name: string; reason: string; time?: Duration; time_in_nanos: DurationValue<UnitNanos>; children?: SearchKnnCollectorResult[]; } export interface SearchKnnQueryProfileBreakdown { advance: long; advance_count: long; build_scorer: long; build_scorer_count: long; compute_max_score: long; compute_max_score_count: long; count_weight: long; count_weight_count: long; create_weight: long; create_weight_count: long; match: long; match_count: long; next_doc: long; next_doc_count: long; score: long; score_count: long; set_min_competitive_score: long; set_min_competitive_score_count: long; shallow_advance: long; shallow_advance_count: long; } export interface SearchKnnQueryProfileResult { type: string; description: string; time?: Duration; time_in_nanos: DurationValue<UnitNanos>; breakdown: SearchKnnQueryProfileBreakdown; debug?: Record<string, any>; children?: SearchKnnQueryProfileResult[]; } export interface SearchLaplaceSmoothingModel { /** A constant that is added to all counts to balance weights.
*/ alpha: double; } export interface SearchLearningToRank { /** The unique identifier of the trained model uploaded to Elasticsearch. */ model_id: string; /** Named parameters to be passed to the query templates used for feature extraction. */ params?: Record<string, any>; } export interface SearchLinearInterpolationSmoothingModel { bigram_lambda: double; trigram_lambda: double; unigram_lambda: double; } export interface SearchNestedIdentity { field: Field; offset: integer; _nested?: SearchNestedIdentity; } export interface SearchPhraseSuggest extends SearchSuggestBase { options: SearchPhraseSuggestOption | SearchPhraseSuggestOption[]; } export interface SearchPhraseSuggestCollate { /** Parameters to use if the query is templated. */ params?: Record<string, any>; /** Returns all suggestions with an extra `collate_match` option indicating whether the generated phrase matched any document. */ prune?: boolean; /** A collate query that is run once for every suggestion. */ query: SearchPhraseSuggestCollateQuery; } export interface SearchPhraseSuggestCollateQuery { /** The search template ID. */ id?: Id; /** The query source. */ source?: ScriptSource; } export interface SearchPhraseSuggestHighlight { /** Use in conjunction with `pre_tag` to define the HTML tags to use for the highlighted text. */ post_tag: string; /** Use in conjunction with `post_tag` to define the HTML tags to use for the highlighted text. */ pre_tag: string; } export interface SearchPhraseSuggestOption { text: string; score: double; highlighted?: string; collate_match?: boolean; } export interface SearchPhraseSuggester extends SearchSuggesterBase { /** Checks each suggestion against the specified query to prune suggestions for which no matching docs exist in the index. */ collate?: SearchPhraseSuggestCollate; /** Defines a factor applied to the input phrase's score, which is used as a threshold for other suggest candidates. * Only candidates that score higher than the threshold will be included in the result. */ confidence?: double; /** A list of candidate generators that produce a list of possible terms per term in the given text. */ direct_generator?: SearchDirectGenerator[]; force_unigrams?: boolean; /** Sets max size of the n-grams (shingles) in the field. * If the field doesn’t contain n-grams (shingles), this should be omitted or set to `1`. * If the field uses a shingle filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. */ gram_size?: integer; /** Sets up suggestion highlighting. * If not provided, no highlighted field is returned. */ highlight?: SearchPhraseSuggestHighlight; /** The maximum percentage of the terms considered to be misspellings in order to form a correction. * This method accepts a float value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. */ max_errors?: double; /** The likelihood of a term being misspelled even if the term exists in the dictionary. */ real_word_error_likelihood?: double; /** The separator that is used to separate terms in the bigram field. * If not set, the whitespace character is used as a separator. */ separator?: string; /** Sets the maximum number of suggested terms to be retrieved from each individual shard. */ shard_size?: integer; /** The smoothing model used to balance weight between infrequent grams (grams or shingles that do not exist in the index) and frequent grams (those that appear at least once in the index). * The default model is Stupid Backoff.
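 *
 * @example
 * A phrase-suggester sketch that swaps the default Stupid Backoff model for Laplace smoothing (hypothetical index and shingle-analyzed field; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.search({
 *   index: 'my-index',                      // hypothetical index
 *   suggest: {
 *     my_phrase: {
 *       text: 'noble prize',
 *       phrase: {
 *         field: 'title.trigram',           // hypothetical shingle subfield
 *         gram_size: 3,
 *         smoothing: { laplace: { alpha: 0.7 } },
 *         direct_generator: [{ field: 'title.trigram', suggest_mode: 'always' }]
 *       }
 *     }
 *   }
 * })
 * ```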
*/ smoothing?: SearchSmoothingModelContainer; /** The text/query to provide suggestions for. */ text?: string; token_limit?: integer; } export interface SearchPointInTimeReference { id: Id; keep_alive?: Duration; } export interface SearchProfile { shards: SearchShardProfile[]; } export interface SearchQueryBreakdown { advance: long; advance_count: long; build_scorer: long; build_scorer_count: long; create_weight: long; create_weight_count: long; match: long; match_count: long; shallow_advance: long; shallow_advance_count: long; next_doc: long; next_doc_count: long; score: long; score_count: long; compute_max_score: long; compute_max_score_count: long; count_weight: long; count_weight_count: long; set_min_competitive_score: long; set_min_competitive_score_count: long; } export interface SearchQueryProfile { breakdown: SearchQueryBreakdown; description: string; time_in_nanos: DurationValue<UnitNanos>; type: string; children?: SearchQueryProfile[]; } export interface SearchRegexOptions { /** Optional operators for the regular expression. */ flags?: integer | string; /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer; } export interface SearchRescore { window_size?: integer; query?: SearchRescoreQuery; learning_to_rank?: SearchLearningToRank; } export interface SearchRescoreQuery { /** The query to use for rescoring. * This query is only run on the Top-K results returned by the `query` and `post_filter` phases. */ rescore_query: QueryDslQueryContainer; /** Relative importance of the original query versus the rescore query. */ query_weight?: double; /** Relative importance of the rescore query versus the original query. */ rescore_query_weight?: double; /** Determines how scores are combined. */ score_mode?: SearchScoreMode; } export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total'; export interface SearchSearchProfile { collector: SearchCollector[]; query: SearchQueryProfile[]; rewrite_time: long; } export interface SearchSearchRequestBody { /** Defines the aggregations that are run as part of the search request. */ aggregations?: Record<string, AggregationsAggregationContainer>; /** Defines the aggregations that are run as part of the search request. * @alias aggregations */ aggs?: Record<string, AggregationsAggregationContainer>; /** Collapses search results by the values of the specified field. */ collapse?: SearchFieldCollapse; /** If `true`, the request returns detailed information about score computation as part of a hit. */ explain?: boolean; /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record<string, any>; /** The starting document offset, which must be non-negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ from?: integer; /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */ highlight?: SearchHighlight; /** The number of hits matching the query to count accurately. * If `true`, the exact number of hits is returned at the cost of some performance. * If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits; /** Boost the `_score` of documents from specified indices. * The boost value is the factor by which scores are multiplied. * A boost value greater than `1.0` increases the score. * A boost value between `0` and `1.0` decreases the score. */ indices_boost?: Partial<Record<IndexName, double>>[]; /** An array of wildcard (`*`) field patterns.
* The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[]; /** The approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[]; /** The Reciprocal Rank Fusion (RRF) to use. * @remarks This property is not supported on Elastic Cloud Serverless. */ rank?: RankContainer; /** The minimum `_score` for matching documents. * Documents with a lower `_score` are not included in search results or in results collected by aggregations. */ min_score?: double; /** Use the `post_filter` parameter to filter search results. * The search hits are filtered after the aggregations are calculated. * A post filter has no impact on the aggregation results. */ post_filter?: QueryDslQueryContainer; /** Set to `true` to return detailed timing information about the execution of individual components in a search request. * NOTE: This is a debugging tool and adds significant overhead to search execution. */ profile?: boolean; /** The search definition using the Query DSL. */ query?: QueryDslQueryContainer; /** Can be used to improve precision by reordering just the top (for example, 100 to 500) documents returned by the `query` and `post_filter` phases. */ rescore?: SearchRescore | SearchRescore[]; /** A retriever is a specification to describe top documents returned from a search. * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ retriever?: RetrieverContainer; /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record<string, ScriptField>; /** Used to retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults; /** The number of hits to return, which must not be negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` property. */ size?: integer; /** Split a scrolled search into multiple slices that can be consumed independently. */ slice?: SlicedScroll; /** A comma-separated list of `<field>:<direction>` pairs. */ sort?: Sort; /** The source fields that are returned for matching documents. * These fields are returned in the `hits._source` property of the search response. * If the `stored_fields` property is specified, the `_source` property defaults to `false`. * Otherwise, it defaults to `true`. */ _source?: SearchSourceConfig; /** An array of wildcard (`*`) field patterns. * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[]; /** Defines a suggester that provides similar looking terms based on a provided text. */ suggest?: SearchSuggester; /** The maximum number of documents to collect for each shard. * If a query reaches this limit, Elasticsearch terminates the query early. * Elasticsearch collects documents before sorting. * * IMPORTANT: Use with caution. * Elasticsearch applies this property to each shard handling the request. * When possible, let Elasticsearch perform early termination automatically. * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. * * If set to `0` (default), the query does not terminate early. */ terminate_after?: long; /** The period of time to wait for a response from each shard. * If no response is received before the timeout expires, the request fails and returns an error.
* Defaults to no timeout. */ timeout?: string; /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean; /** If `true`, the request returns the document version as part of a hit. */ version?: boolean; /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean; /** A comma-separated list of stored fields to return as part of a hit. * If no fields are specified, no stored fields are included in the response. * If this field is specified, the `_source` property defaults to `false`. * You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields; /** Limit the search to a point in time (PIT). * If you provide a PIT, you cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference; /** One or more runtime fields in the search request. * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields; /** The stats groups to associate with the search. * Each group maintains a statistics aggregation for its associated searches. * You can retrieve these stats using the indices stats API. */ stats?: string[]; } export interface SearchShardProfile { aggregations: SearchAggregationProfile[]; cluster: string; dfs?: SearchDfsProfile; fetch?: SearchFetchProfile; id: string; index: IndexName; node_id: NodeId; searches: SearchSearchProfile[]; shard_id: integer; } export interface SearchSmoothingModelContainer { /** A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance weights. */ laplace?: SearchLaplaceSmoothingModel; /** A smoothing model that takes the weighted mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). */ linear_interpolation?: SearchLinearInterpolationSmoothingModel; /** A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. */ stupid_backoff?: SearchStupidBackoffSmoothingModel; } export type SearchSourceConfig = boolean | SearchSourceFilter | Fields; export type SearchSourceConfigParam = boolean | Fields; export interface SearchSourceFilter { excludes?: Fields; /** @alias excludes */ exclude?: Fields; includes?: Fields; /** @alias includes */ include?: Fields; } export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram'; export interface SearchStupidBackoffSmoothingModel { /** A constant factor that the lower order n-gram model is discounted by. */ discount: double; } export type SearchSuggest<TDocument = unknown> = SearchCompletionSuggest<TDocument> | SearchPhraseSuggest | SearchTermSuggest; export interface SearchSuggestBase { length: integer; offset: integer; text: string; } export interface SearchSuggestFuzziness { /** The fuzziness factor. */ fuzziness?: Fuzziness; /** Minimum length of the input before fuzzy suggestions are returned. */ min_length?: integer; /** Minimum length of the input, which is not checked for fuzzy alternatives. */ prefix_length?: integer; /** If set to `true`, transpositions are counted as one change instead of two. */ transpositions?: boolean; /** If `true`, all measurements (like fuzzy edit distance, transpositions, and lengths) are measured in Unicode code points instead of in bytes. * This is slightly slower than raw bytes.
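 *
 * @example
 * A fuzzy completion-suggester sketch (hypothetical index and completion field; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.search({
 *   index: 'my-index',        // hypothetical index
 *   suggest: {
 *     song_suggest: {
 *       prefix: 'nirv',
 *       completion: {
 *         field: 'suggest',   // hypothetical completion field
 *         fuzzy: { fuzziness: 1, unicode_aware: true }
 *       }
 *     }
 *   }
 * })
 * ```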
*/ unicode_aware?: boolean; } export type SearchSuggestSort = 'score' | 'frequency'; export interface SearchSuggesterKeys { /** Global suggest text, to avoid repetition when the same text is used in several suggesters. */ text?: string; } export type SearchSuggester = SearchSuggesterKeys & { [property: string]: SearchFieldSuggester | string; }; export interface SearchSuggesterBase { /** The field to fetch the candidate suggestions from. * Needs to be set globally or per suggestion. */ field: Field; /** The analyzer to analyze the suggest text with. * Defaults to the search analyzer of the suggest field. */ analyzer?: string; /** The maximum corrections to be returned per suggest text token. */ size?: integer; } export interface SearchTermSuggest extends SearchSuggestBase { options: SearchTermSuggestOption | SearchTermSuggestOption[]; } export interface SearchTermSuggestOption { text: string; score: double; freq: long; highlighted?: string; collate_match?: boolean; } export interface SearchTermSuggester extends SearchSuggesterBase { lowercase_terms?: boolean; /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. * Can only be `1` or `2`. */ max_edits?: integer; /** A factor that is used to multiply with the `shard_size` in order to inspect more candidate spelling corrections on the shard level. * Can improve accuracy at the cost of performance. */ max_inspections?: integer; /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. * If a value higher than `1` is specified, the value cannot be fractional. */ max_term_freq?: float; /** The minimal threshold in number of documents a suggestion should appear in. * This can improve quality by only suggesting high frequency terms. * Can be specified as an absolute number or as a relative percentage of number of documents. * If a value higher than `1` is specified, the number cannot be fractional. */ min_doc_freq?: float; /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer; /** The number of minimal prefix characters that must match in order to be a candidate for suggestions. * Increasing this number improves spellcheck performance. */ prefix_length?: integer; /** Sets the maximum number of suggestions to be retrieved from each individual shard. */ shard_size?: integer; /** Defines how suggestions should be sorted per suggest text term. */ sort?: SearchSuggestSort; /** The string distance implementation to use for comparing how similar suggested terms are. */ string_distance?: SearchStringDistance; /** Controls which suggestions are included, or for which suggest text terms suggestions should be returned. */ suggest_mode?: SuggestMode; /** The suggest text. * Needs to be set globally or per suggestion.
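 *
 * @example
 * A term-suggester (spellcheck) sketch (hypothetical index and field; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.search({
 *   index: 'my-index',      // hypothetical index
 *   suggest: {
 *     spelling: {
 *       text: 'serach',     // misspelled input
 *       term: { field: 'title', suggest_mode: 'popular', min_word_length: 3 }
 *     }
 *   }
 * })
 * ```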
*/ text?: string; } export interface SearchTotalHits { relation: SearchTotalHitsRelation; value: long; } export type SearchTotalHitsRelation = 'eq' | 'gte'; export type SearchTrackHits = boolean | integer; export interface SearchMvtRequest extends RequestBase { /** Comma-separated list of data streams, indices, or aliases to search. */ index: Indices; /** Field containing geospatial data to return. */ field: Field; /** Zoom level for the vector tile to search. */ zoom: SearchMvtZoomLevel; /** X coordinate for the vector tile to search. */ x: SearchMvtCoordinate; /** Y coordinate for the vector tile to search. */ y: SearchMvtCoordinate; /** Sub-aggregations for the geotile_grid. * * It supports the following aggregation types: * * - `avg` * - `boxplot` * - `cardinality` * - `extended stats` * - `max` * - `median absolute deviation` * - `min` * - `percentile` * - `percentile-rank` * - `stats` * - `sum` * - `value count` * * The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ aggs?: Record<string, AggregationsAggregationContainer>; /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers * to avoid outline artifacts from geometries that extend past the extent of the tile. */ buffer?: integer; /** If `false`, the meta layer's feature is the bounding box of the tile. * If `true`, the meta layer's feature is a bounding box resulting from a * `geo_bounds` aggregation. The aggregation runs on values that intersect * the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting * bounding box may be larger than the vector tile. */ exact_bounds?: boolean; /** The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ extent?: integer; /** The fields to return in the `hits` layer. * It supports wildcards (`*`). * This parameter does not support fields with array values. Fields with array * values may return inconsistent results. */ fields?: Fields; /** The aggregation used to create a grid for the `field`. */ grid_agg?: SearchMvtGridAggregationType; /** Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` * and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results * don't include the aggs layer. */ grid_precision?: integer; /** Determines the geometry type for features in the aggs layer. In the aggs layer, * each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon * of the cell's bounding box. If `point`, each feature is a Point that is the centroid * of the cell. */ grid_type?: SearchMvtGridType; /** The query DSL used to filter documents for the search. */ query?: QueryDslQueryContainer; /** Defines one or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields; /** The maximum number of features to return in the hits layer. Accepts 0-10000. * If 0, results don't include the hits layer. */ size?: integer; /** Sort the features in the hits layer. By default, the API calculates a bounding * box for each feature. It sorts features based on this box's diagonal length, * from longest to shortest. */ sort?: Sort; /** The number of hits matching the query to count accurately. If `true`, the exact number * of hits is returned at the cost of some performance. If `false`, the response does * not include the total number of hits matching the query.
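 *
 * @example
 * A vector-tile search sketch (hypothetical index, geo field, and filter; assumes a `Client` instance named `client`):
 * ```ts
 * const tile = await client.searchMvt({
 *   index: 'my-geo-index',  // hypothetical index with a geo_point field
 *   field: 'location',      // hypothetical geo field
 *   zoom: 7, x: 64, y: 43,
 *   grid_precision: 2,
 *   query: { term: { category: 'restaurant' } } // hypothetical filter
 * })
 * // `tile` is a binary Mapbox vector tile (MapboxVectorTiles)
 * ```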
*/ track_total_hits?: SearchTrackHits; /** If `true`, the hits and aggs layers will contain additional point features representing * suggested label positions for the original features. * * * `Point` and `MultiPoint` features will have one of the points selected. * * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * * The aggregation results will provide one central point for each aggregation bucket. * * All attributes from the original features will also be copied to the new label features. * In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ with_labels?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; field?: never; zoom?: never; x?: never; y?: never; aggs?: never; buffer?: never; exact_bounds?: never; extent?: never; fields?: never; grid_agg?: never; grid_precision?: never; grid_type?: never; query?: never; runtime_mappings?: never; size?: never; sort?: never; track_total_hits?: never; with_labels?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; field?: never; zoom?: never; x?: never; y?: never; aggs?: never; buffer?: never; exact_bounds?: never; extent?: never; fields?: never; grid_agg?: never; grid_precision?: never; grid_type?: never; query?: never; runtime_mappings?: never; size?: never; sort?: never; track_total_hits?: never; with_labels?: never; }; } export type SearchMvtResponse = MapboxVectorTiles; export type SearchMvtCoordinate = integer; export type SearchMvtGridAggregationType = 'geotile' | 'geohex'; export type SearchMvtGridType = 'grid' | 'point' | 'centroid'; export type SearchMvtZoomLevel = integer; export interface SearchShardsRequest extends RequestBase { /** A comma-separated list of data streams, indices, and aliases to search. * It supports wildcards (`*`). * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, the request retrieves information from the local node only. */ local?: boolean; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never time out. */ master_timeout?: Duration; /** The node or shard the operation should be performed on. * It is random by default.
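 *
 * @example
 * A shard-routing inspection sketch (hypothetical index and routing value; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.searchShards({
 *   index: 'my-index',    // hypothetical index
 *   routing: 'user-123'   // see which shard copies this routing value maps to
 * })
 * console.log(res.shards)
 * ```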
*/ preference?: string; /** A custom value used to route operations to a specific shard. */ routing?: Routing; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; local?: never; master_timeout?: never; preference?: never; routing?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; local?: never; master_timeout?: never; preference?: never; routing?: never; }; } export interface SearchShardsResponse { nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>; shards: NodeShard[][]; indices: Record<IndexName, SearchShardsShardStoreIndex>; } export interface SearchShardsSearchShardsNodeAttributes { /** The human-readable identifier of the node. */ name: NodeName; /** The ephemeral ID of the node. */ ephemeral_id: Id; /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress; external_id: string; /** Lists node attributes. */ attributes: Record<string, string>; roles: NodeRoles; version: VersionString; min_index_version: integer; max_index_version: integer; } export interface SearchShardsShardStoreIndex { aliases?: Name[]; filter?: QueryDslQueryContainer; } export interface SearchTemplateRequest extends RequestBase { /** A comma-separated list of data streams, indices, and aliases to search. * It supports wildcards (`*`). */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean; /** The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ ignore_throttled?: boolean; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** The node or shard the operation should be performed on. * It is random by default. */ preference?: string; /** A custom value used to route operations to a specific shard. */ routing?: Routing; /** Specifies how long a consistent view of the index * should be maintained for scrolled search. */ scroll?: Duration; /** The type of the search operation. */ search_type?: SearchType; /** If `true`, `hits.total` is rendered as an integer in the response. * If `false`, it is rendered as an object. */ rest_total_hits_as_int?: boolean; /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean; /** If `true`, returns detailed information about score calculation as part of each hit. * If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ explain?: boolean; /** The ID of the search template to use. If no `source` is specified, * this parameter is required.
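 *
 * @example
 * A stored-template search sketch (hypothetical index, template ID, and Mustache parameters; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.searchTemplate({
 *   index: 'my-index',          // hypothetical index
 *   id: 'my-search-template',   // hypothetical stored template ID
 *   params: { query_string: 'hello world', from: 0, size: 10 }
 * })
 * ```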
*/ id?: Id; /** Key-value pairs used to replace Mustache variables in the template. * The key is the variable name. * The value is the variable value. */ params?: Record<string, any>; /** If `true`, the query execution is profiled. */ profile?: boolean; /** An inline search template. Supports the same parameters as the search API's * request body. It also supports Mustache variables. If no `id` is specified, this * parameter is required. */ source?: ScriptSource; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; ccs_minimize_roundtrips?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; preference?: never; routing?: never; scroll?: never; search_type?: never; rest_total_hits_as_int?: never; typed_keys?: never; explain?: never; id?: never; params?: never; profile?: never; source?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; ccs_minimize_roundtrips?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; preference?: never; routing?: never; scroll?: never; search_type?: never; rest_total_hits_as_int?: never; typed_keys?: never; explain?: never; id?: never; params?: never; profile?: never; source?: never; }; } export interface SearchTemplateResponse<TDocument = unknown> { took: long; timed_out: boolean; _shards: ShardStatistics; hits: SearchHitsMetadata<TDocument>; aggregations?: Record<AggregateName, AggregationsAggregate>; _clusters?: ClusterStatistics; fields?: Record<string, any>; max_score?: double; num_reduce_phases?: long; profile?: SearchProfile; pit_id?: Id; _scroll_id?: ScrollId; suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>; terminated_early?: boolean; } export interface TermsEnumRequest extends RequestBase { /** A comma-separated list of data streams, indices, and index aliases to search. * Wildcard (`*`) expressions are supported. * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: IndexName; /** The field to inspect for matching indexed terms. */ field: Field; /** The number of matching terms to return. */ size?: integer; /** The maximum length of time to spend collecting results. * If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. */ timeout?: Duration; /** When `true`, the provided search string is matched against index terms without case sensitivity. */ case_insensitive?: boolean; /** Filter an index shard if the provided query rewrites to `match_none`. */ index_filter?: QueryDslQueryContainer; /** The string to match at the start of indexed terms. * If it is not provided, all terms in the field are considered. * * > info * > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ string?: string; /** The string after which terms in the index should be returned. * It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ search_after?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; field?: never; size?: never; timeout?: never; case_insensitive?: never; index_filter?: never; string?: never; search_after?: never; }); /** All values in `querystring` will be added to the request querystring.
*/ querystring?: { [key: string]: any; } & { index?: never; field?: never; size?: never; timeout?: never; case_insensitive?: never; index_filter?: never; string?: never; search_after?: never; }; } export interface TermsEnumResponse { _shards: ShardStatistics; terms: string[]; /** If `false`, the returned terms set may be incomplete and should be treated as approximate. * This can occur due to a few reasons, such as a request timeout or a node error. */ complete: boolean; } export interface TermvectorsFieldStatistics { doc_count: integer; sum_doc_freq: long; sum_ttf: long; } export interface TermvectorsFilter { /** Ignore words which occur in more than this many docs. * Defaults to unbounded. */ max_doc_freq?: integer; /** The maximum number of terms that must be returned per field. */ max_num_terms?: integer; /** Ignore words with more than this frequency in the source doc. * It defaults to unbounded. */ max_term_freq?: integer; /** The maximum word length above which words will be ignored. * Defaults to unbounded. */ max_word_length?: integer; /** Ignore terms which do not occur in at least this many docs. */ min_doc_freq?: integer; /** Ignore words with less than this frequency in the source doc. */ min_term_freq?: integer; /** The minimum word length below which words will be ignored. */ min_word_length?: integer; } export interface TermvectorsRequest<TDocument = unknown> extends RequestBase { /** The name of the index that contains the document. */ index: IndexName; /** A unique identifier for the document. */ id?: Id; /** The node or shard the operation should be performed on. * It is random by default. */ preference?: string; /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean; /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: TDocument; /** Filter terms based on their tf-idf scores. * This could be useful in order to find out a good characteristic vector of a document. * This feature works in a similar manner to the second phase of the More Like This Query. */ filter?: TermvectorsFilter; /** Override the default per-field analyzer. * This is useful in order to generate term vectors in any fashion, especially when using artificial documents. * When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ per_field_analyzer?: Record<Field, string>; /** A list of fields to include in the statistics. * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields; /** If `true`, the response includes: * * * The document count (how many documents contain this field). * * The sum of document frequencies (the sum of document frequencies for all terms in this field). * * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ field_statistics?: boolean; /** If `true`, the response includes term offsets. */ offsets?: boolean; /** If `true`, the response includes term payloads. */ payloads?: boolean; /** If `true`, the response includes term positions. */ positions?: boolean; /** If `true`, the response includes: * * * The total term frequency (how often a term occurs in all documents). * * The document frequency (the number of documents containing the current term). * * By default these values are not returned since term statistics can have a serious performance impact.
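 *
 * @example
 * A term-vectors sketch with term statistics enabled (hypothetical index, document ID, and field; assumes a `Client` instance named `client`):
 * ```ts
 * const res = await client.termvectors({
 *   index: 'my-index',      // hypothetical index
 *   id: '1',
 *   fields: ['text'],       // hypothetical field
 *   term_statistics: true,  // include ttf and doc_freq, at extra cost per term
 *   field_statistics: true
 * })
 * console.log(res.term_vectors?.text?.terms)
 * ```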
    */
  term_statistics?: boolean;
  /** A custom value that is used to route operations to a specific shard. */
  routing?: Routing;
  /** If `true`, returns the document version as part of a hit. */
  version?: VersionNumber;
  /** The version type. */
  version_type?: VersionType;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; id?: never; preference?: never; realtime?: never; doc?: never; filter?: never; per_field_analyzer?: never; fields?: never; field_statistics?: never; offsets?: never; payloads?: never; positions?: never; term_statistics?: never; routing?: never; version?: never; version_type?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; id?: never; preference?: never; realtime?: never; doc?: never; filter?: never; per_field_analyzer?: never; fields?: never; field_statistics?: never; offsets?: never; payloads?: never; positions?: never; term_statistics?: never; routing?: never; version?: never; version_type?: never; };
}

export interface TermvectorsResponse {
  found: boolean;
  _id?: Id;
  _index: IndexName;
  term_vectors?: Record<Field, TermvectorsTermVector>;
  took: long;
  _version: VersionNumber;
}

export interface TermvectorsTerm {
  doc_freq?: integer;
  score?: double;
  term_freq: integer;
  tokens?: TermvectorsToken[];
  ttf?: integer;
}

export interface TermvectorsTermVector {
  field_statistics?: TermvectorsFieldStatistics;
  terms: Record<string, TermvectorsTerm>;
}

export interface TermvectorsToken {
  end_offset?: integer;
  payload?: string;
  position: integer;
  start_offset?: integer;
}

export interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
  /** A unique identifier for the document to be updated. */
  id: Id;
  /** The name of the target index.
    * By default, the index is created automatically if it doesn't exist. */
  index: IndexName;
  /** Only perform the operation if the document has this primary term. */
  if_primary_term?: long;
  /** Only perform the operation if the document has this sequence number. */
  if_seq_no?: SequenceNumber;
  /** If `true`, the document source is included in the error message in case of parsing errors. */
  include_source_on_error?: boolean;
  /** The script language. */
  lang?: string;
  /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search.
    * If 'wait_for', it waits for a refresh to make this operation visible to search.
    * If 'false', it does nothing with refreshes. */
  refresh?: Refresh;
  /** If `true`, the destination must be an index alias. */
  require_alias?: boolean;
  /** The number of times the operation should be retried when a conflict occurs. */
  retry_on_conflict?: integer;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards.
    * Elasticsearch waits for at least the timeout period before failing.
    * The actual wait time could be longer, particularly when multiple waits occur. */
  timeout?: Duration;
  /** The number of copies of each shard that must be active before proceeding with the operation.
    * Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1).
    * The default value of `1` means it waits for each primary shard to be active. */
  wait_for_active_shards?: WaitForActiveShards;
  /** The source fields you want to exclude. */
  _source_excludes?: Fields;
  /** The source fields you want to retrieve.
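    *
    * @example
    * A minimal usage sketch for the update API as a whole (not from the
    * upstream docs; index, ID, and field names are hypothetical):
    *
    * ```ts
    * // Partial-document update; creates the document if it is missing.
    * await client.update({
    *   index: 'my-index',
    *   id: '1',
    *   doc: { views: 0 },
    *   doc_as_upsert: true
    * })
    * // Scripted update with an explicit upsert document.
    * await client.update({
    *   index: 'my-index',
    *   id: '1',
    *   script: { source: 'ctx._source.views += params.by', params: { by: 1 } },
    *   upsert: { views: 1 }
    * })
    * ```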
    */
  _source_includes?: Fields;
  /** If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. */
  detect_noop?: boolean;
  /** A partial update to an existing document.
    * If both `doc` and `script` are specified, `doc` is ignored. */
  doc?: TPartialDocument;
  /** If `true`, use the contents of 'doc' as the value of 'upsert'.
    * NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */
  doc_as_upsert?: boolean;
  /** The script to run to update the document. */
  script?: Script | ScriptSource;
  /** If `true`, run the script whether or not the document exists. */
  scripted_upsert?: boolean;
  /** If `false`, turn off source retrieval.
    * You can also specify a comma-separated list of the fields you want to retrieve. */
  _source?: SearchSourceConfig;
  /** If the document does not already exist, the contents of 'upsert' are inserted as a new document.
    * If the document exists, the 'script' is run. */
  upsert?: TDocument;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; include_source_on_error?: never; lang?: never; refresh?: never; require_alias?: never; retry_on_conflict?: never; routing?: never; timeout?: never; wait_for_active_shards?: never; _source_excludes?: never; _source_includes?: never; detect_noop?: never; doc?: never; doc_as_upsert?: never; script?: never; scripted_upsert?: never; _source?: never; upsert?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { id?: never; index?: never; if_primary_term?: never; if_seq_no?: never; include_source_on_error?: never; lang?: never; refresh?: never; require_alias?: never; retry_on_conflict?: never; routing?: never; timeout?: never; wait_for_active_shards?: never; _source_excludes?: never; _source_includes?: never; detect_noop?: never; doc?: never; doc_as_upsert?: never; script?: never; scripted_upsert?: never; _source?: never; upsert?: never; };
}

export type UpdateResponse<TDocument = unknown> = UpdateUpdateWriteResponseBase<TDocument>;

export interface UpdateUpdateWriteResponseBase<TDocument = unknown> extends WriteResponseBase {
  get?: InlineGet<TDocument>;
}

export interface UpdateByQueryRequest extends RequestBase {
  /** A comma-separated list of data streams, indices, and aliases to search.
    * It supports wildcards (`*`).
    * To search all data streams or indices, omit this parameter or use `*` or `_all`. */
  index: Indices;
  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
    * This behavior applies even if the request targets other open indices.
    * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** The analyzer to use for the query string.
    * This parameter can be used only when the `q` query string parameter is specified. */
  analyzer?: string;
  /** If `true`, wildcard and prefix queries are analyzed.
    * This parameter can be used only when the `q` query string parameter is specified. */
  analyze_wildcard?: boolean;
  /** The default operator for query string query: `AND` or `OR`.
    * This parameter can be used only when the `q` query string parameter is specified. */
  default_operator?: QueryDslOperator;
  /** The field to use as default where no field prefix is given in the query string.
    * This parameter can be used only when the `q` query string parameter is specified. */
  df?: string;
  /** The type of index that wildcard patterns can match.
    * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
    * It supports comma-separated values, such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** Skips the specified number of documents. */
  from?: long;
  /** If `false`, the request returns an error if it targets a missing or closed index. */
  ignore_unavailable?: boolean;
  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
    * This parameter can be used only when the `q` query string parameter is specified. */
  lenient?: boolean;
  /** The ID of the pipeline to use to preprocess incoming documents.
    * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
    * If a final pipeline is configured it will always run, regardless of the value of this parameter. */
  pipeline?: string;
  /** The node or shard the operation should be performed on.
    * It is random by default. */
  preference?: string;
  /** A query in the Lucene query string syntax. */
  q?: string;
  /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes.
    * This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */
  refresh?: boolean;
  /** If `true`, the request cache is used for this request.
    * It defaults to the index-level setting. */
  request_cache?: boolean;
  /** The throttle for this request in sub-requests per second. */
  requests_per_second?: float;
  /** A custom value used to route operations to a specific shard. */
  routing?: Routing;
  /** The period to retain the search context for scrolling. */
  scroll?: Duration;
  /** The size of the scroll request that powers the operation. */
  scroll_size?: long;
  /** An explicit timeout for each search request.
    * By default, there is no timeout. */
  search_timeout?: Duration;
  /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */
  search_type?: SearchType;
  /** The number of slices this task should be divided into. */
  slices?: Slices;
  /** A comma-separated list of `<field>:<direction>` pairs. */
  sort?: string[];
  /** The specific `tag` of the request for logging and statistical purposes. */
  stats?: string[];
  /** The maximum number of documents to collect for each shard.
    * If a query reaches this limit, Elasticsearch terminates the query early.
    * Elasticsearch collects documents before sorting.
    *
    * IMPORTANT: Use with caution.
    * Elasticsearch applies this parameter to each shard handling the request.
    * When possible, let Elasticsearch perform early termination automatically.
    * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */
  terminate_after?: long;
  /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards.
    * By default, it is one minute.
    * This guarantees Elasticsearch waits for at least the timeout before failing.
    * The actual wait time could be longer, particularly when multiple waits occur. */
  timeout?: Duration;
  /** If `true`, returns the document version as part of a hit.
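    *
    * @example
    * A minimal usage sketch for the update by query API (not from the
    * upstream docs; index, query, and script are hypothetical):
    *
    * ```ts
    * const res = await client.updateByQuery({
    *   index: 'my-index',
    *   conflicts: 'proceed',   // count version conflicts instead of aborting
    *   query: { term: { 'user.id': 'kimchy' } },
    *   script: { source: "ctx._source.status = 'archived'" }
    * })
    * console.log(res.updated, res.version_conflicts)
    * ```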
    */
  version?: boolean;
  /** Determines whether the document should increment the version number (internal versioning) on hit, or not (as in reindex). */
  version_type?: boolean;
  /** The number of shard copies that must be active before proceeding with the operation.
    * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
    * The `timeout` parameter controls how long each write request waits for unavailable shards to become available.
    * Both work exactly the way they work in the bulk API. */
  wait_for_active_shards?: WaitForActiveShards;
  /** If `true`, the request blocks until the operation is complete.
    * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task.
    * Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */
  wait_for_completion?: boolean;
  /** The maximum number of documents to update. */
  max_docs?: long;
  /** The documents to update using the Query DSL. */
  query?: QueryDslQueryContainer;
  /** The script to run to update the document source or metadata when updating. */
  script?: Script | ScriptSource;
  /** Slice the request manually using the provided slice ID and total number of slices. */
  slice?: SlicedScroll;
  /** The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. */
  conflicts?: Conflicts;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; expand_wildcards?: never; from?: never; ignore_unavailable?: never; lenient?: never; pipeline?: never; preference?: never; q?: never; refresh?: never; request_cache?: never; requests_per_second?: never; routing?: never; scroll?: never; scroll_size?: never; search_timeout?: never; search_type?: never; slices?: never; sort?: never; stats?: never; terminate_after?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; wait_for_completion?: never; max_docs?: never; query?: never; script?: never; slice?: never; conflicts?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; expand_wildcards?: never; from?: never; ignore_unavailable?: never; lenient?: never; pipeline?: never; preference?: never; q?: never; refresh?: never; request_cache?: never; requests_per_second?: never; routing?: never; scroll?: never; scroll_size?: never; search_timeout?: never; search_type?: never; slices?: never; sort?: never; stats?: never; terminate_after?: never; timeout?: never; version?: never; version_type?: never; wait_for_active_shards?: never; wait_for_completion?: never; max_docs?: never; query?: never; script?: never; slice?: never; conflicts?: never; };
}

export interface UpdateByQueryResponse {
  /** The number of scroll responses pulled back by the update by query. */
  batches?: long;
  /** Array of failures if there were any unrecoverable errors during the process.
    * If this is non-empty then the request ended because of those failures.
    * Update by query is implemented using batches.
    * Any failure causes the entire process to end, but all failures in the current batch are collected into the array.
    * You can use the `conflicts` option to prevent the operation from ending when version conflicts occur. */
  failures?: BulkIndexByScrollFailure[];
  /** The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`. */
  noops?: long;
  /** The number of documents that were successfully deleted. */
  deleted?: long;
  /** The number of requests per second effectively run during the update by query. */
  requests_per_second?: float;
  /** The number of retries attempted by update by query.
    * `bulk` is the number of bulk actions retried.
    * `search` is the number of search actions retried. */
  retries?: Retries;
  task?: TaskId;
  /** If true, some requests timed out during the update by query. */
  timed_out?: boolean;
  /** The number of milliseconds from start to end of the whole operation. */
  took?: DurationValue<UnitMillis>;
  /** The number of documents that were successfully processed. */
  total?: long;
  /** The number of documents that were successfully updated. */
  updated?: long;
  /** The number of version conflicts that the update by query hit. */
  version_conflicts?: long;
  throttled?: Duration;
  /** The number of milliseconds the request slept to conform to `requests_per_second`. */
  throttled_millis?: DurationValue<UnitMillis>;
  throttled_until?: Duration;
  /** This field should always be equal to zero in an _update_by_query response.
    * It only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */
  throttled_until_millis?: DurationValue<UnitMillis>;
}

export interface UpdateByQueryRethrottleRequest extends RequestBase {
  /** The ID for the task. */
  task_id: Id;
  /** The throttle for this request in sub-requests per second.
    * To turn off throttling, set it to `-1`. */
  requests_per_second?: float;
  /** All values in `body` will be added to the request body. */
  body?: string | ({ [key: string]: any; } & { task_id?: never; requests_per_second?: never; });
  /** All values in `querystring` will be added to the request querystring. */
  querystring?: { [key: string]: any; } & { task_id?: never; requests_per_second?: never; };
}

export interface UpdateByQueryRethrottleResponse {
  nodes: Record<string, UpdateByQueryRethrottleUpdateByQueryRethrottleNode>;
}

export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode {
  tasks: Record<TaskId, TasksTaskInfo>;
}

export interface SpecUtilsBaseNode {
  attributes: Record<string, string>;
  host: Host;
  ip: Ip;
  name: Name;
  roles?: NodeRoles;
  transport_address: TransportAddress;
}

export type SpecUtilsNullValue = null;

export type SpecUtilsPipeSeparatedFlags<T> = T | string;

export type SpecUtilsStringified<T> = T | string;

export type SpecUtilsWithNullValue<T> = T | SpecUtilsNullValue;

export interface AcknowledgedResponseBase {
  /** For a successful response, this value is always true. On failure, an exception is returned instead.
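    *
    * @example
    * A minimal sketch of checking an acknowledged response (not from the
    * upstream docs; the index name is hypothetical):
    *
    * ```ts
    * const res = await client.indices.create({ index: 'my-index' })
    * if (res.acknowledged) {
    *   console.log('cluster state update accepted')
    * }
    * ```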
    */
  acknowledged: boolean;
}

export type AggregateName = string;

export interface BulkIndexByScrollFailure {
  cause: ErrorCause;
  id: Id;
  index: IndexName;
  status: integer;
}

export interface BulkStats {
  total_operations: long;
  total_time?: Duration;
  total_time_in_millis: DurationValue<UnitMillis>;
  total_size?: ByteSize;
  total_size_in_bytes: long;
  avg_time?: Duration;
  avg_time_in_millis: DurationValue<UnitMillis>;
  avg_size?: ByteSize;
  avg_size_in_bytes: long;
}

export type ByteSize = long | string;

export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb';

export type CategoryId = string;

export type ClusterAlias = string;

export interface ClusterDetails {
  status: ClusterSearchStatus;
  indices: string;
  took?: DurationValue<UnitMillis>;
  timed_out: boolean;
  _shards?: ShardStatistics;
  failures?: ShardFailure[];
}

export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script';

export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[];

export type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed';

export interface ClusterStatistics {
  skipped: integer;
  successful: integer;
  total: integer;
  running: integer;
  partial: integer;
  failed: integer;
  details?: Record<ClusterAlias, ClusterDetails>;
}

export interface CompletionStats {
  /** Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */
  size_in_bytes: long;
  /** Total amount of memory used for completion across all shards assigned to selected nodes. */
  size?: ByteSize;
  fields?: Record<Field, FieldSizeUsage>;
}

export type Conflicts = 'abort' | 'proceed';

export interface CoordsGeoBounds {
  top: double;
  bottom: double;
  left: double;
  right: double;
}

export type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared';

export type DFRAfterEffect = 'no' | 'b' | 'l';

export type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p';

export type DataStreamName = string;

export type DataStreamNames = DataStreamName | DataStreamName[];

export type DateFormat = string;

export type DateMath = string | Date;

export type DateTime = string | EpochTime<UnitMillis> | Date;

export type Distance = string;

export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm';

export interface DocStats {
  /** Total number of non-deleted documents across all primary shards assigned to selected nodes.
    * This number is based on documents in Lucene segments and may include documents from nested fields. */
  count: long;
  /** Total number of deleted documents across all primary shards assigned to selected nodes.
    * This number is based on documents in Lucene segments.
    * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */
  deleted?: long;
}

export type Duration = string | -1 | 0;

export type DurationLarge = string;

export type DurationValue<Unit> = Unit;

export interface ElasticsearchVersionInfo {
  /** The Elasticsearch Git commit's date. */
  build_date: DateTime;
  /** The build flavor. For example, `default`. */
  build_flavor: string;
  /** The Elasticsearch Git commit's SHA hash. */
  build_hash: string;
  /** Indicates whether the Elasticsearch build was a snapshot. */
  build_snapshot: boolean;
  /** The build type that corresponds to how Elasticsearch was installed.
    * For example, `docker`, `rpm`, or `tar`. */
  build_type: string;
  /** The version number of Elasticsearch's underlying Lucene software. */
  lucene_version: VersionString;
  /** The minimum index version with which the responding node can read from disk.
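    *
    * @example
    * A minimal sketch of reading version information from the root endpoint
    * (not from the upstream docs):
    *
    * ```ts
    * const info = await client.info()
    * console.log(info.version.number, info.version.lucene_version)
    * ```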
    */
  minimum_index_compatibility_version: VersionString;
  /** The minimum node version with which the responding node can communicate.
    * Also the minimum version from which you can perform a rolling upgrade. */
  minimum_wire_compatibility_version: VersionString;
  /** The Elasticsearch version number. */
  number: string;
}

export interface ElasticsearchVersionMinInfo {
  build_flavor: string;
  minimum_index_compatibility_version: VersionString;
  minimum_wire_compatibility_version: VersionString;
  number: string;
}

export interface EmptyObject {
}

export type EpochTime<Unit> = Unit;

export interface ErrorCauseKeys {
  /** The type of error */
  type: string;
  /** A human-readable explanation of the error, in English. */
  reason?: string | null;
  /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */
  stack_trace?: string;
  caused_by?: ErrorCause;
  root_cause?: ErrorCause[];
  suppressed?: ErrorCause[];
}

export type ErrorCause = ErrorCauseKeys & { [property: string]: any; };

export interface ErrorResponseBase {
  error: ErrorCause;
  status: integer;
}

export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none';

export type ExpandWildcards = ExpandWildcard | ExpandWildcard[];

export type Field = string;

export interface FieldMemoryUsage {
  memory_size?: ByteSize;
  memory_size_in_bytes: long;
}

export interface FieldSizeUsage {
  size?: ByteSize;
  size_in_bytes: long;
}

export interface FieldSort {
  missing?: AggregationsMissing;
  mode?: SortMode;
  nested?: NestedSortValue;
  order?: SortOrder;
  unmapped_type?: MappingFieldType;
  numeric_type?: FieldSortNumericType;
  format?: string;
}

export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos';

export type FieldValue = long | double | string | boolean | null;

export interface FielddataStats {
  evictions?: long;
  memory_size?: ByteSize;
  memory_size_in_bytes: long;
  fields?: Record<Field, FieldMemoryUsage>;
}

export type Fields = Field | Field[];

export interface FlushStats {
  periodic: long;
  total: long;
  total_time?: Duration;
  total_time_in_millis: DurationValue<UnitMillis>;
}

export type Fuzziness = string | integer;

export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds;

export interface GeoDistanceSortKeys {
  mode?: SortMode;
  distance_type?: GeoDistanceType;
  ignore_unmapped?: boolean;
  order?: SortOrder;
  unit?: DistanceUnit;
  nested?: NestedSortValue;
}

export type GeoDistanceSort = GeoDistanceSortKeys & { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit | NestedSortValue; };

export type GeoDistanceType = 'arc' | 'plane';

export type GeoHash = string;

export interface GeoHashLocation {
  geohash: GeoHash;
}

export type GeoHashPrecision = number | string;

export type GeoHexCell = string;

export interface GeoLine {
  /** Always `"LineString"` */
  type: string;
  /** Array of `[lon, lat]` coordinates */
  coordinates: double[][];
}

export type GeoLocation = LatLonGeoLocation | GeoHashLocation | double[] | string;

export type GeoShape = any;

export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains';

export type GeoTile = string;

export type GeoTilePrecision = number;

export interface GetStats {
  current: long;
  exists_time?: Duration;
  exists_time_in_millis: DurationValue<UnitMillis>;
  exists_total: long;
  missing_time?: Duration;
  missing_time_in_millis: DurationValue<UnitMillis>;
  missing_total: long;
  time?: Duration;
  time_in_millis: DurationValue<UnitMillis>;
  total: long;
}

export type GrokPattern = string;

export type HealthStatus = 'green' | 'GREEN'
  | 'yellow' | 'YELLOW' | 'red' | 'RED';

export type Host = string;

export type HttpHeaders = Record<string, string | string[]>;

export type IBDistribution = 'll' | 'spl';

export type IBLambda = 'df' | 'ttf';

export type Id = string;

export type Ids = Id | Id[];

export type IndexAlias = string;

export type IndexName = string;

export type IndexPattern = string;

export type IndexPatterns = IndexPattern[];

export interface IndexingStats {
  index_current: long;
  delete_current: long;
  delete_time?: Duration;
  delete_time_in_millis: DurationValue<UnitMillis>;
  delete_total: long;
  is_throttled: boolean;
  noop_update_total: long;
  throttle_time?: Duration;
  throttle_time_in_millis: DurationValue<UnitMillis>;
  index_time?: Duration;
  index_time_in_millis: DurationValue<UnitMillis>;
  index_total: long;
  index_failed: long;
  types?: Record<string, IndexingStats>;
  write_load?: double;
}

export type Indices = IndexName | IndexName[];

export interface IndicesOptions {
  /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only
    * missing or closed indices. This behavior applies even if the request targets other open indices. For example,
    * a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
  allow_no_indices?: boolean;
  /** Type of index that wildcard patterns can match. If the request can target data streams, this argument
    * determines whether wildcard expressions match hidden data streams. Supports comma-separated values,
    * such as `open,hidden`. */
  expand_wildcards?: ExpandWildcards;
  /** If true, missing or closed indices are not included in the response. */
  ignore_unavailable?: boolean;
  /** If true, concrete, expanded or aliased indices are ignored when frozen. */
  ignore_throttled?: boolean;
}

export interface IndicesResponseBase extends AcknowledgedResponseBase {
  _shards?: ShardStatistics;
}

export interface InlineGetKeys<TDocument = unknown> {
  fields?: Record<string, any>;
  found: boolean;
  _seq_no?: SequenceNumber;
  _primary_term?: long;
  _routing?: Routing;
  _source?: TDocument;
}

export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument> & { [property: string]: any; };

export interface InnerRetriever {
  retriever: RetrieverContainer;
  weight: float;
  normalizer: ScoreNormalizer;
}

export type Ip = string;

export interface KnnQuery extends QueryDslQueryBase {
  /** The name of the vector field to search against */
  field: Field;
  /** The query vector */
  query_vector?: QueryVector;
  /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */
  query_vector_builder?: QueryVectorBuilder;
  /** The number of nearest neighbor candidates to consider per shard */
  num_candidates?: integer;
  /** The final number of nearest neighbors to return as top hits */
  k?: integer;
  /** Filters for the kNN search query */
  filter?: QueryDslQueryContainer | QueryDslQueryContainer[];
  /** The minimum similarity for a vector to be considered a match */
  similarity?: float;
  /** Apply oversampling and rescoring to quantized vectors
    *
    * @experimental */
  rescore_vector?: RescoreVector;
}

export interface KnnRetriever extends RetrieverBase {
  /** The name of the vector field to search against. */
  field: string;
  /** Query vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */
  query_vector?: QueryVector;
  /** Defines a model to build a query vector. */
  query_vector_builder?: QueryVectorBuilder;
  /** Number of nearest neighbors to return as top hits. */
  k: integer;
  /** Number of nearest neighbor candidates to consider per shard.
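    *
    * @example
    * A minimal sketch of a kNN retriever search (not from the upstream docs;
    * the index, field, and vector are hypothetical, and retrievers require a
    * recent Elasticsearch version):
    *
    * ```ts
    * const res = await client.search({
    *   index: 'my-index',
    *   retriever: {
    *     knn: {
    *       field: 'embedding',              // hypothetical dense_vector field
    *       query_vector: [0.1, 0.2, 0.3],   // must match the field's dimensions
    *       k: 10,
    *       num_candidates: 100
    *     }
    *   }
    * })
    * ```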
    */
  num_candidates: integer;
  /** The minimum similarity required for a document to be considered a match. */
  similarity?: float;
  /** Apply oversampling and rescoring to quantized vectors
    *
    * @experimental */
  rescore_vector?: RescoreVector;
}

export interface KnnSearch {
  /** The name of the vector field to search against */
  field: Field;
  /** The query vector */
  query_vector?: QueryVector;
  /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */
  query_vector_builder?: QueryVectorBuilder;
  /** The final number of nearest neighbors to return as top hits */
  k?: integer;
  /** The number of nearest neighbor candidates to consider per shard */
  num_candidates?: integer;
  /** Boost value to apply to kNN scores */
  boost?: float;
  /** Filters for the kNN search query */
  filter?: QueryDslQueryContainer | QueryDslQueryContainer[];
  /** The minimum similarity for a vector to be considered a match */
  similarity?: float;
  /** If defined, each search hit will contain inner hits. */
  inner_hits?: SearchInnerHits;
  /** Apply oversampling and rescoring to quantized vectors
    *
    * @experimental */
  rescore_vector?: RescoreVector;
}

export interface LatLonGeoLocation {
  /** Latitude */
  lat: double;
  /** Longitude */
  lon: double;
}

export type Level = 'cluster' | 'indices' | 'shards';

export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED';

export interface LinearRetriever extends RetrieverBase {
  /** Inner retrievers. */
  retrievers?: InnerRetriever[];
  rank_window_size?: integer;
}

export type MapboxVectorTiles = ArrayBuffer;

export interface MergesStats {
  current: long;
  current_docs: long;
  current_size?: string;
  current_size_in_bytes: long;
  total: long;
  total_auto_throttle?: string;
  total_auto_throttle_in_bytes: long;
  total_docs: long;
  total_size?: string;
  total_size_in_bytes: long;
  total_stopped_time?: Duration;
  total_stopped_time_in_millis: DurationValue<UnitMillis>;
  total_throttled_time?: Duration;
  total_throttled_time_in_millis: DurationValue<UnitMillis>;
  total_time?: Duration;
  total_time_in_millis: DurationValue<UnitMillis>;
}

export type Metadata = Record<string, any>;

export type Metrics = string | string[];

export type MinimumShouldMatch = integer | string;

export type MultiTermQueryRewrite = string;

export type Name = string;

export type Names = Name | Name[];

export type Namespace = string;

export interface NestedSortValue {
  filter?: QueryDslQueryContainer;
  max_children?: integer;
  nested?: NestedSortValue;
  path: Field;
}

export interface NodeAttributes {
  /** Lists node attributes. */
  attributes: Record<string, string>;
  /** The ephemeral ID of the node. */
  ephemeral_id: Id;
  /** The unique identifier of the node. */
  id?: NodeId;
  /** The unique name of the node. */
  name: NodeName;
  /** The host and port where transport HTTP connections are accepted.
    */
  transport_address: TransportAddress;
}

export type NodeId = string;

export type NodeIds = NodeId | NodeId[];

export type NodeName = string;

export type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only';

export type NodeRoles = NodeRole[];

export interface NodeShard {
  state: IndicesStatsShardRoutingState;
  primary: boolean;
  node?: NodeName;
  shard: integer;
  index: IndexName;
  allocation_id?: Record<string, Id>;
  recovery_source?: Record<string, Id>;
  unassigned_info?: ClusterAllocationExplainUnassignedInformation;
  relocating_node?: NodeId | null;
  relocation_failure_info?: RelocationFailureInfo;
}

export interface NodeStatistics {
  failures?: ErrorCause[];
  /** Total number of nodes selected by the request. */
  total: integer;
  /** Number of nodes that responded successfully to the request. */
  successful: integer;
  /** Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response. */
  failed: integer;
}

export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z';

export type OpType = 'index' | 'create';

export type Password = string;

export type Percentage = string | float;

export interface PinnedRetriever extends RetrieverBase {
  /** Inner retriever. */
  retriever: RetrieverContainer;
  ids?: string[];
  docs?: SpecifiedDocument[];
  rank_window_size?: integer;
}

export type PipelineName = string;

export interface PluginStats {
  classname: string;
  description: string;
  elasticsearch_version: VersionString;
  extended_plugins: string[];
  has_native_controller: boolean;
  java_version: VersionString;
  name: Name;
  version: VersionString;
  licensed: boolean;
}

export type PropertyName = string;

export interface QueryCacheStats {
  /** Total number of entries added to the query cache across all shards assigned to selected nodes.
    * This number includes current and evicted entries. */
  cache_count: long;
  /** Total number of entries currently in the query cache across all shards assigned to selected nodes. */
  cache_size: long;
  /** Total number of query cache evictions across all shards assigned to selected nodes. */
  evictions: long;
  /** Total count of query cache hits across all shards assigned to selected nodes. */
  hit_count: long;
  /** Total amount of memory used for the query cache across all shards assigned to selected nodes. */
  memory_size?: ByteSize;
  /** Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */
  memory_size_in_bytes: long;
  /** Total count of query cache misses across all shards assigned to selected nodes. */
  miss_count: long;
  /** Total count of hits and misses in the query cache across all shards assigned to selected nodes. */
  total_count: long;
}

export type QueryVector = float[];

export interface QueryVectorBuilder {
  text_embedding?: TextEmbedding;
}

export interface RRFRetriever extends RetrieverBase {
  /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */
  retrievers: RetrieverContainer[];
  /** This value determines how much influence documents in individual result sets per query have over the final ranked result set. */
  rank_constant?: integer;
  /** This value determines the size of the individual result sets per query.
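    *
    * @example
    * A minimal sketch of reciprocal rank fusion over a lexical and a kNN
    * retriever (not from the upstream docs; names and values are hypothetical):
    *
    * ```ts
    * const res = await client.search({
    *   index: 'my-index',
    *   retriever: {
    *     rrf: {
    *       retrievers: [
    *         { standard: { query: { match: { title: 'vector search' } } } },
    *         { knn: { field: 'embedding', query_vector: [0.1, 0.2, 0.3], k: 10, num_candidates: 100 } }
    *       ],
    *       rank_constant: 60,
    *       rank_window_size: 50
    *     }
    *   }
    * })
    * ```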
    */
  rank_window_size?: integer;
}

export interface RankBase {
}

export interface RankContainer {
  /** The reciprocal rank fusion parameters */
  rrf?: RrfRank;
}

export interface RecoveryStats {
  current_as_source: long;
  current_as_target: long;
  throttle_time?: Duration;
  throttle_time_in_millis: DurationValue<UnitMillis>;
}

export type Refresh = boolean | 'true' | 'false' | 'wait_for';

export interface RefreshStats {
  external_total: long;
  external_total_time_in_millis: DurationValue<UnitMillis>;
  listeners: long;
  total: long;
  total_time?: Duration;
  total_time_in_millis: DurationValue<UnitMillis>;
}

export type RelationName = string;

export interface RelocationFailureInfo {
  failed_attempts: integer;
}

export interface RequestBase extends SpecUtilsCommonQueryParameters {
}

export interface RequestCacheStats {
  evictions: long;
  hit_count: long;
  memory_size?: string;
  memory_size_in_bytes: long;
  miss_count: long;
}

export interface RescoreVector {
  /** Applies the specified oversample factor to k on the approximate kNN search */
  oversample: float;
}

export interface RescorerRetriever extends RetrieverBase {
  /** Inner retriever. */
  retriever: RetrieverContainer;
  rescore: SearchRescore | SearchRescore[];
}

export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop';

export interface Retries {
  /** The number of bulk actions retried. */
  bulk: long;
  /** The number of search actions retried. */
  search: long;
}

export interface RetrieverBase {
  /** Query to filter the documents that can match. */
  filter?: QueryDslQueryContainer | QueryDslQueryContainer[];
  /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */
  min_score?: float;
  /** Retriever name. */
  _name?: string;
}

export interface RetrieverContainer {
  /** A retriever that replaces the functionality of a traditional query. */
  standard?: StandardRetriever;
  /** A retriever that replaces the functionality of a knn search. */
  knn?: KnnRetriever;
  /** A retriever that produces top documents from reciprocal rank fusion (RRF). */
  rrf?: RRFRetriever;
  /** A retriever that reranks the top documents based on a reranking model using the inference API. */
  text_similarity_reranker?: TextSimilarityReranker;
  /** A retriever that replaces the functionality of a rule query. */
  rule?: RuleRetriever;
  /** A retriever that re-scores only the results produced by its child retriever. */
  rescorer?: RescorerRetriever;
  /** A retriever that supports the combination of different retrievers through a weighted linear combination. */
  linear?: LinearRetriever;
  /** A pinned retriever applies pinned documents to the underlying retriever.
    * This retriever will rewrite to a PinnedQueryBuilder. */
  pinned?: PinnedRetriever;
}

export type Routing = string;

export interface RrfRank {
  /** How much influence documents in individual result sets per query have over the final ranked result set */
  rank_constant?: long;
  /** Size of the individual result sets per query */
  rank_window_size?: long;
}

export interface RuleRetriever extends RetrieverBase {
  /** The ruleset IDs containing the rules this retriever is evaluating against. */
  ruleset_ids: Id | Id[];
  /** The match criteria that will determine if a rule in the provided rulesets should be applied. */
  match_criteria: any;
  /** The retriever whose results rules should be applied to. */
  retriever: RetrieverContainer;
  /** This value determines the size of the individual result set.
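    *
    * @example
    * A minimal sketch of a rule retriever wrapping a standard retriever (not
    * from the upstream docs; the ruleset ID and match criteria are hypothetical):
    *
    * ```ts
    * const res = await client.search({
    *   index: 'my-index',
    *   retriever: {
    *     rule: {
    *       ruleset_ids: ['my-ruleset'],
    *       match_criteria: { user_query: 'pugs' },
    *       retriever: { standard: { query: { match: { body: 'pugs' } } } }
    *     }
    *   }
    * })
    * ```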
    */
  rank_window_size?: integer;
}

export type ScalarValue = long | double | string | boolean | null;

export type ScoreNormalizer = 'none' | 'minmax';

export interface ScoreSort {
  order?: SortOrder;
}

export interface Script {
  /** The script source. */
  source?: ScriptSource;
  /** The `id` for a stored script. */
  id?: Id;
  /** Specifies any named parameters that are passed into the script as variables.
    * Use parameters instead of hard-coded values to decrease compile time. */
  params?: Record<string, any>;
  /** Specifies the language the script is written in. */
  lang?: ScriptLanguage;
  options?: Record<string, string>;
}

export interface ScriptField {
  script: Script | ScriptSource;
  ignore_failure?: boolean;
}

export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string;

export interface ScriptSort {
  order?: SortOrder;
  script: Script | ScriptSource;
  type?: ScriptSortType;
  mode?: SortMode;
  nested?: NestedSortValue;
}

export type ScriptSortType = 'string' | 'number' | 'version';

export type ScriptSource = string | SearchSearchRequestBody;

export interface ScriptTransform {
  lang?: string;
  params?: Record<string, any>;
  source?: ScriptSource;
  id?: string;
}

export type ScrollId = string;

export type ScrollIds = ScrollId | ScrollId[];

export interface SearchStats {
  fetch_current: long;
  fetch_time?: Duration;
  fetch_time_in_millis: DurationValue<UnitMillis>;
  fetch_total: long;
  open_contexts?: long;
  query_current: long;
  query_time?: Duration;
  query_time_in_millis: DurationValue<UnitMillis>;
  query_total: long;
  scroll_current: long;
  scroll_time?: Duration;
  scroll_time_in_millis: DurationValue<UnitMillis>;
  scroll_total: long;
  suggest_current: long;
  suggest_time?: Duration;
  suggest_time_in_millis: DurationValue<UnitMillis>;
  suggest_total: long;
  groups?: Record<string, SearchStats>;
}

export interface SearchTransform {
  request: WatcherSearchInputRequestDefinition;
  timeout: Duration;
}

export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch';

export interface SegmentsStats {
  /** Total number of segments across all shards assigned to selected nodes. */
  count: integer;
  /** Total amount of memory used for doc values across all shards assigned to selected nodes. */
  doc_values_memory?: ByteSize;
  /** Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */
  doc_values_memory_in_bytes: long;
  /** This object is not populated by the cluster stats API.
    * To get information on segment files, use the node stats API. */
  file_sizes: Record<string, IndicesStatsShardFileSizeInfo>;
  /** Total amount of memory used by fixed bit sets across all shards assigned to selected nodes.
    * Fixed bit sets are used for nested object field types and type filters for join fields. */
  fixed_bit_set?: ByteSize;
  /** Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */
  fixed_bit_set_memory_in_bytes: long;
  /** Total amount of memory used by all index writers across all shards assigned to selected nodes. */
  index_writer_memory?: ByteSize;
  index_writer_max_memory_in_bytes?: long;
  /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */
  index_writer_memory_in_bytes: long;
  /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */
  max_unsafe_auto_id_timestamp: long;
  /** Total amount of memory used for segments across all shards assigned to selected nodes. */
  memory?: ByteSize;
  /** Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes.
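    *
    * @example
    * A minimal sketch of the `Script` shape defined above, used here in an
    * update call (not from the upstream docs; index and field are hypothetical):
    *
    * ```ts
    * await client.update({
    *   index: 'my-index',
    *   id: '1',
    *   script: {
    *     lang: 'painless',
    *     source: 'ctx._source.counter += params.count',
    *     params: { count: 4 }   // prefer params over hard-coded values
    *   }
    * })
    * ```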
    */
  memory_in_bytes: long;
  /** Total amount of memory used for normalization factors across all shards assigned to selected nodes. */
  norms_memory?: ByteSize;
  /** Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */
  norms_memory_in_bytes: long;
  /** Total amount of memory used for points across all shards assigned to selected nodes. */
  points_memory?: ByteSize;
  /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */
  points_memory_in_bytes: long;
  stored_memory?: ByteSize;
  /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */
  stored_fields_memory_in_bytes: long;
  /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */
  terms_memory_in_bytes: long;
  /** Total amount of memory used for terms across all shards assigned to selected nodes. */
  terms_memory?: ByteSize;
  /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */
  term_vectory_memory?: ByteSize;
  /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */
  term_vectors_memory_in_bytes: long;
  /** Total amount of memory used by all version maps across all shards assigned to selected nodes. */
  version_map_memory?: ByteSize;
  /** Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. */
  version_map_memory_in_bytes: long;
}

export type SequenceNumber = long;

export type Service = string;

export interface ShardFailure {
  index?: IndexName;
  node?: string;
  reason: ErrorCause;
  shard: integer;
  status?: string;
}

export interface ShardStatistics {
  /** The number of shards the operation or search attempted to run on but failed. */
  failed: uint;
  /** The number of shards the operation or search succeeded on. */
  successful: uint;
  /** The number of shards the operation or search will run on overall. */
  total: uint;
  failures?: ShardFailure[];
  skipped?: uint;
}

export interface ShardsOperationResponseBase {
  _shards?: ShardStatistics;
}

export interface SlicedScroll {
  field?: Field;
  id: Id;
  max: integer;
}

export type Slices = integer | SlicesCalculation;

export type SlicesCalculation = 'auto';

export type Sort = SortCombinations | SortCombinations[];

export type SortCombinations = Field | SortOptions;

export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median';

export interface SortOptionsKeys {
  _score?: ScoreSort;
  _doc?: ScoreSort;
  _geo_distance?: GeoDistanceSort;
  _script?: ScriptSort;
}

export type SortOptions = SortOptionsKeys & { [property: string]: FieldSort | SortOrder | ScoreSort | GeoDistanceSort | ScriptSort; };

export type SortOrder = 'asc' | 'desc';

export type SortResults = FieldValue[];

export interface SpecifiedDocument {
  index?: IndexName;
  id: Id;
}

export interface StandardRetriever extends RetrieverBase {
  /** Defines a query to retrieve a set of top documents. */
  query?: QueryDslQueryContainer;
  /** Defines a search after object parameter used for pagination. */
  search_after?: SortResults;
  /** Maximum number of documents to collect for each shard. */
  terminate_after?: integer;
  /** A sort object that specifies the order of matching documents. */
  sort?: Sort;
  /** Collapses the top documents by a specified key into a single top document per key. */
  collapse?: SearchFieldCollapse;
}

export interface StoreStats {
  /** Total size of all shards assigned to selected nodes.
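    *
    * @example
    * A minimal sketch of the `Sort` type defined above, as used in a search
    * request (not from the upstream docs; index and fields are hypothetical):
    *
    * ```ts
    * const res = await client.search({
    *   index: 'my-index',
    *   sort: [
    *     { price: { order: 'desc' } },   // SortOptions
    *     'created_at',                   // bare Field
    *     '_score'
    *   ],
    *   query: { match: { title: 'sofa' } }
    * })
    * ```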
    */
  size?: ByteSize;
  /** Total size, in bytes, of all shards assigned to selected nodes. */
  size_in_bytes: long;
  /** A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */
  reserved?: ByteSize;
  /** A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */
  reserved_in_bytes: long;
  /** Total data set size of all shards assigned to selected nodes.
    * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */
  total_data_set_size?: ByteSize;
  /** Total data set size, in bytes, of all shards assigned to selected nodes.
    * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */
  total_data_set_size_in_bytes?: long;
}

export interface StoredScript {
  /** The language the script is written in.
    * For search templates, use `mustache`. */
  lang: ScriptLanguage;
  options?: Record<string, string>;
  /** The script source.
    * For search templates, an object containing the search template. */
  source: ScriptSource;
}

export type StreamResult = ArrayBuffer;

export type SuggestMode = 'missing' | 'popular' | 'always';

export type SuggestionName = string;

export interface TaskFailure {
  task_id: long;
  node_id: NodeId;
  status: string;
  reason: ErrorCause;
}

export type TaskId = string | integer;

export interface TextEmbedding {
  model_id: string;
  model_text: string;
}

export interface TextSimilarityReranker extends RetrieverBase {
  /** The nested retriever which will produce the first-level results, that will later be used for reranking. */
  retriever: RetrieverContainer;
  /** This value determines how many documents we will consider from the nested retriever. */
  rank_window_size?: integer;
  /** Unique identifier of the inference endpoint created using the inference API. */
  inference_id?: string;
  /** The text snippet used as the basis for similarity comparison */
  inference_text: string;
  /** The document field to be used for text similarity comparisons.
    * This field should contain the text that will be evaluated against the inference_text. */
  field: string;
}

export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem';

export type TimeOfDay = string;

export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd';

export type TimeZone = string;

export interface TopLeftBottomRightGeoBounds {
  top_left: GeoLocation;
  bottom_right: GeoLocation;
}

export interface TopRightBottomLeftGeoBounds {
  top_right: GeoLocation;
  bottom_left: GeoLocation;
}

export interface TransformContainer {
  chain?: TransformContainer[];
  script?: ScriptTransform;
  search?: SearchTransform;
}

export interface TranslogStats {
  earliest_last_modified_age: long;
  operations: long;
  size?: string;
  size_in_bytes: long;
  uncommitted_operations: integer;
  uncommitted_size?: string;
  uncommitted_size_in_bytes: long;
}

export type TransportAddress = string;

export type UnitFloatMillis = double;

export type UnitMillis = long;

export type UnitNanos = long;

export type UnitSeconds = long;

export type Username = string;

export type Uuid = string;

export type VersionNumber = long;

export type VersionString = string;

export type VersionType = 'internal' | 'external' | 'external_gte' | 'force';

export type WaitForActiveShardOptions = 'all' | 'index-setting';

export type WaitForActiveShards = integer | WaitForActiveShardOptions;

export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid';

export interface WarmerStats {
  current: long;
  total: long;
  total_time?: Duration;
  total_time_in_millis: DurationValue<UnitMillis>;
}

export interface WktGeoBounds {
  wkt: string;
}

export interface WriteResponseBase {
  /** The unique identifier for the added document. */
  _id: Id;
  /** The name of the index the document was added to. */
  _index: IndexName;
  /** The primary term assigned to the document for the indexing operation. */
  _primary_term?: long;
  /** The result of the indexing operation: `created` or `updated`. */
  result: Result;
  /** The sequence number assigned to the document for the indexing operation.
    * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */
  _seq_no?: SequenceNumber;
  /** Information about the replication process of the operation. */
  _shards: ShardStatistics;
  /** The document version, which is incremented each time the document is updated. */
  _version: VersionNumber;
  forced_refresh?: boolean;
}

export type byte = number;

export type double = number;

export type float = number;

export type integer = number;

export type long = number;

export type short = number;

export type uint = number;

export type ulong = number;

export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiBucketAggregateBase<AggregationsAdjacencyMatrixBucket> {
}

export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase {
  /** Filters used to create buckets.
    * At least one filter is required. */
  filters?: Record<string, QueryDslQueryContainer>;
  /** Separator used to concatenate filter names. Defaults to &.
    */
  separator?: string;
}

export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase {
  key: string;
}

export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys & { [property: string]: AggregationsAggregate | string | long; };

export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate;

export interface AggregationsAggregateBase {
  meta?: Metadata;
}

export type AggregationsAggregateOrder = Partial<Record<Field, SortOrder>> | Partial<Record<Field, SortOrder>>[];

export interface AggregationsAggregation {
}

export interface AggregationsAggregationContainer {
  /** Sub-aggregations for this aggregation.
    * Only applies to bucket aggregations. */
  aggregations?: Record<string, AggregationsAggregationContainer>;
  /** Sub-aggregations for this aggregation.
    * Only applies to bucket aggregations.
    * @alias aggregations */
  aggs?: Record<string, AggregationsAggregationContainer>;
  meta?: Metadata;
  /** A bucket aggregation returning a form of adjacency matrix.
    * The request provides a collection of named filter expressions, similar to the `filters` aggregation.
    * Each bucket in the response represents a non-empty cell in the matrix of intersecting filters.
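    *
    * @example
    * A minimal sketch of an adjacency matrix aggregation (not from the
    * upstream docs; index, bucket names, and filters are hypothetical):
    *
    * ```ts
    * const res = await client.search({
    *   index: 'my-index',
    *   size: 0,
    *   aggs: {
    *     interactions: {
    *       adjacency_matrix: {
    *         filters: {
    *           grpA: { terms: { accounts: ['hillary', 'sidney'] } },
    *           grpB: { terms: { accounts: ['donald', 'mitt'] } }
    *         }
    *       }
    *     }
    *   }
    * })
    * ```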
    */
  adjacency_matrix?: AggregationsAdjacencyMatrixAggregation;
  /** A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */
  auto_date_histogram?: AggregationsAutoDateHistogramAggregation;
  /** A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */
  avg?: AggregationsAverageAggregation;
  /** A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation.
    * The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. */
  avg_bucket?: AggregationsAverageBucketAggregation;
  /** A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */
  boxplot?: AggregationsBoxplotAggregation;
  /** A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */
  bucket_script?: AggregationsBucketScriptAggregation;
  /** A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */
  bucket_selector?: AggregationsBucketSelectorAggregation;
  /** A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */
  bucket_sort?: AggregationsBucketSortAggregation;
  /** A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the document counts in the configured sibling aggregation.
    * @experimental */
  bucket_count_ks_test?: AggregationsBucketKsAggregation;
  /** A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation.
    * @experimental */
  bucket_correlation?: AggregationsBucketCorrelationAggregation;
  /** A single-value metrics aggregation that calculates an approximate count of distinct values. */
  cardinality?: AggregationsCardinalityAggregation;
  /** A multi-bucket aggregation that groups semi-structured text into buckets.
    * @experimental */
  categorize_text?: AggregationsCategorizeTextAggregation;
  /** A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */
  children?: AggregationsChildrenAggregation;
  /** A multi-bucket aggregation that creates composite buckets from different sources.
    * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */
  composite?: AggregationsCompositeAggregation;
  /** A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */
  cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation;
  /** A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */
  cumulative_sum?: AggregationsCumulativeSumAggregation;
  /** A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents.
    * It dynamically builds fixed size (interval) buckets over the values. */
  date_histogram?: AggregationsDateHistogramAggregation;
  /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket.
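    *
    * @example
    * A minimal sketch of a date range aggregation (not from the upstream
    * docs; index and field are hypothetical):
    *
    * ```ts
    * const res = await client.search({
    *   index: 'my-index',
    *   size: 0,
    *   aggs: {
    *     recency: {
    *       date_range: {
    *         field: 'created_at',
    *         ranges: [
    *           { to: 'now-10M/M' },    // older than ten months
    *           { from: 'now-10M/M' }   // within the last ten months
    *         ]
    *       }
    *     }
    *   }
    * })
    * ```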
    */
  date_range?: AggregationsDateRangeAggregation;
  /** A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */
  derivative?: AggregationsDerivativeAggregation;
  /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents.
    * Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */
  diversified_sampler?: AggregationsDiversifiedSamplerAggregation;
  /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */
  extended_stats?: AggregationsExtendedStatsAggregation;
  /** A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. */
  extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation;
  /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */
  frequent_item_sets?: AggregationsFrequentItemSetsAggregation;
  /** A single bucket aggregation that narrows the set of documents to those that match a query. */
  filter?: QueryDslQueryContainer;
  /** A multi-bucket aggregation where each bucket contains the documents that match a query. */
  filters?: AggregationsFiltersAggregation;
  /** A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */
  geo_bounds?: AggregationsGeoBoundsAggregation;
  /** A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */
  geo_centroid?: AggregationsGeoCentroidAggregation;
  /** A multi-bucket aggregation that works on `geo_point` fields.
    * Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */
  geo_distance?: AggregationsGeoDistanceAggregation;
  /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid.
    * Each cell is labeled using a geohash which is of user-definable precision. */
  geohash_grid?: AggregationsGeoHashGridAggregation;
  /** Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */
  geo_line?: AggregationsGeoLineAggregation;
  /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid.
    * Each cell corresponds to a map tile as used by many online map sites. */
  geotile_grid?: AggregationsGeoTileGridAggregation;
  /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid.
    * Each cell corresponds to a H3 cell index and is labeled using the H3Index representation. */
  geohex_grid?: AggregationsGeohexGridAggregation;
  /** Defines a single bucket of all the documents within the search execution context.
    * This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. */
  global?: AggregationsGlobalAggregation;
  /** A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents.
    * It dynamically builds fixed size (interval) buckets over the values.
*/ histogram?: AggregationsHistogramAggregation; /** A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */ ip_range?: AggregationsIpRangeAggregation; /** A bucket aggregation that groups documents based on the network or sub-network of an IP address. */ ip_prefix?: AggregationsIpPrefixAggregation; /** A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */ inference?: AggregationsInferenceAggregation; line?: AggregationsGeoLineAggregation; /** A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`. */ matrix_stats?: AggregationsMatrixStatsAggregation; /** A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */ max?: AggregationsMaxAggregation; /** A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ max_bucket?: AggregationsMaxBucketAggregation; /** A single-value aggregation that approximates the median absolute deviation of its search results. */ median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation; /** A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */ min?: AggregationsMinAggregation; /** A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ min_bucket?: AggregationsMinBucketAggregation; /** A field data based single bucket aggregation that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */ missing?: AggregationsMissingAggregation; moving_avg?: AggregationsMovingAverageAggregation; /** Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */ moving_percentiles?: AggregationsMovingPercentilesAggregation; /** Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data. * For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */ moving_fn?: AggregationsMovingFunctionAggregation; /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */ multi_terms?: AggregationsMultiTermsAggregation; /** A special single bucket aggregation that enables aggregating nested documents. */ nested?: AggregationsNestedAggregation; /** A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */ normalize?: AggregationsNormalizeAggregation; /** A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. */ parent?: AggregationsParentAggregation; /** A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents.
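*
* An illustrative sketch (not part of the generated spec), assuming a numeric
* `load_time` field; `values` lists the values whose ranks are computed:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     load_time_ranks: { percentile_ranks: { field: 'load_time', values: [500, 600] } }
*   }
* })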
*/ percentile_ranks?: AggregationsPercentileRanksAggregation; /** A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. */ percentiles?: AggregationsPercentilesAggregation; /** A sibling pipeline aggregation which calculates percentiles across all buckets of a specified metric in a sibling aggregation. */ percentiles_bucket?: AggregationsPercentilesBucketAggregation; /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation; /** A multi-bucket value source based aggregation which finds "rare" terms—terms that are at the long-tail of the distribution and are not frequent. */ rare_terms?: AggregationsRareTermsAggregation; /** Calculates a rate of documents or a field in each bucket. * Can only be used inside a `date_histogram` or `composite` aggregation. */ rate?: AggregationsRateAggregation; /** A special single bucket aggregation that enables aggregating on parent documents from nested documents. * Should only be defined inside a `nested` aggregation. */ reverse_nested?: AggregationsReverseNestedAggregation; /** A single bucket aggregation that randomly includes documents in the aggregated results. * Sampling provides significant speed improvement at the cost of accuracy. * @remarks This property is not supported on Elastic Cloud Serverless. * @experimental */ random_sampler?: AggregationsRandomSamplerAggregation; /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */ sampler?: AggregationsSamplerAggregation; /** A metric aggregation that uses scripts to provide a metric output. */ scripted_metric?: AggregationsScriptedMetricAggregation; /** An aggregation that subtracts values in a time series from themselves at different time lags or periods. */ serial_diff?: AggregationsSerialDifferencingAggregation; /** Returns interesting or unusual occurrences of terms in a set. */ significant_terms?: AggregationsSignificantTermsAggregation; /** Returns interesting or unusual occurrences of free-text terms in a set. */ significant_text?: AggregationsSignificantTextAggregation; /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ stats?: AggregationsStatsAggregation; /** A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. */ stats_bucket?: AggregationsStatsBucketAggregation; /** A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */ string_stats?: AggregationsStringStatsAggregation; /** A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */ sum?: AggregationsSumAggregation; /** A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */ sum_bucket?: AggregationsSumBucketAggregation; /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation; /** The time series aggregation queries data created using a time series index. * This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode.
* @experimental */ time_series?: AggregationsTimeSeriesAggregation; /** A metric aggregation that returns the top matching documents per bucket. */ top_hits?: AggregationsTopHitsAggregation; /** A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */ t_test?: AggregationsTTestAggregation; /** A metric aggregation that selects metrics from the document with the largest or smallest sort value. */ top_metrics?: AggregationsTopMetricsAggregation; /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ value_count?: AggregationsValueCountAggregation; /** A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */ weighted_avg?: AggregationsWeightedAverageAggregation; /** A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ variable_width_histogram?: AggregationsVariableWidthHistogramAggregation; } export interface AggregationsAggregationRange { /** Start of the range (inclusive). */ from?: double | null; /** Custom key to return the range with. */ key?: string; /** End of the range (exclusive). */ to?: double | null; } export interface AggregationsArrayPercentilesItem { key: string; value: double | null; value_as_string?: string; } export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { interval: DurationLarge; } export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { /** The target number of buckets. */ buckets?: integer; /** The field on which to run the aggregation. */ field?: Field; /** The date format used to format `key_as_string` in the response. * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string; /** The minimum rounding interval. * This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */ minimum_interval?: AggregationsMinimumInterval; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: DateTime; /** Time zone specified as an ISO 8601 UTC offset. */ offset?: string; params?: Record; script?: Script | ScriptSource; /** Time zone ID. */ time_zone?: TimeZone; } export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase { } export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsAvgAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase { min: double; max: double; q1: double; q2: double; q3: double; lower: double; upper: double; min_as_string?: string; max_as_string?: string; q1_as_string?: string; q2_as_string?: string; q3_as_string?: string; lower_as_string?: string; upper_as_string?: string; } export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error.
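*
* An illustrative sketch (not part of the generated spec) of a `boxplot`
* aggregation with an explicit `compression`, assuming a numeric `load_time` field:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     load_time_boxplot: { boxplot: { field: 'load_time', compression: 200 } }
*   }
* })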
*/ compression?: double; } export interface AggregationsBucketAggregationBase { } export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { /** The correlation function to execute. */ function: AggregationsBucketCorrelationFunction; } export interface AggregationsBucketCorrelationFunction { /** The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. */ count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation; } export interface AggregationsBucketCorrelationFunctionCountCorrelation { /** The indicator with which to correlate the configured `bucket_path` values. */ indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator; } export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { /** The total number of documents that initially created the expectations. It’s required to be greater * than or equal to the sum of all values in the buckets_path as this is the originating superset of data * to which the term values are correlated. */ doc_count: integer; /** An array of numbers with which to correlate the configured `bucket_path` values. * The length of this value must always equal the number of buckets returned by the `bucket_path`. */ expectations: double[]; /** An array of fractions to use when averaging and calculating variance. This should be used if * the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, * must equal expectations. */ fractions?: double[]; } export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { /** A list of string values indicating which K-S test alternative to calculate. The valid values * are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used * when calculating the K-S test. Default value is all possible alternative hypotheses. */ alternative?: string[]; /** A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. * In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual * document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall * documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a * metric to define the bucket end points. */ fractions?: double[]; /** Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. * This determines the cumulative distribution function (CDF) points used when comparing the two samples. Default is * `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, * and `lower_tail`. */ sampling_method?: string; } export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase { keys: string[]; } export interface AggregationsBucketPathAggregation { /** Path to the buckets that contain one set of values to correlate. */ buckets_path?: AggregationsBucketsPath; } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { /** The script to run for this aggregation. */ script?: Script | ScriptSource; } export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { /** The script to run for this aggregation.
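*
* An illustrative sketch (not part of the generated spec): keep only the monthly
* buckets whose total sales exceed a threshold, assuming `date` and `price` fields:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     sales_per_month: {
*       date_histogram: { field: 'date', calendar_interval: 'month' },
*       aggs: {
*         total_sales: { sum: { field: 'price' } },
*         big_months_only: {
*           bucket_selector: {
*             buckets_path: { totalSales: 'total_sales' },
*             script: 'params.totalSales > 200'
*           }
*         }
*       }
*     }
*   }
* })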
*/ script?: Script | ScriptSource; } export interface AggregationsBucketSortAggregation { /** Buckets in positions prior to `from` will be truncated. */ from?: integer; /** The policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy; /** The number of buckets to return. * Defaults to all buckets of the parent aggregation. */ size?: integer; /** The list of fields to sort on. */ sort?: Sort; } export type AggregationsBuckets = Record | TBucket[]; export type AggregationsBucketsPath = string | string[] | Record; export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y'; export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase { value: long; } export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { /** A unique count below which counts are expected to be close to accurate. * This allows you to trade memory for accuracy. */ precision_threshold?: integer; rehash?: boolean; /** Mechanism by which the cardinality aggregation is run. */ execution_hint?: AggregationsCardinalityExecutionMode; } export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic'; export interface AggregationsCategorizeTextAggregation { /** The semi-structured text field to categorize. */ field: Field; /** The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. * Smaller values use less memory and create fewer categories. Larger values will use more memory and * create narrower categories. Max allowed value is 100. */ max_unique_tokens?: integer; /** The maximum number of token positions to match on before attempting to merge categories. Larger * values will use more memory and create narrower categories. Max allowed value is 100. */ max_matched_tokens?: integer; /** The minimum percentage of tokens that must match for text to be added to the category bucket. Must * be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory * usage and create narrower categories. */ similarity_threshold?: integer; /** This property expects an array of regular expressions. The expressions are used to filter out matching * sequences from the categorization field values. You can use this functionality to fine-tune the categorization * by excluding sequences from consideration when categories are defined. For example, you can exclude SQL * statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. * If you only want to define simple regular expression filters that are applied prior to tokenization, setting * this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, * use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ categorization_filters?: string[]; /** The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. * The syntax is very similar to that used to define the analyzer in the analyze API. This property * cannot be used at the same time as `categorization_filters`. */ categorization_analyzer?: AggregationsCategorizeTextAnalyzer; /** The number of categorization buckets to return from each shard before merging all the results.
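*
* An illustrative sketch (not part of the generated spec), assuming a
* semi-structured `message` text field:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     message_categories: { categorize_text: { field: 'message', size: 10 } }
*   }
* })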
*/ shard_size?: integer; /** The number of buckets to return. */ size?: integer; /** The minimum number of documents in a bucket to be returned to the results. */ min_doc_count?: integer; /** The minimum number of documents in a bucket to be returned from the shard before merging. */ shard_min_doc_count?: integer; } export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer; export interface AggregationsChiSquareHeuristic { /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset: boolean; /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives: boolean; } export interface AggregationsChildrenAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { /** The child type that should be selected. */ type?: RelationName; } export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase { after_key?: AggregationsCompositeAggregateKey; } export type AggregationsCompositeAggregateKey = Record; export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { /** When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */ after?: AggregationsCompositeAggregateKey; /** The number of composite buckets that should be returned. */ size?: integer; /** The value sources used to build composite buckets. * Keys are returned in the order of the `sources` definition. */ sources?: Record[]; } export interface AggregationsCompositeAggregationBase { /** Either `field` or `script` must be present */ field?: Field; missing_bucket?: boolean; missing_order?: AggregationsMissingOrder; /** Either `field` or `script` must be present */ script?: Script | ScriptSource; value_type?: AggregationsValueType; order?: SortOrder; } export interface AggregationsCompositeAggregationSource { /** A terms aggregation. */ terms?: AggregationsCompositeTermsAggregation; /** A histogram aggregation. */ histogram?: AggregationsCompositeHistogramAggregation; /** A date histogram aggregation. */ date_histogram?: AggregationsCompositeDateHistogramAggregation; /** A geotile grid aggregation. 
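*
* An illustrative sketch (not part of the generated spec) of a paginated
* `composite` aggregation with a `geotile_grid` source, assuming a `location`
* geo_point field:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     tiles: {
*       composite: {
*         size: 100,
*         sources: [{ tile: { geotile_grid: { field: 'location', precision: 8 } } }]
*       }
*     }
*   }
* })
* // Feed the returned `after_key` back as `after` to fetch the next page.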
*/ geotile_grid?: AggregationsCompositeGeoTileGridAggregation; } export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase { key: AggregationsCompositeAggregateKey; } export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys & { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long; }; export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase { format?: string; /** Either `calendar_interval` or `fixed_interval` must be present */ calendar_interval?: DurationLarge; /** Either `calendar_interval` or `fixed_interval` must be present */ fixed_interval?: DurationLarge; offset?: Duration; time_zone?: TimeZone; } export interface AggregationsCompositeGeoTileGridAggregation extends AggregationsCompositeAggregationBase { precision?: integer; bounds?: GeoBounds; } export interface AggregationsCompositeHistogramAggregation extends AggregationsCompositeAggregationBase { interval: double; } export interface AggregationsCompositeTermsAggregation extends AggregationsCompositeAggregationBase { } export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase { value: long; value_as_string?: string; } export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsCustomCategorizeTextAnalyzer { char_filter?: string[]; tokenizer?: string; filter?: string[]; } export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { /** Calendar-aware interval. * Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */ calendar_interval?: AggregationsCalendarInterval; /** Enables extending the bounds of the histogram beyond the data itself. */ extended_bounds?: AggregationsExtendedBounds; /** Limits the histogram to specified bounds. */ hard_bounds?: AggregationsExtendedBounds; /** The date field whose values are used to build a histogram. */ field?: Field; /** Fixed interval: a fixed number of SI units that never deviates, regardless of where it falls on the calendar. */ fixed_interval?: Duration; /** The date format used to format `key_as_string` in the response. * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string; interval?: Duration; /** Only returns buckets that have `min_doc_count` number of documents. * By default, all buckets between the first bucket that matches documents and the last one are returned. */ min_doc_count?: integer; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: DateTime; /** Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */ offset?: Duration; /** The sort order of the returned buckets. */ order?: AggregationsAggregateOrder; params?: Record; script?: Script | ScriptSource; /** Time zone used for bucketing and rounding. * Defaults to Coordinated Universal Time (UTC). */ time_zone?: TimeZone; /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array.
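*
* An illustrative sketch (not part of the generated spec): a calendar-aware
* `date_histogram` returning keyed buckets, assuming a `date` field:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     sales_over_time: {
*       date_histogram: { field: 'date', calendar_interval: 'month', keyed: true }
*     }
*   }
* })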
*/ keyed?: boolean; } export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase { key_as_string?: string; key: EpochTime; } export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys & { [property: string]: AggregationsAggregate | string | EpochTime | long; }; export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate { } export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { /** The date field whose values are used to build ranges. */ field?: Field; /** The date format used to format `from` and `to` in the response. */ format?: string; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: AggregationsMissing; /** Array of date ranges. */ ranges?: AggregationsDateRangeExpression[]; /** Time zone used to convert dates from another time zone to UTC. */ time_zone?: TimeZone; /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean; } export interface AggregationsDateRangeExpression { /** Start of the range (inclusive). */ from?: AggregationsFieldDateMath; /** Custom key to return the range with. */ key?: string; /** End of the range (exclusive). */ to?: AggregationsFieldDateMath; } export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase { normalized_value?: double; normalized_value_as_string?: string; } export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { /** The type of value used for de-duplication. */ execution_hint?: AggregationsSamplerAggregationExecutionHint; /** Limits how many documents are permitted per choice of de-duplicating value. */ max_docs_per_value?: integer; script?: Script | ScriptSource; /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ shard_size?: integer; /** The field used to provide values used for de-duplication. */ field?: Field; } export interface AggregationsDoubleTermsAggregate extends AggregationsTermsAggregateBase { } export interface AggregationsDoubleTermsBucketKeys extends AggregationsTermsBucketBase { key: double; key_as_string?: string; } export type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys & { [property: string]: AggregationsAggregate | double | string | long; }; export interface AggregationsEwmaModelSettings { alpha?: float; } export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { model: 'ewma'; settings: AggregationsEwmaModelSettings; } export interface AggregationsExtendedBounds { /** Maximum value for the bound. */ max?: T; /** Minimum value for the bound.
*/ min?: T; } export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate { sum_of_squares: double | null; variance: double | null; variance_population: double | null; variance_sampling: double | null; std_deviation: double | null; std_deviation_population: double | null; std_deviation_sampling: double | null; std_deviation_bounds?: AggregationsStandardDeviationBounds; sum_of_squares_as_string?: string; variance_as_string?: string; variance_population_as_string?: string; variance_sampling_as_string?: string; std_deviation_as_string?: string; std_deviation_bounds_as_string?: AggregationsStandardDeviationBoundsAsString; } export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { /** The number of standard deviations above/below the mean to display. */ sigma?: double; } export interface AggregationsExtendedStatsBucketAggregate extends AggregationsExtendedStatsAggregate { } export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { /** The number of standard deviations above/below the mean to display. */ sigma?: double; } export type AggregationsFieldDateMath = DateMath | double; export interface AggregationsFilterAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsFilterAggregate = AggregationsFilterAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets; /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean; /** The key with which the other bucket is returned. */ other_bucket_key?: string; /** By default, the named filters aggregation returns the buckets as an object. * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean; } export interface AggregationsFiltersBucketKeys extends AggregationsMultiBucketBase { } export type AggregationsFiltersBucket = AggregationsFiltersBucketKeys & { [property: string]: AggregationsAggregate | long; }; export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase { format?: string; } export interface AggregationsFormattableMetricAggregation extends AggregationsMetricAggregationBase { format?: string; } export interface AggregationsFrequentItemSetsAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsFrequentItemSetsAggregation { /** Fields to analyze. */ fields: AggregationsFrequentItemSetsField[]; /** The minimum size of one item set. */ minimum_set_size?: integer; /** The minimum support of one item set. */ minimum_support?: double; /** The number of top item sets to return. */ size?: integer; /** Query that filters documents from analysis. */ filter?: QueryDslQueryContainer; } export interface AggregationsFrequentItemSetsBucketKeys extends AggregationsMultiBucketBase { key: Record; support: double; } export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys & { [property: string]: AggregationsAggregate | Record | double | long; }; export interface AggregationsFrequentItemSetsField { field: Field; /** Values to exclude. 
* Can be regular expression strings or arrays of strings of exact terms. */ exclude?: AggregationsTermsExclude; /** Values to include. * Can be regular expression strings or arrays of strings of exact terms. */ include?: AggregationsTermsInclude; } export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values'; export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { bounds?: GeoBounds; } export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { /** Specifies whether the bounding box should be allowed to overlap the international date line. */ wrap_longitude?: boolean; } export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase { count: long; location?: GeoLocation; } export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase { count?: long; location?: GeoLocation; } export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggregate { } export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { /** The distance calculation type. */ distance_type?: GeoDistanceType; /** A field of type `geo_point` used to evaluate the distance. */ field?: Field; /** The origin used to evaluate the distance. */ origin?: GeoLocation; /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[]; /** The distance unit. */ unit?: DistanceUnit; } export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { /** The bounding box to filter the points in each bucket. */ bounds?: GeoBounds; /** Field containing indexed `geo_point` or `geo_shape` values. * If the field contains an array, `geohash_grid` aggregates all array values. */ field?: Field; /** The string length of the geohashes used to define cells/buckets in the results. */ precision?: GeoHashPrecision; /** Allows for more accurate counting of the top cells returned in the final result of the aggregation. * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ shard_size?: integer; /** The maximum number of geohash buckets to return. */ size?: integer; } export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBucketBase { key: GeoHash; } export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys & { [property: string]: AggregationsAggregate | GeoHash | long; }; export interface AggregationsGeoHexGridAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsGeoHexGridBucketKeys extends AggregationsMultiBucketBase { key: GeoHexCell; } export type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys & { [property: string]: AggregationsAggregate | GeoHexCell | long; }; export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase { type: string; geometry: GeoLine; properties: any; } export interface AggregationsGeoLineAggregation { /** The name of the geo_point field. */ point: AggregationsGeoLinePoint; /** The name of the numeric field to use as the sort key for ordering the points. * When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in an error. */ sort: AggregationsGeoLineSort; /** When `true`, returns an additional array of the sort values in the feature properties.
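*
* An illustrative sketch (not part of the generated spec), assuming a `location`
* geo_point field and a `@timestamp` sort field:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     track: {
*       geo_line: {
*         point: { field: 'location' },
*         sort: { field: '@timestamp' },
*         include_sort: true
*       }
*     }
*   }
* })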
*/ include_sort?: boolean; /** The order in which the line is sorted (ascending or descending). */ sort_order?: SortOrder; /** The maximum length of the line represented in the aggregation. * Valid sizes are between 1 and 10000. */ size?: integer; } export interface AggregationsGeoLinePoint { /** The name of the geo_point field. */ field: Field; } export interface AggregationsGeoLineSort { /** The name of the numeric field to use as the sort key for ordering the points. */ field: Field; } export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase { /** Field containing indexed `geo_point` or `geo_shape` values. * If the field contains an array, `geotile_grid` aggregates all array values. */ field?: Field; /** Integer zoom of the key used to define cells/buckets in the results. * Values outside of the range [0,29] will be rejected. */ precision?: GeoTilePrecision; /** Allows for more accurate counting of the top cells returned in the final result of the aggregation. * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ shard_size?: integer; /** The maximum number of buckets to return. */ size?: integer; /** A bounding box to filter the geo-points or geo-shapes in each bucket. */ bounds?: GeoBounds; } export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBucketBase { key: GeoTile; } export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys & { [property: string]: AggregationsAggregate | GeoTile | long; }; export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase { /** Field containing indexed `geo_point` or `geo_shape` values. * If the field contains an array, `geohex_grid` aggregates all array values. */ field: Field; /** Integer zoom of the key used to define cells or buckets * in the results. Value should be between 0 and 15. */ precision?: integer; /** Bounding box used to filter the geo-points in each bucket. */ bounds?: GeoBounds; /** Maximum number of buckets to return. */ size?: integer; /** Number of buckets returned from each shard. */ shard_size?: integer; } export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase { } export interface AggregationsGoogleNormalizedDistanceHeuristic { /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset?: boolean; } export interface AggregationsHdrMethod { /** Specifies the resolution of values for the histogram in number of significant digits. */ number_of_significant_value_digits?: integer; } export interface AggregationsHdrPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { } export interface AggregationsHdrPercentilesAggregate extends AggregationsPercentilesAggregateBase { } export interface AggregationsHistogramAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase { /** Enables extending the bounds of the histogram beyond the data itself.
*/ extended_bounds?: AggregationsExtendedBounds; /** Limits the range of buckets in the histogram. * It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */ hard_bounds?: AggregationsExtendedBounds; /** The name of the field to aggregate on. */ field?: Field; /** The interval for the buckets. * Must be a positive decimal. */ interval?: double; /** Only returns buckets that have `min_doc_count` number of documents. * By default, the response will fill gaps in the histogram with empty buckets. */ min_doc_count?: integer; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: double; /** By default, the bucket keys start with 0 and then continue in evenly spaced steps of `interval`. * The bucket boundaries can be shifted by using the `offset` option. */ offset?: double; /** The sort order of the returned buckets. * By default, the returned buckets are sorted by their key ascending. */ order?: AggregationsAggregateOrder; script?: Script | ScriptSource; format?: string; /** If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. */ keyed?: boolean; } export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucketBase { key_as_string?: string; key: double; } export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys & { [property: string]: AggregationsAggregate | string | double | long; }; export interface AggregationsHoltLinearModelSettings { alpha?: float; beta?: float; } export interface AggregationsHoltMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { model: 'holt'; settings: AggregationsHoltLinearModelSettings; } export interface AggregationsHoltWintersModelSettings { alpha?: float; beta?: float; gamma?: float; pad?: boolean; period?: integer; type?: AggregationsHoltWintersType; } export interface AggregationsHoltWintersMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { model: 'holt_winters'; settings: AggregationsHoltWintersModelSettings; } export type AggregationsHoltWintersType = 'add' | 'mult'; export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase { value?: FieldValue; feature_importance?: AggregationsInferenceFeatureImportance[]; top_classes?: AggregationsInferenceTopClassEntry[]; warning?: string; } export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys & { [property: string]: any; }; export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase { /** The ID or alias for the trained model. */ model_id: Name; /** Contains the inference type and its options. */ inference_config?: AggregationsInferenceConfigContainer; } export interface AggregationsInferenceClassImportance { class_name: string; importance: double; } export interface AggregationsInferenceConfigContainer { /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions; /** Classification configuration for inference.
*/ classification?: MlClassificationInferenceOptions; } export interface AggregationsInferenceFeatureImportance { feature_name: string; importance?: double; classes?: AggregationsInferenceClassImportance[]; } export interface AggregationsInferenceTopClassEntry { class_name: FieldValue; class_probability: double; class_score: double; } export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase { /** The IP address field to aggregate on. The field mapping type must be `ip`. */ field: Field; /** Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. * For IPv6 addresses the accepted range is [0, 128]. */ prefix_length: integer; /** Defines whether the prefix applies to IPv6 addresses. */ is_ipv6?: boolean; /** Defines whether the prefix length is appended to IP address keys in the response. */ append_prefix_length?: boolean; /** Defines whether buckets are returned as a hash rather than an array in the response. */ keyed?: boolean; /** Minimum number of documents in a bucket for it to be included in the response. */ min_doc_count?: long; } export interface AggregationsIpPrefixBucketKeys extends AggregationsMultiBucketBase { is_ipv6: boolean; key: string; prefix_length: integer; netmask?: string; } export type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys & { [property: string]: AggregationsAggregate | boolean | string | integer | long; }; export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase { /** The IP address field whose values are used to build ranges. */ field?: Field; /** Array of IP ranges. */ ranges?: AggregationsIpRangeAggregationRange[]; } export interface AggregationsIpRangeAggregationRange { /** Start of the range. */ from?: string | null; /** IP range defined as a CIDR mask. */ mask?: string; /** End of the range. */ to?: string | null; } export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { key?: string; from?: string; to?: string; } export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys & { [property: string]: AggregationsAggregate | string | long; }; export type AggregationsKeyedPercentiles = Record; export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { model: 'linear'; settings: EmptyObject; } export interface AggregationsLongRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsLongRareTermsBucketKeys extends AggregationsMultiBucketBase { key: long; key_as_string?: string; } export type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string; }; export interface AggregationsLongTermsAggregate extends AggregationsTermsAggregateBase { } export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase { key: long; key_as_string?: string; } export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string; }; export interface AggregationsMatrixAggregation { /** An array of fields for computing the statistics. */ fields?: Fields; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored.
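*
* An illustrative sketch (not part of the generated spec): a `matrix_stats`
* aggregation over two numeric fields, substituting a default for missing
* `income` values:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     statistics: {
*       matrix_stats: { fields: ['poverty', 'income'], missing: { income: 50000 } }
*     }
*   }
* })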
*/ missing?: Record; } export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase { doc_count: long; fields?: AggregationsMatrixStatsFields[]; } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { /** Array value the aggregation will use for array or multi-valued fields. */ mode?: SortMode; } export interface AggregationsMatrixStatsFields { name: Field; count: long; mean: double; variance: double; skewness: double; kurtosis: double; covariance: Record; correlation: Record; } export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase { } export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsMedianAbsoluteDeviationAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double; } export interface AggregationsMetricAggregationBase { /** The field on which to run the aggregation. */ field?: Field; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: AggregationsMissing; script?: Script | ScriptSource; } export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase { } export interface AggregationsMinBucketAggregation extends AggregationsPipelineAggregationBase { } export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year'; export type AggregationsMissing = string | integer | double | boolean; export interface AggregationsMissingAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { /** The name of the field. */ field?: Field; missing?: AggregationsMissing; } export type AggregationsMissingOrder = 'first' | 'last' | 'default'; export type AggregationsMovingAverageAggregation = AggregationsLinearMovingAverageAggregation | AggregationsSimpleMovingAverageAggregation | AggregationsEwmaMovingAverageAggregation | AggregationsHoltMovingAverageAggregation | AggregationsHoltWintersMovingAverageAggregation; export interface AggregationsMovingAverageAggregationBase extends AggregationsPipelineAggregationBase { minimize?: boolean; predict?: integer; window?: integer; } export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase { /** The script that should be executed on each window of data. */ script?: string; /** By default, the window consists of the last n values excluding the current bucket. * Increasing `shift` by 1, moves the starting window position by 1 to the right. */ shift?: integer; /** The size of window to "slide" across the histogram. */ window?: integer; } export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase { /** The size of window to "slide" across the histogram. 
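*
* An illustrative sketch (not part of the generated spec): moving percentiles
* over a ten-bucket window of a date histogram, assuming `date` and `price` fields:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     sales_per_day: {
*       date_histogram: { field: 'date', calendar_interval: '1d' },
*       aggs: {
*         the_percentile: { percentiles: { field: 'price', percents: [1.0, 99.0] } },
*         the_movperc: { moving_percentiles: { buckets_path: 'the_percentile', window: 10 } }
*       }
*     }
*   }
* })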
*/ window?: integer; /** By default, the window consists of the last n values excluding the current bucket. * Increasing `shift` by 1, moves the starting window position by 1 to the right. */ shift?: integer; keyed?: boolean; } export interface AggregationsMultiBucketAggregateBase extends AggregationsAggregateBase { buckets: AggregationsBuckets; } export interface AggregationsMultiBucketBase { doc_count: long; } export interface AggregationsMultiTermLookup { /** A field from which to retrieve terms. */ field: Field; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: AggregationsMissing; } export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase { } export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { /** Specifies the strategy for data collection. */ collect_mode?: AggregationsTermsAggregationCollectMode; /** Specifies the sort order of the buckets. * Defaults to sorting by descending document count. */ order?: AggregationsAggregateOrder; /** The minimum number of documents in a bucket for it to be returned. */ min_doc_count?: long; /** The minimum number of documents in a bucket on each shard for it to be returned. */ shard_min_doc_count?: long; /** The number of candidate terms produced by each shard. * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer; /** Calculates the doc count error on a per-term basis. */ show_term_doc_count_error?: boolean; /** The number of term buckets to return out of the overall terms list. */ size?: integer; /** The field from which to generate sets of terms. */ terms: AggregationsMultiTermLookup[]; } export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase { key: FieldValue[]; key_as_string?: string; doc_count_error_upper_bound?: long; } export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys & { [property: string]: AggregationsAggregate | FieldValue[] | string | long; }; export interface AggregationsMutualInformationHeuristic { /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset?: boolean; /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives?: boolean; } export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { /** The path to the field of type `nested`. */ path?: Field; } export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase { /** The specific method to apply.
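*
* An illustrative sketch (not part of the generated spec): rescale each day's
* sum to a percentage of the overall total, assuming `timestamp` and `price` fields:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     sales_per_day: {
*       date_histogram: { field: 'timestamp', calendar_interval: 'day' },
*       aggs: {
*         sales: { sum: { field: 'price' } },
*         percent_of_total: {
*           normalize: { buckets_path: 'sales', method: 'percent_of_sum', format: '00.00%' }
*         }
*       }
*     }
*   }
* })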
*/ method?: AggregationsNormalizeMethod; } export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax'; export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsParentAggregate = AggregationsParentAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { /** The child type that should be selected. */ type?: RelationName; } export interface AggregationsPercentageScoreHeuristic { } export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. * Set to `false` to disable this behavior. */ keyed?: boolean; /** An array of values for which to calculate the percentile ranks. */ values?: double[] | null; /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */ hdr?: AggregationsHdrMethod; /** Sets parameters for the default TDigest algorithm used to calculate percentile ranks. */ tdigest?: AggregationsTDigest; } export type AggregationsPercentiles = AggregationsKeyedPercentiles | AggregationsArrayPercentilesItem[]; export interface AggregationsPercentilesAggregateBase extends AggregationsAggregateBase { values: AggregationsPercentiles; } export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase { /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. * Set to `false` to disable this behavior. */ keyed?: boolean; /** The percentiles to calculate. */ percents?: double[]; /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */ hdr?: AggregationsHdrMethod; /** Sets parameters for the default TDigest algorithm used to calculate percentiles. */ tdigest?: AggregationsTDigest; } export interface AggregationsPercentilesBucketAggregate extends AggregationsPercentilesAggregateBase { } export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase { /** The list of percentiles to calculate. */ percents?: double[]; } export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation { /** `DecimalFormat` pattern for the output value. * If specified, the formatted value is returned in the aggregation’s `value_as_string` property. */ format?: string; /** Policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy; } export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase { /** The probability that a document will be included in the aggregated data. * Must be greater than 0, less than 0.5, or exactly 1. * The lower the probability, the fewer documents are matched. */ probability: double; /** The seed to generate the random sampling of documents. * When a seed is provided, the random subset of documents is the same between calls. */ seed?: integer; /** When combined with seed, setting shard_seed ensures 100% consistent sampling over shards where data is exactly the same. * @remarks This property is not supported on Elastic Cloud Serverless. 
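*
* An illustrative sketch (not part of the generated spec): sample roughly 10% of
* documents, with `seed` (and optionally `shard_seed`) for reproducible sampling:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   aggs: {
*     sampling: {
*       random_sampler: { probability: 0.1, seed: 42 },
*       aggs: { price_percentiles: { percentiles: { field: 'price' } } }
*     }
*   }
* })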
*/ shard_seed?: integer; } export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { /** The field whose values are used to build ranges. */ field?: Field; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: integer; /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[]; script?: Script | ScriptSource; /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean; format?: string; } export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase { from?: double; to?: double; from_as_string?: string; to_as_string?: string; /** The bucket key. Present if the aggregation is _not_ keyed */ key?: string; } export type AggregationsRangeBucket = AggregationsRangeBucketKeys & { [property: string]: AggregationsAggregate | double | string | long; }; export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { /** Terms that should be excluded from the aggregation. */ exclude?: AggregationsTermsExclude; /** The field from which to return rare terms. */ field?: Field; /** Terms that should be included in the aggregation. */ include?: AggregationsTermsInclude; /** The maximum number of documents a term should appear in. */ max_doc_count?: long; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: AggregationsMissing; /** The precision of the internal CuckooFilters. * Smaller precision leads to better approximation, but higher memory usage. */ precision?: double; value_type?: string; } export interface AggregationsRateAggregate extends AggregationsAggregateBase { value: double; value_as_string?: string; } export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { /** The interval used to calculate the rate. * By default, the interval of the `date_histogram` is used. */ unit?: AggregationsCalendarInterval; /** How the rate is calculated. */ mode?: AggregationsRateMode; } export type AggregationsRateMode = 'sum' | 'value_count'; export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { /** Defines the nested object field that should be joined back to. * The default is empty, which means that it joins back to the root/main document level. */ path?: Field; } export interface AggregationsSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { /** Limits how many top-scoring documents are collected in the sample processed on each shard.
*/ shard_size?: integer; } export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash'; export interface AggregationsScriptedHeuristic { script: Script | ScriptSource; } export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase { value: any; } export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { /** Runs once on each shard after document collection is complete. * Allows the aggregation to consolidate the state returned from each shard. */ combine_script?: Script | ScriptSource; /** Runs prior to any collection of documents. * Allows the aggregation to set up any initial state. */ init_script?: Script | ScriptSource; /** Runs once per document collected. * If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */ map_script?: Script | ScriptSource; /** A global object with script parameters for `init`, `map` and `combine` scripts. * It is shared between the scripts. */ params?: Record; /** Runs once on the coordinating node after all shards have returned their results. * The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. */ reduce_script?: Script | ScriptSource; } export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { /** The historical bucket to subtract from the current value. * Must be a positive, non-zero integer. */ lag?: integer; } export interface AggregationsSignificantLongTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase { key: long; key_as_string?: string; } export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string | double; }; export interface AggregationsSignificantStringTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase { key: string; } export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys & { [property: string]: AggregationsAggregate | string | double | long; }; export interface AggregationsSignificantTermsAggregateBase extends AggregationsMultiBucketAggregateBase { bg_count?: long; doc_count?: long; } export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ background_filter?: QueryDslQueryContainer; /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ chi_square?: AggregationsChiSquareHeuristic; /** Terms to exclude. */ exclude?: AggregationsTermsExclude; /** Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint; /** The field from which to return significant terms. */ field?: Field; /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ gnd?: AggregationsGoogleNormalizedDistanceHeuristic; /** Terms to include.
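*
* An illustrative sketch (not part of the generated spec): terms that are
* unusually frequent in the query's result set relative to the whole index,
* assuming `force` and `crime_type` keyword fields:
* @example
* const response = await client.search({
*   index: 'my-index',
*   size: 0,
*   query: { terms: { force: ['British Transport Police'] } },
*   aggs: {
*     significant_crime_types: { significant_terms: { field: 'crime_type' } }
*   }
* })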
*/ include?: AggregationsTermsInclude; /** Use JLH score as the significance score. */ jlh?: EmptyObject; /** Only return terms that are found in more than `min_doc_count` hits. */ min_doc_count?: long; /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ mutual_information?: AggregationsMutualInformationHeuristic; /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ percentage?: AggregationsPercentageScoreHeuristic; /** Customized score, implemented via a script. */ script_heuristic?: AggregationsScriptedHeuristic; /** Regulates the certainty a shard has, with respect to the `min_doc_count`, about whether a term should actually be added to the candidate list. * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long; /** Can be used to control the volume of candidate terms produced by each shard. * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer; /** The number of buckets returned out of the overall terms list. */ size?: integer; } export interface AggregationsSignificantTermsBucketBase extends AggregationsMultiBucketBase { score: double; bg_count: long; } export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ background_filter?: QueryDslQueryContainer; /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ chi_square?: AggregationsChiSquareHeuristic; /** Values to exclude. */ exclude?: AggregationsTermsExclude; /** Determines whether the aggregation will use field values directly or global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint; /** The field from which to return significant text. */ field?: Field; /** Whether to filter out duplicate text to deal with noisy data. */ filter_duplicate_text?: boolean; /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ gnd?: AggregationsGoogleNormalizedDistanceHeuristic; /** Values to include. */ include?: AggregationsTermsInclude; /** Use JLH score as the significance score. */ jlh?: EmptyObject; /** Only return values that are found in more than `min_doc_count` hits. */ min_doc_count?: long; /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ mutual_information?: AggregationsMutualInformationHeuristic; /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ percentage?: AggregationsPercentageScoreHeuristic; /** Customized score, implemented via a script. */ script_heuristic?: AggregationsScriptedHeuristic; /** Regulates the certainty a shard has, with respect to the `min_doc_count`, about whether a value should actually be added to the candidate list. * Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long; /** The number of candidate terms produced by each shard.
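 *
 * A hedged example (the field name is hypothetical):
 *
 *   const sigText: AggregationsSignificantTextAggregation = {
 *     field: 'content',
 *     filter_duplicate_text: true,
 *     shard_size: 100,
 *   };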
* By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer; /** The number of buckets returned out of the overall terms list. */ size?: integer; /** Overrides the JSON `_source` fields from which text will be analyzed. */ source_fields?: Fields; } export interface AggregationsSimpleMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { model: 'simple'; settings: EmptyObject; } export interface AggregationsSimpleValueAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsSingleBucketAggregateBase extends AggregationsAggregateBase { doc_count: long; } export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase { /** The metric value. A missing value generally means that there was no data to aggregate, * unless specified otherwise. */ value: double | null; value_as_string?: string; } export interface AggregationsStandardDeviationBounds { upper: double | null; lower: double | null; upper_population: double | null; lower_population: double | null; upper_sampling: double | null; lower_sampling: double | null; } export interface AggregationsStandardDeviationBoundsAsString { upper: string; lower: string; upper_population: string; lower_population: string; upper_sampling: string; lower_sampling: string; } export interface AggregationsStatsAggregate extends AggregationsAggregateBase { count: long; min: double | null; max: double | null; avg: double | null; sum: double; min_as_string?: string; max_as_string?: string; avg_as_string?: string; sum_as_string?: string; } export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase { } export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggregate { } export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsStringRareTermsBucketKeys extends AggregationsMultiBucketBase { key: string; } export type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys & { [property: string]: AggregationsAggregate | string | long; }; export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase { count: long; min_length: integer | null; max_length: integer | null; avg_length: double | null; entropy: double | null; distribution?: Record<string, double> | null; min_length_as_string?: string; max_length_as_string?: string; avg_length_as_string?: string; } export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase { /** Shows the probability distribution for all characters.
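 *
 * A hedged sketch (assumes the `field` property inherited from `AggregationsMetricAggregationBase`; the field name is hypothetical):
 *
 *   const stringStats: AggregationsStringStatsAggregation = {
 *     field: 'message.keyword',
 *     show_distribution: true,
 *   };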
*/ show_distribution?: boolean; } export interface AggregationsStringTermsAggregate extends AggregationsTermsAggregateBase { } export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase { key: FieldValue; } export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys & { [property: string]: AggregationsAggregate | FieldValue | long; }; export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase { } export interface AggregationsSumBucketAggregation extends AggregationsPipelineAggregationBase { } export interface AggregationsTDigest { /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: integer; } export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { } export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase { } export interface AggregationsTTestAggregate extends AggregationsAggregateBase { value: double | null; value_as_string?: string; } export interface AggregationsTTestAggregation { /** Test population A. */ a?: AggregationsTestPopulation; /** Test population B. */ b?: AggregationsTestPopulation; /** The type of test. */ type?: AggregationsTTestType; } export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'; export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase { doc_count_error_upper_bound?: long; sum_other_doc_count?: long; } export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase { /** Determines how child aggregations should be calculated: breadth-first or depth-first. */ collect_mode?: AggregationsTermsAggregationCollectMode; /** Values to exclude. * Accepts regular expressions and partitions. */ exclude?: AggregationsTermsExclude; /** Determines whether the aggregation will use field values directly or global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint; /** The field from which to return terms. */ field?: Field; /** Values to include. * Accepts regular expressions and partitions. */ include?: AggregationsTermsInclude; /** Only return values that are found in more than `min_doc_count` hits. */ min_doc_count?: integer; /** The value to apply to documents that do not have a value. * By default, documents without a value are ignored. */ missing?: AggregationsMissing; missing_order?: AggregationsMissingOrder; missing_bucket?: boolean; /** Coerces unmapped fields into the specified type. */ value_type?: string; /** Specifies the sort order of the buckets. * Defaults to sorting by descending document count. */ order?: AggregationsAggregateOrder; script?: Script | ScriptSource; /** Regulates the certainty a shard has, with respect to the `min_doc_count`, about whether a term should actually be added to the candidate list. * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long; /** The number of candidate terms produced by each shard. * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter.
*/ shard_size?: integer; /** Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */ show_term_doc_count_error?: boolean; /** The number of buckets returned out of the overall terms list. */ size?: integer; format?: string; } export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first'; export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'; export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase { doc_count_error_upper_bound?: long; } export type AggregationsTermsExclude = string | string[]; export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition; export interface AggregationsTermsPartition { /** The number of partitions. */ num_partitions: long; /** The partition number for this request. */ partition: long; } export interface AggregationsTestPopulation { /** The field to aggregate. */ field: Field; script?: Script | ScriptSource; /** A filter used to define a set of records to run unpaired t-test on. */ filter?: QueryDslQueryContainer; } export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase { /** The maximum number of results to return. */ size?: integer; /** Set to `true` to associate a unique string key with each bucket and return the buckets as a hash rather than an array. */ keyed?: boolean; } export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase { key: Record<Field, FieldValue>; } export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys & { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long; }; export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase { hits: SearchHitsMetadata<any>; } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { /** Fields for which to return doc values. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[]; /** If `true`, returns detailed information about score computation as part of a hit. */ explain?: boolean; /** Array of wildcard (*) patterns. The request returns values for field names * matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[]; /** Starting document offset. */ from?: integer; /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */ highlight?: SearchHighlight; /** Returns the result of one or more script evaluations for each hit. */ script_fields?: Record<string, ScriptField>; /** The maximum number of top matching hits to return per bucket. */ size?: integer; /** Sort order of the top matching hits. * By default, the hits are sorted by the score of the main query. */ sort?: Sort; /** Selects the fields of the source that are returned. */ _source?: SearchSourceConfig; /** Returns values for the specified stored fields (fields that use the `store` mapping option). */ stored_fields?: Fields; /** If `true`, calculates and returns document scores, even if the scores are not used for sorting. */ track_scores?: boolean; /** If `true`, returns document version as part of a hit. */ version?: boolean; /** If `true`, returns sequence number and primary term of the last modification of each hit.
*/ seq_no_primary_term?: boolean; } export interface AggregationsTopMetrics { sort: (FieldValue | null)[]; metrics: Record<string, FieldValue | null>; } export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase { top: AggregationsTopMetrics[]; } export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { /** The fields of the top document to return. */ metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[]; /** The number of top documents from which to return metrics. */ size?: integer; /** The sort order of the documents. */ sort?: Sort; } export interface AggregationsTopMetricsValue { /** A field to return as a metric. */ field: Field; } export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { } export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata; }; export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsSignificantTermsAggregateBase { } export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase { } export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase { } export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation { } export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'; export interface AggregationsVariableWidthHistogramAggregate extends AggregationsMultiBucketAggregateBase { } export interface AggregationsVariableWidthHistogramAggregation { /** The name of the field. */ field?: Field; /** The target number of buckets. */ buckets?: integer; /** The number of buckets that the coordinating node will request from each shard. * Defaults to `buckets * 50`. */ shard_size?: integer; /** Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. * Defaults to `min(10 * shard_size, 50000)`. */ initial_buffer?: integer; script?: Script | ScriptSource; } export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { min: double; key: double; max: double; min_as_string?: string; key_as_string?: string; max_as_string?: string; } export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys & { [property: string]: AggregationsAggregate | double | string | long; }; export interface AggregationsWeightedAverageAggregation { /** A numeric response formatter. */ format?: string; /** Configuration for the field that provides the values. */ value?: AggregationsWeightedAverageValue; value_type?: AggregationsValueType; /** Configuration for the field or script that provides the weights. */ weight?: AggregationsWeightedAverageValue; } export interface AggregationsWeightedAverageValue { /** The field from which to extract the values or weights. */ field?: Field; /** A value or weight to use if the field is missing.
*/ missing?: double; script?: Script | ScriptSource; } export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer; export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase { type: 'apostrophe'; } export interface AnalysisArabicAnalyzer { type: 'arabic'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'arabic_normalization'; } export interface AnalysisArmenianAnalyzer { type: 'armenian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding'; /** If `true`, emit both original tokens and folded tokens. Defaults to `false`. 
*/ preserve_original?: SpecUtilsStringified<boolean>; } export interface AnalysisBasqueAnalyzer { type: 'basque'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisBengaliAnalyzer { type: 'bengali'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisBrazilianAnalyzer { type: 'brazilian'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisBulgarianAnalyzer { type: 'bulgarian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisCatalanAnalyzer { type: 'catalan'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export type AnalysisCharFilter = string | AnalysisCharFilterDefinition; export interface AnalysisCharFilterBase { version?: VersionString; } export type AnalysisCharFilterDefinition = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter; export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { type: 'char_group'; tokenize_on_chars: string[]; max_token_length?: integer; } export interface AnalysisChineseAnalyzer { type: 'chinese'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisCjkAnalyzer { type: 'cjk'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana'; export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase { type: 'cjk_bigram'; /** Array of character scripts for which to disable bigrams. */ ignored_scripts?: AnalysisCjkBigramIgnoredScript[]; /** If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`. */ output_unigrams?: boolean; } export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase { type: 'cjk_width'; } export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase { type: 'classic'; } export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase { type: 'classic'; max_token_length?: integer; } export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams'; /** A list of tokens. The filter generates bigrams for these tokens. * Either this or the `common_words_path` parameter is required. */ common_words?: string[]; /** Path to a file containing a list of tokens. The filter generates bigrams for these tokens. * This path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break. * Either this or the `common_words` parameter is required. */ common_words_path?: string; /** If `true`, matching for common words is case-insensitive. Defaults to `false`. */ ignore_case?: boolean; /** If `true`, the filter excludes the following tokens from the output: * - Unigrams for common words * - Unigrams for terms followed by common words * Defaults to `false`. We recommend enabling this parameter for search analyzers. */ query_mode?: boolean; } export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase { /** Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`.
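 *
 * A hedged example via the `dictionary_decompounder` subtype declared below (the word list is hypothetical):
 *
 *   const decompounder: AnalysisDictionaryDecompounderTokenFilter = {
 *     type: 'dictionary_decompounder',
 *     word_list: ['Donau', 'dampf', 'schiff'],
 *     min_subword_size: 3,
 *   };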
*/ max_subword_size?: integer; /** Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`. */ min_subword_size?: integer; /** Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`. */ min_word_size?: integer; /** If `true`, only include the longest matching subword. Defaults to `false`. */ only_longest_match?: boolean; /** A list of subwords to look for in the token stream. If found, the subword is included in the token output. * Either this parameter or `word_list_path` must be specified. */ word_list?: string[]; /** Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output. * This path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. * Either this parameter or `word_list` must be specified. */ word_list_path?: string; } export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { type: 'condition'; /** Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided. */ filter: string[]; /** Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token. */ script: Script | ScriptSource; } export interface AnalysisCustomAnalyzer { type: 'custom'; char_filter?: string | string[]; filter?: string | string[]; position_increment_gap?: integer; position_offset_gap?: integer; tokenizer: string; } export interface AnalysisCustomNormalizer { type: 'custom'; char_filter?: string[]; filter?: string[]; } export interface AnalysisCzechAnalyzer { type: 'czech'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisDanishAnalyzer { type: 'danish'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase { type: 'decimal_digit'; } export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'; export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { type: 'delimited_payload'; /** Character used to separate tokens from payloads. Defaults to `|`. */ delimiter?: string; /** Data type for the stored payload. */ encoding?: AnalysisDelimitedPayloadEncoding; } export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { type: 'dictionary_decompounder'; } export interface AnalysisDutchAnalyzer { type: 'dutch'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export type AnalysisEdgeNGramSide = 'front' | 'back'; export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { type: 'edge_ngram'; /** Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`. */ max_gram?: integer; /** Minimum character length of a gram. Defaults to `1`. */ min_gram?: integer; /** Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`. */ side?: AnalysisEdgeNGramSide; /** Emits original token when set to `true`. Defaults to `false`. 
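 *
 * A minimal sketch (gram lengths are arbitrary):
 *
 *   const edge: AnalysisEdgeNGramTokenFilter = { type: 'edge_ngram', min_gram: 1, max_gram: 5, side: 'front' };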
*/ preserve_original?: SpecUtilsStringified<boolean>; } export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { type: 'edge_ngram'; custom_token_chars?: string; max_gram?: integer; min_gram?: integer; token_chars?: AnalysisTokenChar[]; } export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { type: 'elision'; /** List of elisions to remove. * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. * For custom `elision` filters, either this parameter or `articles_path` must be specified. */ articles?: string[]; /** Path to a file that contains a list of elisions to remove. * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break. * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. * For custom `elision` filters, either this parameter or `articles` must be specified. */ articles_path?: string; /** If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`. */ articles_case?: SpecUtilsStringified<boolean>; } export interface AnalysisEnglishAnalyzer { type: 'english'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisEstonianAnalyzer { type: 'estonian'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisFingerprintAnalyzer { type: 'fingerprint'; version?: VersionString; /** The maximum token size to emit. Tokens larger than this size will be discarded. * Defaults to `255`. */ max_output_size?: integer; /** The character to use to concatenate the terms. * Defaults to a space. */ separator?: string; /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. * Defaults to `_none_`. */ stopwords?: AnalysisStopWords; /** The path to a file containing stop words. */ stopwords_path?: string; } export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { type: 'fingerprint'; /** Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output. */ max_output_size?: integer; /** Character to use to concatenate the token stream input. Defaults to a space.
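 *
 * A minimal sketch (values are arbitrary):
 *
 *   const fingerprint: AnalysisFingerprintTokenFilter = { type: 'fingerprint', max_output_size: 100, separator: '+' };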
*/ separator?: string; } export interface AnalysisFinnishAnalyzer { type: 'finnish'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase { type: 'flatten_graph'; } export interface AnalysisFrenchAnalyzer { type: 'french'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisGalicianAnalyzer { type: 'galician'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisGermanAnalyzer { type: 'german'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'german_normalization'; } export interface AnalysisGreekAnalyzer { type: 'greek'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisHindiAnalyzer { type: 'hindi'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'hindi_normalization'; } export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip'; escaped_tags?: string[]; } export interface AnalysisHungarianAnalyzer { type: 'hungarian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell'; /** If `true`, duplicate tokens are removed from the filter’s output. Defaults to `true`. */ dedup?: boolean; /** One or more `.dic` files (e.g., `en_US.dic`, `my_custom.dic`) to use for the Hunspell dictionary. * By default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter. */ dictionary?: string; /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. */ locale: string; /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. * @alias locale */ lang: string; /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. * @alias locale */ language: string; /** If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`. */ longest_only?: boolean; } export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { type: 'hyphenation_decompounder'; /** Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file. * This path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported. */ hyphenation_patterns_path: string; /** If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`. */ no_sub_matches?: boolean; /** If `true`, do not allow overlapping tokens. Defaults to `false`.
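 *
 * A hedged example (the pattern file path and word list are hypothetical):
 *
 *   const hyphenation: AnalysisHyphenationDecompounderTokenFilter = {
 *     type: 'hyphenation_decompounder',
 *     hyphenation_patterns_path: 'analysis/fop_hyphenation.xml',
 *     word_list: ['Kaffee', 'zucker'],
 *   };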
*/ no_overlapping_matches?: boolean; } export interface AnalysisIcuAnalyzer { type: 'icu_analyzer'; method: AnalysisIcuNormalizationType; mode: AnalysisIcuNormalizationMode; } export type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable'; export type AnalysisIcuCollationCaseFirst = 'lower' | 'upper'; export type AnalysisIcuCollationDecomposition = 'no' | 'identical'; export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical'; export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation'; alternate?: AnalysisIcuCollationAlternate; case_first?: AnalysisIcuCollationCaseFirst; case_level?: boolean; country?: string; decomposition?: AnalysisIcuCollationDecomposition; hiragana_quaternary_mode?: boolean; language?: string; numeric?: boolean; rules?: string; strength?: AnalysisIcuCollationStrength; variable_top?: string; variant?: string; } export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'icu_folding'; unicode_set_filter: string; } export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBase { type: 'icu_normalizer'; mode?: AnalysisIcuNormalizationMode; name?: AnalysisIcuNormalizationType; unicode_set_filter?: string; } export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'; export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_normalizer'; name: AnalysisIcuNormalizationType; } export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf'; export interface AnalysisIcuTokenizer extends AnalysisTokenizerBase { type: 'icu_tokenizer'; rule_files: string; } export type AnalysisIcuTransformDirection = 'forward' | 'reverse'; export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase { type: 'icu_transform'; dir?: AnalysisIcuTransformDirection; id: string; } export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'indic_normalization'; } export interface AnalysisIndonesianAnalyzer { type: 'indonesian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisIrishAnalyzer { type: 'irish'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisItalianAnalyzer { type: 'italian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase { type: 'ja_stop'; stopwords?: AnalysisStopWords; } export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem'; } export type AnalysisKeepTypesMode = 'include' | 'exclude'; export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { type: 'keep_types'; /** Indicates whether to keep or remove the specified token types. */ mode?: AnalysisKeepTypesMode; /** List of token types to keep or remove. */ types: string[]; } export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { type: 'keep'; /** List of words to keep. Only tokens that match words in this list are included in the output. * Either this parameter or `keep_words_path` must be specified. */ keep_words?: string[]; /** If `true`, lowercase all keep words. Defaults to `false`. */ keep_words_case?: boolean; /** Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output. 
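 *
 * A minimal sketch (the word list is hypothetical):
 *
 *   const keep: AnalysisKeepWordsTokenFilter = { type: 'keep', keep_words: ['alpha', 'beta'], keep_words_case: true };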
* This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. * Either this parameter or `keep_words` must be specified. */ keep_words_path?: string; } export interface AnalysisKeywordAnalyzer { type: 'keyword'; version?: VersionString; } export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_marker'; /** If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`. */ ignore_case?: boolean; /** Array of keywords. Tokens that match these keywords are not stemmed. * This parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ keywords?: string | string[]; /** Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed. * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. * This parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ keywords_path?: string; /** Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed. * This parameter, `keywords`, or `keywords_path` must be specified. You cannot specify this parameter and `keywords` or `keywords_path`. */ keywords_pattern?: string; } export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_repeat'; } export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { type: 'keyword'; buffer_size?: integer; } export interface AnalysisKuromojiAnalyzer { type: 'kuromoji'; mode: AnalysisKuromojiTokenizationMode; user_dictionary?: string; } export interface AnalysisKuromojiIterationMarkCharFilter extends AnalysisCharFilterBase { type: 'kuromoji_iteration_mark'; normalize_kana: boolean; normalize_kanji: boolean; } export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { type: 'kuromoji_part_of_speech'; stoptags: string[]; } export interface AnalysisKuromojiReadingFormTokenFilter extends AnalysisTokenFilterBase { type: 'kuromoji_readingform'; use_romaji: boolean; } export interface AnalysisKuromojiStemmerTokenFilter extends AnalysisTokenFilterBase { type: 'kuromoji_stemmer'; minimum_length: integer; } export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended'; export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase { type: 'kuromoji_tokenizer'; discard_punctuation?: boolean; mode: AnalysisKuromojiTokenizationMode; nbest_cost?: integer; nbest_examples?: string; user_dictionary?: string; user_dictionary_rules?: string[]; discard_compound_token?: boolean; } export interface AnalysisLatvianAnalyzer { type: 'latvian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length'; /** Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. */ max?: integer; /** Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`.
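 *
 * A minimal sketch (bounds are arbitrary):
 *
 *   const length: AnalysisLengthTokenFilter = { type: 'length', min: 2, max: 20 };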
*/ min?: integer; } export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { type: 'letter'; } export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { type: 'limit'; /** If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`. */ consume_all_tokens?: boolean; /** Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. Defaults to `1`. */ max_token_count?: SpecUtilsStringified<integer>; } export interface AnalysisLithuanianAnalyzer { type: 'lithuanian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisLowercaseNormalizer { type: 'lowercase'; } export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { type: 'lowercase'; /** Language-specific lowercase token filter to use. */ language?: AnalysisLowercaseTokenFilterLanguages; } export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish'; export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { type: 'lowercase'; } export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { type: 'mapping'; mappings?: string[]; mappings_path?: string; } export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase { type: 'min_hash'; /** Number of buckets to which hashes are assigned. Defaults to `512`. */ bucket_count?: integer; /** Number of ways to hash each token in the stream. Defaults to `1`. */ hash_count?: integer; /** Number of hashes to keep from each bucket. Defaults to `1`. * Hashes are retained by ascending size, starting with the bucket’s smallest hash first. */ hash_set_size?: integer; /** If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`. */ with_rotation?: boolean; } export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { type: 'multiplexer'; /** A list of token filters to apply to incoming tokens. */ filters: string[]; /** If `true` (the default) then emit the original token in addition to the filtered tokens. */ preserve_original?: SpecUtilsStringified<boolean>; } export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { type: 'ngram'; /** Maximum length of characters in a gram. Defaults to `2`. */ max_gram?: integer; /** Minimum length of characters in a gram. Defaults to `1`. */ min_gram?: integer; /** Emits original token when set to `true`. Defaults to `false`. */ preserve_original?: SpecUtilsStringified<boolean>; } export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { type: 'ngram'; custom_token_chars?: string; max_gram?: integer; min_gram?: integer; token_chars?: AnalysisTokenChar[]; } export interface AnalysisNoriAnalyzer { type: 'nori'; version?: VersionString; decompound_mode?: AnalysisNoriDecompoundMode; stoptags?: string[]; user_dictionary?: string; } export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed'; export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { type: 'nori_part_of_speech'; /** An array of part-of-speech tags that should be removed.
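 *
 * A hedged example (the part-of-speech tags shown are illustrative):
 *
 *   const noriPos: AnalysisNoriPartOfSpeechTokenFilter = { type: 'nori_part_of_speech', stoptags: ['E', 'IC'] };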
*/ stoptags?: string[]; } export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { type: 'nori_tokenizer'; decompound_mode?: AnalysisNoriDecompoundMode; discard_punctuation?: boolean; user_dictionary?: string; user_dictionary_rules?: string[]; } export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer; export interface AnalysisNorwegianAnalyzer { type: 'norwegian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy'; buffer_size?: SpecUtilsStringified<integer>; delimiter?: string; replacement?: string; reverse?: SpecUtilsStringified<boolean>; skip?: SpecUtilsStringified<integer>; } export interface AnalysisPatternAnalyzer { type: 'pattern'; version?: VersionString; /** Java regular expression flags. Flags should be pipe-separated, e.g. "CASE_INSENSITIVE|COMMENTS". */ flags?: string; /** Whether terms should be lowercased. * Defaults to `true`. */ lowercase?: boolean; /** A Java regular expression. * Defaults to `\W+`. */ pattern?: string; /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. * Defaults to `_none_`. */ stopwords?: AnalysisStopWords; /** The path to a file containing stop words. */ stopwords_path?: string; } export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_capture'; /** A list of regular expressions to match. */ patterns: string[]; /** If set to `true` (the default) it will emit the original token. */ preserve_original?: SpecUtilsStringified<boolean>; } export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { type: 'pattern_replace'; flags?: string; pattern: string; replacement?: string; } export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_replace'; /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */ all?: boolean; /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */ pattern: string; /** Replacement substring. Defaults to an empty substring (`""`).
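 *
 * A minimal sketch (pattern and replacement are arbitrary):
 *
 *   const patternReplace: AnalysisPatternReplaceTokenFilter = { type: 'pattern_replace', pattern: '(dog)', replacement: 'watch$1' };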
*/ replacement?: string; } export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { type: 'pattern'; flags?: string; group?: integer; pattern?: string; } export interface AnalysisPersianAnalyzer { type: 'persian'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'persian_normalization'; } export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'; export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'; export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic'; export type AnalysisPhoneticRuleType = 'approx' | 'exact'; export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase { type: 'phonetic'; encoder: AnalysisPhoneticEncoder; languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[]; max_code_len?: integer; name_type?: AnalysisPhoneticNameType; replace?: boolean; rule_type?: AnalysisPhoneticRuleType; } export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { type: 'porter_stem'; } export interface AnalysisPortugueseAnalyzer { type: 'portuguese'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter'; /** Script containing a condition used to filter incoming tokens. Only tokens that match this script are included in the output. */ script: Script | ScriptSource; } export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { type: 'remove_duplicates'; } export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { type: 'reverse'; } export interface AnalysisRomanianAnalyzer { type: 'romanian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisRussianAnalyzer { type: 'russian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'scandinavian_folding'; } export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'scandinavian_normalization'; } export interface AnalysisSerbianAnalyzer { type: 'serbian'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'serbian_normalization'; } export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle'; /** String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`). */ filler_token?: string; /** Maximum number of tokens to concatenate when creating shingles. Defaults to `2`. */ max_shingle_size?: SpecUtilsStringified<integer>; /** Minimum number of tokens to concatenate when creating shingles. Defaults to `2`. */ min_shingle_size?: SpecUtilsStringified<integer>; /** If `true`, the output includes the original input tokens.
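 *
 * A minimal sketch, assuming `SpecUtilsStringified<integer>` accepts a plain integer:
 *
 *   const shingle: AnalysisShingleTokenFilter = { type: 'shingle', min_shingle_size: 2, max_shingle_size: 3 };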
* If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`. */ output_unigrams?: boolean; /** If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`. */ output_unigrams_if_no_shingles?: boolean; /** Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`" "`). */ token_separator?: string; } export interface AnalysisSimpleAnalyzer { type: 'simple'; version?: VersionString; } export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase { type: 'simple_pattern_split'; pattern?: string; } export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase { type: 'simple_pattern'; pattern?: string; } export interface AnalysisSnowballAnalyzer { type: 'snowball'; version?: VersionString; language: AnalysisSnowballLanguage; stopwords?: AnalysisStopWords; } export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish'; export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { type: 'snowball'; /** Controls the language used by the stemmer. */ language?: AnalysisSnowballLanguage; } export interface AnalysisSoraniAnalyzer { type: 'sorani'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase { type: 'sorani_normalization'; } export interface AnalysisSpanishAnalyzer { type: 'spanish'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisStandardAnalyzer { type: 'standard'; /** The maximum token length. If a token is seen that exceeds this length then it is split at `max_token_length` intervals. * Defaults to `255`. */ max_token_length?: integer; /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. * Defaults to `_none_`. */ stopwords?: AnalysisStopWords; /** The path to a file containing stop words. */ stopwords_path?: string; } export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase { type: 'standard'; max_token_length?: integer; } export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer_override'; /** A list of mapping rules to use. */ rules?: string[]; /** A path (either relative to `config` location, or absolute) to a list of mappings. */ rules_path?: string; } export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer'; language?: string; /** @alias language */ name?: string; } export interface AnalysisStopAnalyzer { type: 'stop'; version?: VersionString; /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. * Defaults to `_none_`. */ stopwords?: AnalysisStopWords; /** The path to a file containing stop words. */ stopwords_path?: string; } export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { type: 'stop'; /** If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of `the` matches and removes `The`, `THE`, or `the`. Defaults to `false`.
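 *
 * A minimal sketch using the predefined `_english_` stop word list:
 *
 *   const stop: AnalysisStopTokenFilter = { type: 'stop', stopwords: '_english_', ignore_case: true };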
*/ ignore_case?: boolean; /** If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`. */ remove_trailing?: boolean; /** Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`. */ stopwords?: AnalysisStopWords; /** Path to a file that contains a list of stop words to remove. * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break. */ stopwords_path?: string; } export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_'; export type AnalysisStopWords = AnalysisStopWordLanguage | string[]; export interface AnalysisSwedishAnalyzer { type: 'swedish'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export type AnalysisSynonymFormat = 'solr' | 'wordnet'; export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase { type: 'synonym_graph'; } export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase { type: 'synonym'; } export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase { /** Expands definitions for equivalent synonym rules. Defaults to `true`. */ expand?: boolean; /** Sets the synonym rules format. */ format?: AnalysisSynonymFormat; /** If `true`, ignores errors while parsing the synonym rules. Note that only synonym rules that cannot be parsed are ignored. Defaults to the value of the `updateable` setting. */ lenient?: boolean; /** Used to define inline synonyms. */ synonyms?: string[]; /** Used to provide a synonym file. This path must be absolute or relative to the `config` location. */ synonyms_path?: string; /** Provide a synonym set created via Synonyms Management APIs. */ synonyms_set?: string; /** Controls the tokenizers that will be used to tokenize the synonym; this parameter is for backwards compatibility with indices created before 6.0. */ tokenizer?: string; /** If `true`, allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`.
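 *
 * A hedged example (the synonyms file path is hypothetical):
 *
 *   const synonymGraph: AnalysisSynonymGraphTokenFilter = {
 *     type: 'synonym_graph',
 *     synonyms_path: 'analysis/synonym.txt',
 *     updateable: true,
 *   };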
*/ updateable?: boolean; } export interface AnalysisThaiAnalyzer { type: 'thai'; stopwords?: AnalysisStopWords; stopwords_path?: string; } export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase { type: 'thai'; } export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'; export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition; export interface AnalysisTokenFilterBase { version?: VersionString; } export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter; export type AnalysisTokenizer = string | AnalysisTokenizerDefinition; export interface AnalysisTokenizerBase { version?: VersionString; } export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer; export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { type: 'trim'; } export interface 
AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { type: 'truncate'; /** Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`. */ length?: integer; } export interface AnalysisTurkishAnalyzer { type: 'turkish'; stopwords?: AnalysisStopWords; stopwords_path?: string; stem_exclusion?: string[]; } export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { type: 'uax_url_email'; max_token_length?: integer; } export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase { type: 'unique'; /** If `true`, only remove duplicate tokens in the same position. Defaults to `false`. */ only_on_same_position?: boolean; } export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase { type: 'uppercase'; } export interface AnalysisWhitespaceAnalyzer { type: 'whitespace'; version?: VersionString; } export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { type: 'whitespace'; max_token_length?: integer; } export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase { type: 'word_delimiter_graph'; /** If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`. */ adjust_offsets?: boolean; /** If `true`, the filter skips tokens with a keyword attribute of true. Defaults to `false`. */ ignore_keywords?: boolean; } export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase { type: 'word_delimiter'; } export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase { /** If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`. */ catenate_all?: boolean; /** If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`. */ catenate_numbers?: boolean; /** If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`. */ catenate_words?: boolean; /** If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */ generate_number_parts?: boolean; /** If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */ generate_word_parts?: boolean; /** If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. Defaults to `false`. */ preserve_original?: SpecUtilsStringified<boolean>; /** Array of tokens the filter won’t split. */ protected_words?: string[]; /** Path to a file that contains a list of tokens the filter won’t split. * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. */ protected_words_path?: string; /** If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`. */ split_on_case_change?: boolean; /** If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`.
*/ split_on_numerics?: boolean; /** If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. */ stem_english_possessive?: boolean; /** Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */ type_table?: string[]; /** Path to a file that contains custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */ type_table_path?: string; } export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { type: 'aggregate_metric_double'; default_metric: string; metrics: string[]; time_series_metric?: MappingTimeSeriesMetricType; } export interface MappingAllField { analyzer: string; enabled: boolean; omit_norms: boolean; search_analyzer: string; similarity: string; store: boolean; store_term_vector_offsets: boolean; store_term_vector_payloads: boolean; store_term_vector_positions: boolean; store_term_vectors: boolean; } export interface MappingBinaryProperty extends MappingDocValuesPropertyBase { type: 'binary'; } export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { boost?: double; fielddata?: IndicesNumericFielddata; index?: boolean; null_value?: boolean; ignore_malformed?: boolean; script?: Script | ScriptSource; on_script_error?: MappingOnScriptError; /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. * @experimental */ time_series_dimension?: boolean; type: 'boolean'; } export interface MappingByteNumberProperty extends MappingNumberPropertyBase { type: 'byte'; null_value?: byte; } export interface MappingCompletionProperty extends MappingDocValuesPropertyBase { analyzer?: string; contexts?: MappingSuggestContext[]; max_input_length?: integer; preserve_position_increments?: boolean; preserve_separators?: boolean; search_analyzer?: string; type: 'completion'; } export interface MappingCompositeSubField { type: MappingRuntimeFieldType; } export interface MappingConstantKeywordProperty extends MappingPropertyBase { value?: any; type: 'constant_keyword'; } export interface MappingCorePropertyBase extends MappingPropertyBase { copy_to?: Fields; store?: boolean; } export interface MappingCountedKeywordProperty extends MappingPropertyBase { type: 'counted_keyword'; index?: boolean; } export interface MappingDataStreamTimestamp { enabled: boolean; } export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { boost?: double; format?: string; ignore_malformed?: boolean; index?: boolean; script?: Script | ScriptSource; on_script_error?: MappingOnScriptError; null_value?: DateTime; precision_step?: integer; type: 'date_nanos'; } export interface MappingDateProperty extends MappingDocValuesPropertyBase { boost?: double; fielddata?: IndicesNumericFielddata; format?: string; ignore_malformed?: boolean; index?: boolean; script?: Script | ScriptSource; on_script_error?: MappingOnScriptError; null_value?: DateTime; precision_step?: integer; locale?: string; type: 'date'; } export interface MappingDateRangeProperty extends MappingRangePropertyBase { format?: string; type: 'date_range'; } export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float'; export interface MappingDenseVectorIndexOptions { /** The confidence interval to use when quantizing the vectors. 
Can be any value between and including `0.90` and * `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for * optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating * the quantization thresholds. * * For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization * thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). * * Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation. * * Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. */ confidence_interval?: float; /** The number of candidates to track while assembling the list of nearest neighbors for each new node. * * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ ef_construction?: integer; /** The number of neighbors each node will be connected to in the HNSW graph. * * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ m?: integer; /** The type of kNN algorithm to use. */ type: MappingDenseVectorIndexOptionsType; } export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw'; export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector'; /** Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of * the first vector added to the field. */ dims?: integer; /** The data type used to encode vectors. The supported data types are `float` (default), `byte`, and `bit`. */ element_type?: MappingDenseVectorElementType; /** If `true`, you can search this field using the kNN search API. */ index?: boolean; /** An optional section that configures the kNN indexing algorithm. The HNSW algorithm has two internal parameters * that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the * expense of slower indexing speed. * * This parameter can only be specified when `index` is `true`. */ index_options?: MappingDenseVectorIndexOptions; /** The vector similarity metric to use in kNN search. * * Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will * be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds * to a higher ranking. * * Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`. * * `bit` vectors only support `l2_norm` as their similarity metric. * * This parameter can only be specified when `index` is `true`. 
*/ similarity?: MappingDenseVectorSimilarity; } export type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product'; export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { doc_values?: boolean; } export interface MappingDoubleNumberProperty extends MappingNumberPropertyBase { type: 'double'; null_value?: double; } export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { type: 'double_range'; } export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false'; export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { type: '{dynamic_type}'; enabled?: boolean; null_value?: FieldValue; boost?: double; coerce?: boolean; script?: Script | ScriptSource; on_script_error?: MappingOnScriptError; ignore_malformed?: boolean; time_series_metric?: MappingTimeSeriesMetricType; analyzer?: string; eager_global_ordinals?: boolean; index?: boolean; index_options?: MappingIndexOptions; index_phrases?: boolean; index_prefixes?: MappingTextIndexPrefixes | null; norms?: boolean; position_increment_gap?: integer; search_analyzer?: string; search_quote_analyzer?: string; term_vector?: MappingTermVectorOption; format?: string; precision_step?: integer; locale?: string; } export interface MappingDynamicTemplate { mapping?: MappingProperty; runtime?: MappingRuntimeField; match?: string | string[]; path_match?: string | string[]; unmatch?: string | string[]; path_unmatch?: string | string[]; match_mapping_type?: string | string[]; unmatch_mapping_type?: string | string[]; match_pattern?: MappingMatchType; } export interface MappingFieldAliasProperty extends MappingPropertyBase { path?: Field; type: 'alias'; } export interface MappingFieldMapping { full_name: string; mapping: Partial<Record<Field, MappingProperty>>; } export interface MappingFieldNamesField { enabled: boolean; } export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'passthrough' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'counted_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword'; export interface MappingFlattenedProperty extends MappingPropertyBase { boost?: double; depth_limit?: integer; doc_values?: boolean; eager_global_ordinals?: boolean; index?: boolean; index_options?: MappingIndexOptions; null_value?: string; similarity?: string; split_queries_on_whitespace?: boolean; type: 'flattened'; } export interface MappingFloatNumberProperty extends MappingNumberPropertyBase { type: 'float'; null_value?: float; } export interface MappingFloatRangeProperty extends MappingRangePropertyBase { type: 'float_range'; } export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw'; export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean; ignore_z_value?: boolean; null_value?: GeoLocation; index?: boolean; on_script_error?: MappingOnScriptError; script?: Script | ScriptSource; type: 'geo_point'; }
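/* Editor's sketch (not part of the generated types): a kNN-ready `dense_vector` field
 * mapping built from MappingDenseVectorProperty above. The dimension count and index
 * options are hypothetical choices, not defaults. */
const exampleDenseVectorField: MappingDenseVectorProperty = {
  type: 'dense_vector',
  dims: 384,
  element_type: 'float',
  index: true,
  similarity: 'cosine',
  index_options: { type: 'int8_hnsw', m: 16, ef_construction: 100 },
};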
export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { coerce?: boolean; ignore_malformed?: boolean; ignore_z_value?: boolean; index?: boolean; orientation?: MappingGeoOrientation; strategy?: MappingGeoStrategy; type: 'geo_shape'; } export type MappingGeoStrategy = 'recursive' | 'term'; export interface MappingHalfFloatNumberProperty extends MappingNumberPropertyBase { type: 'half_float'; null_value?: float; } export interface MappingHistogramProperty extends MappingPropertyBase { ignore_malformed?: boolean; type: 'histogram'; } export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBase { type: 'icu_collation_keyword'; norms?: boolean; index_options?: MappingIndexOptions; /** Should the field be searchable? */ index?: boolean; /** Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. */ null_value?: string; rules?: string; language?: string; country?: string; variant?: string; strength?: AnalysisIcuCollationStrength; decomposition?: AnalysisIcuCollationDecomposition; alternate?: AnalysisIcuCollationAlternate; case_level?: boolean; case_first?: AnalysisIcuCollationCaseFirst; numeric?: boolean; variable_top?: string; hiragana_quaternary_mode?: boolean; } export interface MappingIndexField { enabled: boolean; } export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets'; export interface MappingIntegerNumberProperty extends MappingNumberPropertyBase { type: 'integer'; null_value?: integer; } export interface MappingIntegerRangeProperty extends MappingRangePropertyBase { type: 'integer_range'; } export interface MappingIpProperty extends MappingDocValuesPropertyBase { boost?: double; index?: boolean; ignore_malformed?: boolean; null_value?: string; on_script_error?: MappingOnScriptError; script?: Script | ScriptSource; /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. * @experimental */ time_series_dimension?: boolean; type: 'ip'; } export interface MappingIpRangeProperty extends MappingRangePropertyBase { type: 'ip_range'; } export interface MappingJoinProperty extends MappingPropertyBase { relations?: Record<RelationName, RelationName | RelationName[]>; eager_global_ordinals?: boolean; type: 'join'; } export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { boost?: double; eager_global_ordinals?: boolean; index?: boolean; index_options?: MappingIndexOptions; script?: Script | ScriptSource; on_script_error?: MappingOnScriptError; normalizer?: string; norms?: boolean; null_value?: string; similarity?: string | null; split_queries_on_whitespace?: boolean; /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. * @experimental */ time_series_dimension?: boolean; type: 'keyword'; } export interface MappingLongNumberProperty extends MappingNumberPropertyBase { type: 'long'; null_value?: long; } export interface MappingLongRangeProperty extends MappingRangePropertyBase { type: 'long_range'; } export interface MappingMatchOnlyTextProperty { type: 'match_only_text'; /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ fields?: Record<PropertyName, MappingProperty>; /** Metadata about the field. */ meta?: Record<string, string>; /** Allows you to copy the values of multiple fields into a group * field, which can then be queried as a single field. */ copy_to?: Fields; }
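/* Editor's sketch (not part of the generated types): a `match_only_text` field with a
 * `keyword` multi-field, illustrating the `fields` record above. Names are hypothetical. */
const exampleMatchOnlyText: MappingMatchOnlyTextProperty = {
  type: 'match_only_text',
  fields: {
    raw: { type: 'keyword', ignore_above: 256 },
  },
};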
export type MappingMatchType = 'simple' | 'regex'; export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase { type: 'murmur3'; } export interface MappingNestedProperty extends MappingCorePropertyBase { enabled?: boolean; include_in_parent?: boolean; include_in_root?: boolean; type: 'nested'; } export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { boost?: double; coerce?: boolean; ignore_malformed?: boolean; index?: boolean; on_script_error?: MappingOnScriptError; script?: Script | ScriptSource; /** For internal use by Elastic only. Marks the field as a time series metric. * @experimental */ time_series_metric?: MappingTimeSeriesMetricType; /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. * @experimental */ time_series_dimension?: boolean; } export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean; subobjects?: MappingSubobjects; type?: 'object'; } export type MappingOnScriptError = 'fail' | 'continue'; export interface MappingPassthroughObjectProperty extends MappingCorePropertyBase { type?: 'passthrough'; enabled?: boolean; priority?: integer; time_series_dimension?: boolean; } export interface MappingPercolatorProperty extends MappingPropertyBase { type: 'percolator'; } export interface MappingPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean; ignore_z_value?: boolean; null_value?: string; type: 'point'; } export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty;
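/* Editor's sketch (not part of the generated types): a `nested` field whose
 * sub-properties use the MappingProperty union above. Field names are hypothetical. */
const exampleNestedField: MappingNestedProperty = {
  type: 'nested',
  include_in_parent: false,
  properties: {
    first: { type: 'keyword' },
    last: { type: 'keyword' },
  },
};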
export interface MappingPropertyBase { /** Metadata about the field. */ meta?: Record<string, string>; properties?: Record<PropertyName, MappingProperty>; ignore_above?: integer; dynamic?: MappingDynamicMapping; fields?: Record<PropertyName, MappingProperty>; synthetic_source_keep?: MappingSyntheticSourceKeepEnum; } export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { boost?: double; coerce?: boolean; index?: boolean; } export interface MappingRankFeatureProperty extends MappingPropertyBase { positive_score_impact?: boolean; type: 'rank_feature'; } export interface MappingRankFeaturesProperty extends MappingPropertyBase { positive_score_impact?: boolean; type: 'rank_features'; } export interface MappingRoutingField { required: boolean; } export interface MappingRuntimeField { /** For type `composite` */ fields?: Record<string, MappingCompositeSubField>; /** For type `lookup` */ fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[]; /** A custom format for `date` type runtime fields. */ format?: string; /** For type `lookup` */ input_field?: Field; /** For type `lookup` */ target_field?: Field; /** For type `lookup` */ target_index?: IndexName; /** Painless script executed at query time. */ script?: Script | ScriptSource; /** Field type, which can be: `boolean`, `composite`, `date`, `double`, `geo_point`, `geo_shape`, `ip`, `keyword`, `long`, or `lookup`. */ type: MappingRuntimeFieldType; } export interface MappingRuntimeFieldFetchFields { field: Field; format?: string; } export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup'; export type MappingRuntimeFields = Record<Field, MappingRuntimeField>; export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { type: 'scaled_float'; null_value?: double; scaling_factor?: double; } export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase { analyzer?: string; index?: boolean; index_options?: MappingIndexOptions; max_shingle_size?: integer; norms?: boolean; search_analyzer?: string; search_quote_analyzer?: string; similarity?: string | null; term_vector?: MappingTermVectorOption; type: 'search_as_you_type'; }
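/* Editor's sketch (not part of the generated types): a runtime field that emits a
 * keyword at query time. The Painless script and document field are hypothetical;
 * a plain string is assumed to be valid for `script` via ScriptSource. */
const exampleRuntimeField: MappingRuntimeField = {
  type: 'keyword',
  script: "emit(doc['message'].value.toLowerCase())",
};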
export interface MappingSemanticTextProperty { type: 'semantic_text'; meta?: Record<string, string>; /** Inference endpoint that will be used to generate embeddings for the field. * This parameter cannot be updated. Use the Create inference API to create the endpoint. * If `search_inference_id` is specified, the inference endpoint will only be used at index time. */ inference_id?: Id; /** Inference endpoint that will be used to generate embeddings at query time. * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint. * If not specified, the inference endpoint defined by `inference_id` will be used at both index and query time. */ search_inference_id?: Id; } export interface MappingShapeProperty extends MappingDocValuesPropertyBase { coerce?: boolean; ignore_malformed?: boolean; ignore_z_value?: boolean; orientation?: MappingGeoOrientation; type: 'shape'; } export interface MappingShortNumberProperty extends MappingNumberPropertyBase { type: 'short'; null_value?: short; } export interface MappingSizeField { enabled: boolean; } export interface MappingSourceField { compress?: boolean; compress_threshold?: string; enabled?: boolean; excludes?: string[]; includes?: string[]; mode?: MappingSourceFieldMode; } export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic'; export interface MappingSparseVectorProperty extends MappingPropertyBase { type: 'sparse_vector'; } export type MappingSubobjects = boolean | 'true' | 'false' | 'auto'; export interface MappingSuggestContext { name: Name; path?: Field; type: string; precision?: integer | string; } export type MappingSyntheticSourceKeepEnum = 'none' | 'arrays' | 'all'; export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads'; export interface MappingTextIndexPrefixes { max_chars: integer; min_chars: integer; } export interface MappingTextProperty extends MappingCorePropertyBase { analyzer?: string; boost?: double; eager_global_ordinals?: boolean; fielddata?: boolean; fielddata_frequency_filter?: IndicesFielddataFrequencyFilter; index?: boolean; index_options?: MappingIndexOptions; index_phrases?: boolean; index_prefixes?: MappingTextIndexPrefixes | null; norms?: boolean; position_increment_gap?: integer; search_analyzer?: string; search_quote_analyzer?: string; similarity?: string | null; term_vector?: MappingTermVectorOption; type: 'text'; } export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position'; export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { analyzer?: string; boost?: double; index?: boolean; null_value?: double; enable_position_increments?: boolean; type: 'token_count'; } export interface MappingTypeMapping { all_field?: MappingAllField; date_detection?: boolean; dynamic?: MappingDynamicMapping; dynamic_date_formats?: string[]; dynamic_templates?: Partial<Record<string, MappingDynamicTemplate>>[]; _field_names?: MappingFieldNamesField; index_field?: MappingIndexField; _meta?: Metadata; numeric_detection?: boolean; properties?: Record<PropertyName, MappingProperty>; _routing?: MappingRoutingField; _size?: MappingSizeField; _source?: MappingSourceField; runtime?: Record<string, MappingRuntimeField>; enabled?: boolean; subobjects?: MappingSubobjects; _data_stream_timestamp?: MappingDataStreamTimestamp; } export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { type: 'unsigned_long'; null_value?: ulong; } export interface MappingVersionProperty extends MappingDocValuesPropertyBase { type: 'version'; } export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { type: 'wildcard'; null_value?: string; }
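/* Editor's sketch (not part of the generated types): a complete index mapping assembled
 * from the mapping types above, combining static properties with a runtime field.
 * Field names and the Painless script are hypothetical. */
const exampleMappings: MappingTypeMapping = {
  dynamic: 'strict',
  properties: {
    title: { type: 'text' },
    tags: { type: 'keyword' },
    created_at: { type: 'date' },
  },
  runtime: {
    day_of_week: {
      type: 'keyword',
      script: "emit(doc['created_at'].value.dayOfWeekEnum.toString())",
    },
  },
};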
export interface QueryDslBoolQuery extends QueryDslQueryBase { /** The clause (query) must appear in matching documents. * However, unlike `must`, the score of the query will be ignored. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[]; /** Specifies the number or percentage of `should` clauses returned documents must match. */ minimum_should_match?: MinimumShouldMatch; /** The clause (query) must appear in matching documents and will contribute to the score. */ must?: QueryDslQueryContainer | QueryDslQueryContainer[]; /** The clause (query) must not appear in the matching documents. * Because scoring is ignored, a score of `0` is returned for all documents. */ must_not?: QueryDslQueryContainer | QueryDslQueryContainer[]; /** The clause (query) should appear in the matching document. */ should?: QueryDslQueryContainer | QueryDslQueryContainer[]; } export interface QueryDslBoostingQuery extends QueryDslQueryBase { /** Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. */ negative_boost: double; /** Query used to decrease the relevance score of matching documents. */ negative: QueryDslQueryContainer; /** Any returned documents must match this query. */ positive: QueryDslQueryContainer; } export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min'; export type QueryDslCombinedFieldsOperator = 'or' | 'and'; export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { /** List of fields to search. Field wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */ fields: Field[]; /** Text to search for in the provided `fields`. * The `combined_fields` query analyzes the provided text before performing a search. */ query: string; /** If true, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean; /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslCombinedFieldsOperator; /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch; /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslCombinedFieldsZeroTerms; } export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all'; export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { analyzer?: string; cutoff_frequency?: double; high_freq_operator?: QueryDslOperator; low_freq_operator?: QueryDslOperator; minimum_should_match?: MinimumShouldMatch; query: string; } export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { /** Filter query you wish to run. Any returned documents must match this query. * Filter queries do not calculate relevance scores. * To speed up performance, Elasticsearch automatically caches frequently used filter queries. */ filter: QueryDslQueryContainer; } export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase<DateMath, Duration> { } export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys & { [property: string]: QueryDslDecayPlacement<DateMath, Duration> | QueryDslMultiValueMode; }; export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<DateMath, Duration> { } export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase<DateMath> { /** Date format used to convert `date` values in the query. */ format?: DateFormat; /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone; } export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction; export interface QueryDslDecayFunctionBase<TOrigin = unknown, TScale = unknown> { /** Determines how the distance is calculated when a field used for computing the decay contains multiple values. */ multi_value_mode?: QueryDslMultiValueMode; }
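/* Editor's sketch (not part of the generated types): a `bool` query value built from
 * clause types defined in this file. Field names and values are hypothetical. */
const exampleBoolQuery: QueryDslBoolQuery = {
  must: [{ match: { title: 'elasticsearch' } }],
  filter: [{ exists: { field: 'author' } }],
  must_not: [{ ids: { values: ['42'] } }],
  should: [{ match: { tags: 'search' } }],
  minimum_should_match: 1,
};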
export interface QueryDslDecayPlacement<TOrigin = unknown, TScale = unknown> { /** Defines how documents are scored at the distance given at scale. */ decay?: double; /** If defined, the decay function will only compute the decay function for documents with a distance greater than the defined `offset`. */ offset?: TScale; /** Defines the distance from origin + offset at which the computed score will equal `decay` parameter. */ scale?: TScale; /** The point of origin used for calculating distance. Must be given as a number for numeric field, date for date fields and geo point for geo fields. */ origin?: TOrigin; } export interface QueryDslDisMaxQuery extends QueryDslQueryBase { /** One or more query clauses. * Returned documents must match one or more of these queries. * If a document matches multiple queries, Elasticsearch uses the highest relevance score. */ queries: QueryDslQueryContainer[]; /** Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */ tie_breaker?: double; } export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery; export interface QueryDslDistanceFeatureQueryBase<TOrigin = unknown, TDistance = unknown> extends QueryDslQueryBase { /** Date or point of origin used to calculate distances. * If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date. * Date Math, such as `now-1h`, is supported. * If the field value is a `geo_point` field, the `origin` value must be a geopoint. */ origin: TOrigin; /** Distance from the `origin` at which relevance scores receive half of the `boost` value. * If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */ pivot: TDistance; /** Name of the field used to calculate distances. This field must meet the following criteria: * be a `date`, `date_nanos` or `geo_point` field; * have an `index` mapping parameter value of `true`, which is the default; * have a `doc_values` mapping parameter value of `true`, which is the default. */ field: Field; } export interface QueryDslExistsQuery extends QueryDslQueryBase { /** Name of the field you wish to search. */ field: Field; } export interface QueryDslFieldAndFormat { /** A wildcard pattern. The request returns values for field names matching this pattern. */ field: Field; /** The format in which the values are returned. */ format?: string; include_unmapped?: boolean; } export interface QueryDslFieldLookup { /** `id` of the document. */ id: Id; /** Index from which to retrieve the document. */ index?: IndexName; /** Name of the field. */ path?: Field; /** Custom routing value. */ routing?: Routing; } export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal'; export interface QueryDslFieldValueFactorScoreFunction { /** Field to be extracted from the document. */ field: Field; /** Optional factor to multiply the field value with. */ factor?: double; /** Value used if the document doesn’t have that field. * The modifier and factor are still applied to it as though it were read from the document. */ missing?: double; /** Modifier to apply to the field value. */ modifier?: QueryDslFieldValueFactorModifier; }
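/* Editor's sketch (not part of the generated types): a date-based `distance_feature`
 * query, boosting documents whose (hypothetical) `production_date` is close to now.
 * Assumes DateMath and Duration accept the usual string forms. */
const exampleDistanceFeature: QueryDslDateDistanceFeatureQuery = {
  field: 'production_date',
  origin: 'now',
  pivot: '7d',
};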
export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min'; export interface QueryDslFunctionScoreContainer { /** Function that scores a document with an exponential decay, depending on the distance of a numeric field value of the document from an origin. */ exp?: QueryDslDecayFunction; /** Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin. */ gauss?: QueryDslDecayFunction; /** Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */ linear?: QueryDslDecayFunction; /** Function allows you to use a field from a document to influence the score. * It’s similar to using the script_score function, however, it avoids the overhead of scripting. */ field_value_factor?: QueryDslFieldValueFactorScoreFunction; /** Generates scores that are uniformly distributed from 0 up to but not including 1. * In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */ random_score?: QueryDslRandomScoreFunction; /** Enables you to wrap another query and customize the scoring of it optionally with a computation derived from other numeric field values in the doc using a script expression. */ script_score?: QueryDslScriptScoreFunction; filter?: QueryDslQueryContainer; weight?: double; } export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min'; export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { /** Defines how the newly computed score is combined with the score of the query. */ boost_mode?: QueryDslFunctionBoostMode; /** One or more functions that compute a new score for each document returned by the query. */ functions?: QueryDslFunctionScoreContainer[]; /** Restricts the new score to not exceed the provided limit. */ max_boost?: double; /** Excludes documents that do not meet the provided score threshold. */ min_score?: double; /** A query that determines the documents for which a new score is computed. */ query?: QueryDslQueryContainer; /** Specifies how the computed scores are combined. */ score_mode?: QueryDslFunctionScoreMode; } export interface QueryDslFuzzyQuery extends QueryDslQueryBase { /** Maximum number of variations created. */ max_expansions?: integer; /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer; /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite; /** Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). */ transpositions?: boolean; /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness; /** Term you wish to find in the provided field. */ value: string | double | boolean; }
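/* Editor's sketch (not part of the generated types): a `function_score` query combining
 * a field_value_factor function with a constant weight. The `likes` field is hypothetical. */
const exampleFunctionScore: QueryDslFunctionScoreQuery = {
  query: { match_all: {} },
  functions: [
    {
      filter: { exists: { field: 'likes' } },
      field_value_factor: { field: 'likes', factor: 1.2, modifier: 'log1p', missing: 1 },
    },
    { weight: 2 },
  ],
  score_mode: 'sum',
  boost_mode: 'multiply',
  max_boost: 10,
};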
export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { type?: QueryDslGeoExecution; /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod; /** Set to `true` to ignore an unmapped field and not match any documents for this query. * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean; } export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys & { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string; }; export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase<GeoLocation, Distance> { } export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys & { [property: string]: QueryDslDecayPlacement<GeoLocation, Distance> | QueryDslMultiValueMode; }; export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase<GeoLocation, Distance> { } export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { /** The radius of the circle centred on the specified location. * Points which fall into this circle are considered to be matches. */ distance: Distance; /** How to compute the distance. * Set to `plane` for a faster calculation that's inaccurate on long distances and close to the poles. */ distance_type?: GeoDistanceType; /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod; /** Set to `true` to ignore an unmapped field and not match any documents for this query. * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean; } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys & { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | boolean | float | string; }; export type QueryDslGeoExecution = 'memory' | 'indexed'; export interface QueryDslGeoGridQuery extends QueryDslQueryBase { geotile?: GeoTile; geohash?: GeoHash; geohex?: GeoHexCell; } export interface QueryDslGeoPolygonPoints { points: GeoLocation[]; } export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { validation_method?: QueryDslGeoValidationMethod; ignore_unmapped?: boolean; } export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys & { [property: string]: QueryDslGeoPolygonPoints | QueryDslGeoValidationMethod | boolean | float | string; }; export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape; /** Query using an indexed shape retrieved from the specified document and path. */ indexed_shape?: QueryDslFieldLookup; /** Spatial relation operator used to search a geo field. */ relation?: GeoShapeRelation; } export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { /** Set to `true` to ignore an unmapped field and not match any documents for this query. * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean; } export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys & { [property: string]: QueryDslGeoShapeFieldQuery | boolean | float | string; }; export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict';
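/* Editor's sketch (not part of the generated types): a `geo_distance` query value. The
 * extra key is the (hypothetical) geo_point field name, permitted by the index signature
 * on QueryDslGeoDistanceQuery above. */
const exampleGeoDistance: QueryDslGeoDistanceQuery = {
  distance: '200km',
  distance_type: 'arc',
  'pin.location': { lat: 40.73, lon: -74.1 },
};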
export interface QueryDslHasChildQuery extends QueryDslQueryBase { /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean; /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits; /** Maximum number of child documents that match the query allowed for a returned parent document. * If the parent document exceeds this limit, it is excluded from the search results. */ max_children?: integer; /** Minimum number of matching child documents required for a parent document to be returned. * If the parent document does not meet this limit, it is excluded from the search results. */ min_children?: integer; /** Query you wish to run on child documents of the `type` field. * If a child document matches the search, the query returns the parent document. */ query: QueryDslQueryContainer; /** Indicates how scores for matching child documents affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode; /** Name of the child relationship mapped for the `join` field. */ type: RelationName; } export interface QueryDslHasParentQuery extends QueryDslQueryBase { /** Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. * You can use this parameter to query multiple indices that may not contain the `parent_type`. */ ignore_unmapped?: boolean; /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits; /** Name of the parent relationship mapped for the `join` field. */ parent_type: RelationName; /** Query you wish to run on parent documents of the `parent_type` field. * If a parent document matches the search, the query returns its child documents. */ query: QueryDslQueryContainer; /** Indicates whether the relevance score of a matching parent document is aggregated into its child documents. */ score?: boolean; } export interface QueryDslIdsQuery extends QueryDslQueryBase { /** An array of document IDs. */ values?: Ids; } export interface QueryDslIntervalsAllOf { /** An array of rules to combine. All rules must produce a match in a document for the overall source to match. */ intervals: QueryDslIntervalsContainer[]; /** Maximum number of positions between the matching terms. * Intervals produced by the rules further apart than this are not considered matches. */ max_gaps?: integer; /** If `true`, intervals produced by the rules should appear in the order in which they are specified. */ ordered?: boolean; /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter; } export interface QueryDslIntervalsAnyOf { /** An array of rules to match. */ intervals: QueryDslIntervalsContainer[]; /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter; } export interface QueryDslIntervalsContainer { /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf; /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf; /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy; /** Matches analyzed text. */ match?: QueryDslIntervalsMatch; /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix; range?: QueryDslIntervalsRange; regexp?: QueryDslIntervalsRegexp; /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard; } export interface QueryDslIntervalsFilter { /** Query used to return intervals that follow an interval from the `filter` rule. */ after?: QueryDslIntervalsContainer; /** Query used to return intervals that occur before an interval from the `filter` rule. */ before?: QueryDslIntervalsContainer; /** Query used to return intervals contained by an interval from the `filter` rule. */ contained_by?: QueryDslIntervalsContainer; /** Query used to return intervals that contain an interval from the `filter` rule. */ containing?: QueryDslIntervalsContainer; /** Query used to return intervals that are **not** contained by an interval from the `filter` rule.
*/ not_contained_by?: QueryDslIntervalsContainer; /** Query used to return intervals that do **not** contain an interval from the `filter` rule. */ not_containing?: QueryDslIntervalsContainer; /** Query used to return intervals that do **not** overlap with an interval from the `filter` rule. */ not_overlapping?: QueryDslIntervalsContainer; /** Query used to return intervals that overlap with an interval from the `filter` rule. */ overlapping?: QueryDslIntervalsContainer; /** Script used to return matching documents. * This script must return a boolean value: `true` or `false`. */ script?: Script | ScriptSource; } export interface QueryDslIntervalsFuzzy { /** Analyzer used to normalize the term. */ analyzer?: string; /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness; /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer; /** The term to match. */ term: string; /** Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ transpositions?: boolean; /** If specified, match intervals from this field rather than the top-level field. * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field; } export interface QueryDslIntervalsMatch { /** Analyzer used to analyze terms in the query. */ analyzer?: string; /** Maximum number of positions between the matching terms. * Terms further apart than this are not considered matches. */ max_gaps?: integer; /** If `true`, matching terms must appear in their specified order. */ ordered?: boolean; /** Text you wish to find in the provided field. */ query: string; /** If specified, match intervals from this field rather than the top-level field. * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field; /** An optional interval filter. */ filter?: QueryDslIntervalsFilter; } export interface QueryDslIntervalsPrefix { /** Analyzer used to analyze the `prefix`. */ analyzer?: string; /** Beginning characters of terms you wish to find in the top-level field. */ prefix: string; /** If specified, match intervals from this field rather than the top-level field. * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field; } export interface QueryDslIntervalsQuery extends QueryDslQueryBase { /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf; /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf; /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy; /** Matches analyzed text. */ match?: QueryDslIntervalsMatch; /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix; range?: QueryDslIntervalsRange; regexp?: QueryDslIntervalsRegexp; /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard; } export interface QueryDslIntervalsRange { /** Analyzer used to analyze the `prefix`. */ analyzer?: string; /** Lower term, either gte or gt must be provided. */ gte?: string; /** Lower term, either gte or gt must be provided. */ gt?: string; /** Upper term, either lte or lt must be provided. */ lte?: string; /** Upper term, either lte or lt must be provided. 
*/ lt?: string; /** If specified, match intervals from this field rather than the top-level field. * The range terms are normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field; } export interface QueryDslIntervalsRegexp { /** Analyzer used to analyze the `pattern`. */ analyzer?: string; /** Regex pattern used to find matching terms. */ pattern: string; /** If specified, match intervals from this field rather than the top-level field. * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field; } export interface QueryDslIntervalsWildcard { /** Analyzer used to analyze the `pattern`. * Defaults to the top-level field's analyzer. */ analyzer?: string; /** Wildcard pattern used to find matching terms. */ pattern: string; /** If specified, match intervals from this field rather than the top-level field. * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field; } export type QueryDslLike = string | QueryDslLikeDocument; export interface QueryDslLikeDocument { /** A document not present in the index. */ doc?: any; fields?: Field[]; /** ID of a document. */ _id?: Id; /** Index of a document. */ _index?: IndexName; /** Overrides the default analyzer. */ per_field_analyzer?: Record<Field, string>; routing?: Routing; version?: VersionNumber; version_type?: VersionType; } export interface QueryDslMatchAllQuery extends QueryDslQueryBase { } export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string; /** Maximum edit distance allowed for matching. * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzziness?: Fuzziness; /** Method used to rewrite the query. * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_rewrite?: MultiTermQueryRewrite; /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean; /** Maximum number of terms to which the query will expand. * Can be applied to the term subqueries constructed for all terms but the final term. */ max_expansions?: integer; /** Minimum number of clauses that must match for a document to be returned. * Applied to the constructed bool query. */ minimum_should_match?: MinimumShouldMatch; /** Boolean logic used to interpret text in the query value. * Applied to the constructed bool query. */ operator?: QueryDslOperator; /** Number of beginning characters left unchanged for fuzzy matching. * Can be applied to the term subqueries constructed for all terms but the final term. */ prefix_length?: integer; /** Terms you wish to find in the provided field. * The last term is used in a prefix query. */ query: string; } export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { } export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { /** Analyzer used to convert text in the query value into tokens. */ analyzer?: string; /** Maximum number of terms to which the last provided term of the query value will expand. */ max_expansions?: integer; /** Text you wish to find in the provided field. */ query: string; /** Maximum number of positions allowed between matching tokens. */ slop?: integer; /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery; }
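/* Editor's sketch (not part of the generated types): an `intervals` query combining the
 * rule types above, after the familiar "favorite food" example from the Elasticsearch
 * docs. Phrases are hypothetical. */
const exampleIntervals: QueryDslIntervalsQuery = {
  all_of: {
    ordered: true,
    intervals: [
      { match: { query: 'my favorite food', max_gaps: 0, ordered: true } },
      {
        any_of: {
          intervals: [
            { match: { query: 'hot water' } },
            { match: { query: 'cold porridge' } },
          ],
        },
      },
    ],
  },
};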
export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string; /** Query terms that are analyzed and turned into a phrase query. */ query: string; /** Maximum number of positions allowed between matching tokens. */ slop?: integer; /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery; } export interface QueryDslMatchQuery extends QueryDslQueryBase { /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string; /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean; cutoff_frequency?: double; /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness; /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite; /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean; /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean; /** Maximum number of terms to which the query will expand. */ max_expansions?: integer; /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch; /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator; /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer; /** Text, number, boolean value or date you wish to find in the provided field. */ query: string | float | boolean; /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery; } export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { /** The analyzer that is used to analyze the free form text. * Defaults to the analyzer associated with the first field in fields. */ analyzer?: string; /** Each term in the formed query could be further boosted by its tf-idf score. * This sets the boost factor to use when using this feature. * Defaults to deactivated (0). */ boost_terms?: double; /** Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */ fail_on_unsupported_field?: boolean; /** A list of fields to fetch and analyze the text from. * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[]; /** Specifies whether the input documents should also be included in the search results returned. */ include?: boolean; /** Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */ like: QueryDslLike | QueryDslLike[]; /** The maximum document frequency above which the terms are ignored from the input document. */ max_doc_freq?: integer; /** The maximum number of query terms that can be selected. */ max_query_terms?: integer; /** The maximum word length above which the terms are ignored. * Defaults to unbounded (`0`).
*/ max_word_length?: integer; /** The minimum document frequency below which the terms are ignored from the input document. */ min_doc_freq?: integer; /** After the disjunctive query has been formed, this parameter controls the number of terms that must match. */ minimum_should_match?: MinimumShouldMatch; /** The minimum term frequency below which the terms are ignored from the input document. */ min_term_freq?: integer; /** The minimum word length below which the terms are ignored. */ min_word_length?: integer; routing?: Routing; /** An array of stop words. * Any word in this set is ignored. */ stop_words?: AnalysisStopWords; /** Used in combination with `like` to exclude documents that match a set of terms. */ unlike?: QueryDslLike | QueryDslLike[]; version?: VersionNumber; version_type?: VersionType; } export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string; /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean; cutoff_frequency?: double; /** The fields to be queried. * Defaults to the `index.query.default_field` index setting, which in turn defaults to `*`. */ fields?: Fields; /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness; /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite; /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean; /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean; /** Maximum number of terms to which the query will expand. */ max_expansions?: integer; /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch; /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator; /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer; /** Text, number, boolean value or date you wish to find in the provided field. */ query: string; /** Maximum number of positions allowed between matching tokens. */ slop?: integer; /** Determines how scores for each per-term blended query and scores across groups are combined. */ tie_breaker?: double; /** How the `multi_match` query is executed internally. */ type?: QueryDslTextQueryType; /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery; } export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum'; export interface QueryDslNestedQuery extends QueryDslQueryBase { /** Indicates whether to ignore an unmapped path and not return any documents instead of an error. */ ignore_unmapped?: boolean; /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits; /** Path to the nested object you wish to search. */ path: Field; /** Query you wish to run on nested objects in the path. */ query: QueryDslQueryContainer; /** How scores for matching child objects affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode; }
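/* Editor's sketch (not part of the generated types): a `multi_match` query searching two
 * (hypothetical) fields, with a per-field boost on `title`. */
const exampleMultiMatch: QueryDslMultiMatchQuery = {
  query: 'quick brown fox',
  fields: ['title^3', 'body'],
  type: 'best_fields',
  operator: 'and',
};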
*/ score_mode?: QueryDslChildScoreMode; } export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { } export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys & { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode; }; export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR'; export interface QueryDslParentIdQuery extends QueryDslQueryBase { /** ID of the parent document. */ id?: Id; /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean; /** Name of the child relationship mapped for the `join` field. */ type?: RelationName; } export interface QueryDslPercolateQuery extends QueryDslQueryBase { /** The source of the document being percolated. */ document?: any; /** An array of sources of the documents being percolated. */ documents?: any[]; /** Field that holds the indexed queries. The field must use the `percolator` mapping type. */ field: Field; /** The ID of a stored document to percolate. */ id?: Id; /** The index of a stored document to percolate. */ index?: IndexName; /** The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */ name?: string; /** Preference used to fetch document to percolate. */ preference?: string; /** Routing used to fetch document to percolate. */ routing?: Routing; /** The expected version of a stored document to percolate. */ version?: VersionNumber; } export interface QueryDslPinnedDoc { /** The unique document ID. */ _id: Id; /** The index that contains the document. */ _index?: IndexName; } export interface QueryDslPinnedQuery extends QueryDslQueryBase { /** Any choice of query used to rank documents, which will be ranked below the "pinned" documents. */ organic: QueryDslQueryContainer; /** Document IDs listed in the order they are to appear in results. * Required if `docs` is not specified. */ ids?: Id[]; /** Documents listed in the order they are to appear in results. * Required if `ids` is not specified. */ docs?: QueryDslPinnedDoc[]; } export interface QueryDslPrefixQuery extends QueryDslQueryBase { /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite; /** Beginning characters of terms you wish to find in the provided field. */ value: string; /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. * Default is `false`, which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean; } export interface QueryDslQueryBase { /** Floating point number used to decrease or increase the relevance scores of the query. * Boost values are relative to the default value of 1.0. * A boost value between 0 and 1.0 decreases the relevance score. * A value greater than 1.0 increases the relevance score. */ boost?: float; _name?: string; }
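/*
 * Illustrative usage sketch (not part of the generated types): a `prefix` query
 * showing the `boost` and `_name` options every query clause inherits from
 * QueryDslQueryBase. The field `user.id` and all values are hypothetical.
 */
const prefixQueryExample: QueryDslQueryContainer = {
  prefix: {
    'user.id': {
      value: 'ki',            // matches terms beginning with "ki"
      case_insensitive: true, // ASCII case-insensitive matching
      boost: 2.0,             // relative to the default boost of 1.0
      _name: 'user-prefix',   // labels this clause in matched_queries
    },
  },
};
export interface QueryDslQueryContainer { /** Matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery; /** Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */ boosting?: QueryDslBoostingQuery; common?: Partial>; /** The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field.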
*/ combined_fields?: QueryDslCombinedFieldsQuery; /** Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */ constant_score?: QueryDslConstantScoreQuery; /** Returns documents matching one or more wrapped queries, called query clauses or clauses. * If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */ dis_max?: QueryDslDisMaxQuery; /** Boosts the relevance score of documents closer to a provided origin date or point. * For example, you can use this query to give more weight to documents closer to a certain date or location. */ distance_feature?: QueryDslDistanceFeatureQuery; /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery; /** The `function_score` query enables you to modify the score of documents that are retrieved by a query. */ function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[]; /** Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. */ fuzzy?: Partial>; /** Matches `geo_point` and `geo_shape` values that intersect a bounding box. */ geo_bounding_box?: QueryDslGeoBoundingBoxQuery; /** Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */ geo_distance?: QueryDslGeoDistanceQuery; /** Matches `geo_point` and `geo_shape` values that intersect a grid cell from a GeoGrid aggregation. */ geo_grid?: Partial>; geo_polygon?: QueryDslGeoPolygonQuery; /** Filter documents indexed using either the `geo_shape` or the `geo_point` type. */ geo_shape?: QueryDslGeoShapeQuery; /** Returns parent documents whose joined child documents match a provided query. */ has_child?: QueryDslHasChildQuery; /** Returns child documents whose joined parent document matches a provided query. */ has_parent?: QueryDslHasParentQuery; /** Returns documents based on their IDs. * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery; /** Returns documents based on the order and proximity of matching terms. */ intervals?: Partial>; /** Finds the k nearest vectors to a query vector, as measured by a similarity * metric. The `knn` query finds nearest vectors through approximate search on indexed * dense_vectors. */ knn?: KnnQuery; /** Returns documents that match a provided text, number, date or boolean value. * The provided text is analyzed before matching. */ match?: Partial>; /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery; /** Analyzes its input and constructs a `bool` query from the terms. * Each term except the last is used in a `term` query. * The last term is used in a prefix query. */ match_bool_prefix?: Partial>; /** Matches no documents. */ match_none?: QueryDslMatchNoneQuery; /** Analyzes the text and creates a phrase query out of the analyzed text. */ match_phrase?: Partial>; /** Returns documents that contain the words of a provided text, in the same order as provided. * The last term of the provided text is treated as a prefix, matching any words that begin with that term. */ match_phrase_prefix?: Partial>; /** Returns documents that are "like" a given set of documents. */ more_like_this?: QueryDslMoreLikeThisQuery; /** Enables you to search for a provided text, number, date or boolean value across multiple fields.
* The provided text is analyzed before matching. */ multi_match?: QueryDslMultiMatchQuery; /** Wraps another query to search nested fields. * If an object matches the search, the nested query returns the root parent document. */ nested?: QueryDslNestedQuery; /** Returns child documents joined to a specific parent document. */ parent_id?: QueryDslParentIdQuery; /** Matches queries stored in an index. */ percolate?: QueryDslPercolateQuery; /** Promotes selected documents to rank higher than those matching a given query. */ pinned?: QueryDslPinnedQuery; /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial>; /** Returns documents based on a provided query string, using a parser with a strict syntax. */ query_string?: QueryDslQueryStringQuery; /** Returns documents that contain terms within a provided range. */ range?: Partial>; /** Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */ rank_feature?: QueryDslRankFeatureQuery; /** Returns documents that contain terms matching a regular expression. */ regexp?: Partial>; rule?: QueryDslRuleQuery; /** Filters documents based on a provided script. * The script query is typically used in a filter context. */ script?: QueryDslScriptQuery; /** Uses a script to provide a custom score for returned documents. */ script_score?: QueryDslScriptScoreQuery; /** A semantic query against `semantic_text` field types. */ semantic?: QueryDslSemanticQuery; /** Queries documents that contain fields indexed using the `shape` type. */ shape?: QueryDslShapeQuery; /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery; /** Returns matches which enclose another span query. */ span_containing?: QueryDslSpanContainingQuery; /** Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */ span_field_masking?: QueryDslSpanFieldMaskingQuery; /** Matches spans near the beginning of a field. */ span_first?: QueryDslSpanFirstQuery; /** Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */ span_multi?: QueryDslSpanMultiTermQuery; /** Matches spans which are near one another. * You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. */ span_near?: QueryDslSpanNearQuery; /** Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */ span_not?: QueryDslSpanNotQuery; /** Matches the union of its span clauses. */ span_or?: QueryDslSpanOrQuery; /** Matches spans containing a term. */ span_term?: Partial>; /** Returns matches which are enclosed inside another span query. */ span_within?: QueryDslSpanWithinQuery; /** Queries against a sparse vector field, using input query vectors or a natural language processing model to convert the query into a list of token-weight pairs. */ sparse_vector?: QueryDslSparseVectorQuery; /** Returns documents that contain an exact term in a provided field. * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial>; /** Returns documents that contain one or more exact terms in a provided field.
* To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery; /** Returns documents that contain a minimum number of exact terms in a provided field. * To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */ terms_set?: Partial>; /** Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */ text_expansion?: Partial>; /** Supports returning text_expansion query results by sending in precomputed tokens with the query. */ weighted_tokens?: Partial>; /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial>; /** A query that accepts any other query as a base64 encoded string. */ wrapper?: QueryDslWrapperQuery; type?: QueryDslTypeQuery; } export interface QueryDslQueryStringQuery extends QueryDslQueryBase { /** If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */ allow_leading_wildcard?: boolean; /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string; /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean; /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean; /** Default field to search if no field is provided in the query string. * Supports wildcards (`*`). * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ default_field?: Field; /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator; /** If `true`, enables position increments in queries constructed from a `query_string` search. */ enable_position_increments?: boolean; escape?: boolean; /** Array of fields to search. Supports wildcards (`*`). */ fields?: Field[]; /** Maximum edit distance allowed for fuzzy matching. */ fuzziness?: Fuzziness; /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer; /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer; /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite; /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean; /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ lenient?: boolean; /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer; /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch; /** Maximum number of positions allowed between matching tokens for phrases. */ phrase_slop?: double; /** Query string you wish to parse and use for search. */ query: string; /** Analyzer used to convert quoted text in the query string into tokens. * For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */ quote_analyzer?: string; /** Suffix appended to quoted text in the query string. * You can use this suffix to use a different analysis method for exact matches.
*/ quote_field_suffix?: string; /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite; /** How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */ tie_breaker?: double; /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */ time_zone?: TimeZone; /** Determines how the query matches and scores documents. */ type?: QueryDslTextQueryType; } export interface QueryDslRandomScoreFunction { field?: Field; seed?: long | string; } export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery; export interface QueryDslRangeQueryBase extends QueryDslQueryBase { /** Indicates how the range query matches values for `range` fields. */ relation?: QueryDslRangeRelation; /** Greater than. */ gt?: T; /** Greater than or equal to. */ gte?: T; /** Less than. */ lt?: T; /** Less than or equal to. */ lte?: T; } export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects'; export interface QueryDslRankFeatureFunction { } export interface QueryDslRankFeatureFunctionLinear { } export interface QueryDslRankFeatureFunctionLogarithm { /** Configurable scaling factor. */ scaling_factor: float; } export interface QueryDslRankFeatureFunctionSaturation { /** Configurable pivot value so that the result will be less than 0.5. */ pivot?: float; } export interface QueryDslRankFeatureFunctionSigmoid { /** Configurable pivot value so that the result will be less than 0.5. */ pivot: float; /** Configurable exponent. */ exponent: float; } export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { /** `rank_feature` or `rank_features` field used to boost relevance scores. */ field: Field; /** Saturation function used to boost relevance scores based on the value of the rank feature `field`. */ saturation?: QueryDslRankFeatureFunctionSaturation; /** Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. */ log?: QueryDslRankFeatureFunctionLogarithm; /** Linear function used to boost relevance scores based on the value of the rank feature `field`. */ linear?: QueryDslRankFeatureFunctionLinear; /** Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. */ sigmoid?: QueryDslRankFeatureFunctionSigmoid; } export interface QueryDslRegexpQuery extends QueryDslQueryBase { /** Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. * When `false`, case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean; /** Enables optional operators for the regular expression. */ flags?: string; /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer; /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite; /** Regular expression for terms you wish to find in the provided field. */ value: string; } export interface QueryDslRuleQuery extends QueryDslQueryBase { organic: QueryDslQueryContainer; ruleset_ids?: Id | Id[]; ruleset_id?: string; match_criteria: any; } export interface QueryDslScriptQuery extends QueryDslQueryBase { /** Contains a script to run as a query. * This script must return a boolean value, `true` or `false`. */ script: Script | ScriptSource; }
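/*
 * Illustrative usage sketch (not part of the generated types): a `query_string`
 * query using the Lucene query string syntax over two hypothetical fields.
 */
const queryStringExample: QueryDslQueryContainer = {
  query_string: {
    query: '(new york city) OR (big apple)',
    fields: ['title', 'content'],
    default_operator: 'AND',
    // Terms of quoted phrases may appear up to one position apart.
    phrase_slop: 1,
  },
};
export interface QueryDslScriptScoreFunction { /** A script that computes a score.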
*/ script: Script | ScriptSource; } export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { /** Documents with a score lower than this floating point number are excluded from the search results. */ min_score?: float; /** Query used to return documents. */ query: QueryDslQueryContainer; /** Script used to compute the score of documents returned by the query. * Important: final relevance scores from the `script_score` query cannot be negative. */ script: Script | ScriptSource; } export interface QueryDslSemanticQuery extends QueryDslQueryBase { /** The field to query, which must be a `semantic_text` field type. */ field: string; /** The query text. */ query: string; } export interface QueryDslShapeFieldQuery { /** Queries using a pre-indexed shape. */ indexed_shape?: QueryDslFieldLookup; /** Spatial relation between the query shape and the document shape. */ relation?: GeoShapeRelation; /** Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. */ shape?: GeoShape; } export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { /** When set to `true` the query ignores an unmapped field and will not match any documents. */ ignore_unmapped?: boolean; } export type QueryDslShapeQuery = QueryDslShapeQueryKeys & { [property: string]: QueryDslShapeFieldQuery | boolean | float | string; }; export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'; export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags; export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string; /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean; /** If `true`, the parser creates a `match_phrase` query for each multi-position token. */ auto_generate_synonyms_phrase_query?: boolean; /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator; /** Array of fields you wish to search. * Accepts wildcard expressions. * You also can boost relevance scores for matches to particular fields using a caret (`^`) notation. * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[]; /** List of enabled operators for the simple query string syntax. */ flags?: QueryDslSimpleQueryStringFlags; /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer; /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer; /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean; /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ lenient?: boolean; /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch; /** Query string in the simple query string syntax you wish to parse and use for search. */ query: string; /** Suffix appended to quoted text in the query string. */ quote_field_suffix?: string; }
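/*
 * Illustrative usage sketch (not part of the generated types): a
 * `simple_query_string` query. The `flags` value is a pipe-separated string of
 * QueryDslSimpleQueryStringFlag values, per QueryDslSimpleQueryStringFlags.
 */
const simpleQueryStringExample: QueryDslQueryContainer = {
  simple_query_string: {
    query: 'quick + brown fox*',
    fields: ['title'],
    // Restrict the syntax to the OR, AND and PREFIX operators.
    flags: 'OR|AND|PREFIX',
    default_operator: 'and',
  },
};
export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { /** Can be any span query. * Matching spans from `big` that contain matches from `little` are returned.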
*/ big: QueryDslSpanQuery; /** Can be any span query. * Matching spans from `big` that contain matches from `little` are returned. */ little: QueryDslSpanQuery; } export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { field: Field; query: QueryDslSpanQuery; } export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { /** Controls the maximum end position permitted in a match. */ end: integer; /** Can be any other span type query. */ match: QueryDslSpanQuery; } export type QueryDslSpanGapQuery = Partial>; export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { /** Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */ match: QueryDslQueryContainer; } export interface QueryDslSpanNearQuery extends QueryDslQueryBase { /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[]; /** Controls whether matches are required to be in-order. */ in_order?: boolean; /** Controls the maximum number of intervening unmatched positions permitted. */ slop?: integer; } export interface QueryDslSpanNotQuery extends QueryDslQueryBase { /** The number of tokens from within the include span that can’t have overlap with the exclude span. * Equivalent to setting both `pre` and `post`. */ dist?: integer; /** Span query whose matches must not overlap those returned. */ exclude: QueryDslSpanQuery; /** Span query whose matches are filtered. */ include: QueryDslSpanQuery; /** The number of tokens after the include span that can’t have overlap with the exclude span. */ post?: integer; /** The number of tokens before the include span that can’t have overlap with the exclude span. */ pre?: integer; } export interface QueryDslSpanOrQuery extends QueryDslQueryBase { /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[]; } export interface QueryDslSpanQuery { /** Accepts a list of span queries, but only returns those spans which also match a second span query. */ span_containing?: QueryDslSpanContainingQuery; /** Allows queries like `span_near` or `span_or` across different fields. */ span_field_masking?: QueryDslSpanFieldMaskingQuery; /** Accepts another span query whose matches must appear within the first N positions of the field. */ span_first?: QueryDslSpanFirstQuery; span_gap?: QueryDslSpanGapQuery; /** Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. */ span_multi?: QueryDslSpanMultiTermQuery; /** Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */ span_near?: QueryDslSpanNearQuery; /** Wraps another span query, and excludes any documents which match that query. */ span_not?: QueryDslSpanNotQuery; /** Combines multiple span queries and returns documents which match any of the specified queries. */ span_or?: QueryDslSpanOrQuery; /** The equivalent of the `term` query but for use with other span queries. */ span_term?: Partial>; /** The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */ span_within?: QueryDslSpanWithinQuery; } export interface QueryDslSpanTermQuery extends QueryDslQueryBase { value: FieldValue; /** @alias value */ term: FieldValue; }
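/*
 * Illustrative usage sketch (not part of the generated types): a `span_near`
 * query combining two `span_term` clauses on a hypothetical `title` field.
 */
const spanNearExample: QueryDslQueryContainer = {
  span_near: {
    clauses: [
      { span_term: { title: { value: 'quick' } } },
      { span_term: { title: { value: 'fox' } } },
    ],
    slop: 2,        // at most two intervening unmatched positions
    in_order: true, // "quick" must occur before "fox"
  },
};
export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { /** Can be any span query. * Matching spans from `little` that are enclosed within `big` are returned. */ big: QueryDslSpanQuery; /** Can be any span query.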
* Matching spans from `little` that are enclosed within `big` are returned. */ little: QueryDslSpanQuery; } export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { /** The name of the field that contains the token-weight pairs to be searched against. * This field must be a mapped sparse_vector field. */ field: Field; /** Dictionary of precomputed sparse vectors and their associated weights. * Only one of inference_id or query_vector may be supplied in a request. */ query_vector?: Record; /** The inference ID to use to convert the query text into token-weight pairs. * It must be the same inference ID that was used to create the tokens from the input text. * Only one of inference_id and query_vector is allowed. * If inference_id is specified, query must also be specified. */ inference_id?: Id; /** The query text you want to use for search. * If inference_id is specified, query must also be specified. */ query?: string; /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. * Default: false * @experimental */ prune?: boolean; /** Optional pruning configuration. * If enabled, this will omit non-significant tokens from the query in order to improve query performance. * This is only used if prune is set to true. * If prune is set to true but pruning_config is not specified, default values will be used. * @experimental */ pruning_config?: QueryDslTokenPruningConfig; } export interface QueryDslTermQuery extends QueryDslQueryBase { /** Term you wish to find in the provided field. */ value: FieldValue; /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. * When `false`, the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean; } export interface QueryDslTermRangeQuery extends QueryDslRangeQueryBase { } export interface QueryDslTermsLookup { index: IndexName; id: Id; path: Field; routing?: Routing; } export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { } export type QueryDslTermsQuery = QueryDslTermsQueryKeys & { [property: string]: QueryDslTermsQueryField | float | string; }; export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup;
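/*
 * Illustrative usage sketch (not part of the generated types): a `terms` query,
 * first with inline values, then as a terms lookup that fetches the value list
 * from another document. The index, document ID and field names are hypothetical.
 */
const termsInlineExample: QueryDslQueryContainer = {
  terms: { 'user.id': ['kimchy', 'elkbee'] },
};
const termsLookupExample: QueryDslQueryContainer = {
  // Fetches the terms from the `followers` field of document 2 in `my-users`.
  terms: { 'user.id': { index: 'my-users', id: '2', path: 'followers' } },
};
export interface QueryDslTermsSetQuery extends QueryDslQueryBase { /** Specification describing number of matching terms required to return a document. */ minimum_should_match?: MinimumShouldMatch; /** Numeric field containing the number of matching terms required to return a document. */ minimum_should_match_field?: Field; /** Custom script containing the number of matching terms required to return a document. */ minimum_should_match_script?: Script | ScriptSource; /** Array of terms you wish to find in the provided field.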
*/ terms: FieldValue[]; } export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { /** The text expansion NLP model to use. */ model_id: string; /** The query text. */ model_text: string; /** Token pruning configurations * @experimental */ pruning_config?: QueryDslTokenPruningConfig; } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix'; export interface QueryDslTokenPruningConfig { /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ tokens_freq_ratio_threshold?: integer; /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ tokens_weight_threshold?: float; /** Whether to only score pruned tokens, vs only scoring kept tokens. */ only_score_pruned_tokens?: boolean; } export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string; } export interface QueryDslUntypedDecayFunctionKeys extends QueryDslDecayFunctionBase { } export type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys & { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode; }; export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { } export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { /** Date format used to convert `date` values in the query. */ format?: DateFormat; /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone; } export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { /** The tokens representing this query */ tokens: Record; /** Token pruning configurations */ pruning_config?: QueryDslTokenPruningConfig; } export interface QueryDslWildcardQuery extends QueryDslQueryBase { /** Allows case insensitive matching of the pattern with the indexed field values when set to `true`. Default is `false`, which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean; /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite; /** Wildcard pattern for terms you wish to find in the provided field. Required when `wildcard` is not set. */ value?: string; /** Wildcard pattern for terms you wish to find in the provided field. Required when `value` is not set. */ wildcard?: string; } export interface QueryDslWrapperQuery extends QueryDslQueryBase { /** A base64 encoded query. * The binary data format can be any of JSON, YAML, CBOR or SMILE encodings. */ query: string; } export type QueryDslZeroTermsQuery = 'all' | 'none';
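/*
 * Illustrative usage sketch (not part of the generated types): a `wildcard`
 * query on a hypothetical field; `*` matches any character sequence and `?`
 * matches any single character.
 */
const wildcardExample: QueryDslQueryContainer = {
  wildcard: { 'user.id': { value: 'ki*y', case_insensitive: true } },
};
export interface AsyncSearchAsyncSearch> { /** Partial aggregation results, coming from the shards that have already completed running the query. */ aggregations?: TAggregations; _clusters?: ClusterStatistics; fields?: Record; hits: SearchHitsMetadata; max_score?: double; /** Indicates how many reductions of the results have been performed. * If this number increases compared to the last retrieved results for a get async search request, you can expect additional results included in the search response. */ num_reduce_phases?: long; profile?: SearchProfile; pit_id?: Id; _scroll_id?: ScrollId; /** Indicates how many shards have run the query. * Note that in order for shard results to be included in the search response, they need to be reduced first.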
*/ _shards: ShardStatistics; suggest?: Record[]>; terminated_early?: boolean; timed_out: boolean; took: long; } export interface AsyncSearchAsyncSearchDocumentResponseBase> extends AsyncSearchAsyncSearchResponseBase { response: AsyncSearchAsyncSearch; } export interface AsyncSearchAsyncSearchResponseBase { id?: Id; /** When the query is no longer running, this property indicates whether the search failed or was successfully completed on all shards. * While the query is running, `is_partial` is always set to `true`. */ is_partial: boolean; /** Indicates whether the search is still running or has completed. * * > info * > If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`. */ is_running: boolean; /** Indicates when the async search will expire. */ expiration_time?: DateTime; expiration_time_in_millis: EpochTime; start_time?: DateTime; start_time_in_millis: EpochTime; /** Indicates when the async search completed. * It is present only when the search has completed. */ completion_time?: DateTime; completion_time_in_millis?: EpochTime; } export interface AsyncSearchDeleteRequest extends RequestBase { /** A unique identifier for the async search. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type AsyncSearchDeleteResponse = AcknowledgedResponseBase; export interface AsyncSearchGetRequest extends RequestBase { /** A unique identifier for the async search. */ id: Id; /** The length of time that the async search should be available in the cluster. * When not specified, the `keep_alive` set with the corresponding submit async request will be used. * Otherwise, it is possible to override the value and extend the validity of the request. * When this period expires, the search, if still running, is cancelled. * If the search is completed, its saved results are deleted. */ keep_alive?: Duration; /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean; /** Specifies how long to wait for the search to complete, up to the provided timeout. * Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. * By default no timeout is set, meaning that the currently available results will be returned without any additional wait. */ wait_for_completion_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; keep_alive?: never; typed_keys?: never; wait_for_completion_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; keep_alive?: never; typed_keys?: never; wait_for_completion_timeout?: never; }; } export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase;
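/*
 * Illustrative usage sketch (not part of the generated types): polling a
 * previously submitted async search. The `id` value is a placeholder for the
 * identifier returned by the submit API.
 */
const getAsyncSearchExample: AsyncSearchGetRequest = {
  id: '<async-search-id>',           // placeholder ID from the submit response
  keep_alive: '5m',                  // retain the stored results for five more minutes
  wait_for_completion_timeout: '2s', // return whatever is available after two seconds
};
export interface AsyncSearchStatusRequest extends RequestBase { /** A unique identifier for the async search. */ id: Id; /** The length of time that the async search needs to be available. * Ongoing async searches and any saved search results are deleted after this period.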
*/ keep_alive?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; keep_alive?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; keep_alive?: never; }; } export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase; export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { /** The number of shards that have run the query so far. */ _shards: ShardStatistics; /** Metadata about clusters involved in the cross-cluster search. * It is not shown for local-only searches. */ _clusters?: ClusterStatistics; /** If the async search completed, this field shows the status code of the search. * For example, `200` indicates that the async search was successfully completed. * `503` indicates that the async search was completed with an error. */ completion_status?: integer; } export interface AsyncSearchSubmitRequest extends RequestBase { /** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ index?: Indices; /** Blocks and waits until the search is completed up to a certain timeout. * When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ wait_for_completion_timeout?: Duration; /** Specifies how long the async search needs to be available. * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration; /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. */ keep_on_completion?: boolean; /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean; /** Indicates whether an error should be returned if there is a partial search failure or timeout */ allow_partial_search_results?: boolean; /** The analyzer to use for the query string */ analyzer?: string; /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ analyze_wildcard?: boolean; /** Affects how often partial results become available, which happens whenever shard results are reduced. * A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ batched_reduce_size?: long; /** The default value is the only supported value. */ ccs_minimize_roundtrips?: boolean; /** The default operator for query string query (AND or OR) */ default_operator?: QueryDslOperator; /** The field to use as default where no field prefix is given in the query string */ df?: string; /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards; /** Whether specified concrete, expanded or aliased indices should be ignored when throttled */ ignore_throttled?: boolean; /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean; /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ lenient?: boolean; /** The number of concurrent shard requests per node that this search executes.
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ max_concurrent_shard_requests?: integer; /** Specify the node or shard the operation should be performed on (default: random) */ preference?: string; /** Specify if request cache should be used for this request or not, defaults to true */ request_cache?: boolean; /** A comma-separated list of specific routing values */ routing?: Routing; /** Search operation type */ search_type?: SearchType; /** Specifies which field to use for suggestions. */ suggest_field?: Field; /** Specify suggest mode */ suggest_mode?: SuggestMode; /** How many suggestions to return in response */ suggest_size?: long; /** The source text for which the suggestions should be returned. */ suggest_text?: string; /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean; /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean; /** A list of fields to exclude from the returned _source field */ _source_excludes?: Fields; /** A list of fields to extract and return from the _source field */ _source_includes?: Fields; /** Query in the Lucene query string syntax */ q?: string; aggregations?: Record; /** @alias aggregations */ aggs?: Record; collapse?: SearchFieldCollapse; /** If true, returns detailed information about score computation as part of a hit. */ explain?: boolean; /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record; /** Starting document offset. By default, you cannot page through more than 10,000 * hits using the from and size parameters. To page through more hits, use the * search_after parameter. */ from?: integer; highlight?: SearchHighlight; /** Number of hits matching the query to count accurately. If true, the exact * number of hits is returned at the cost of some performance. If false, the * response does not include the total number of hits matching the query. * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits; /** Boosts the _score of documents from specified indices. */ indices_boost?: Partial>[]; /** Array of wildcard (*) patterns. The request returns doc values for field * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[]; /** Defines the approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[]; /** Minimum _score for matching documents. Documents with a lower _score are * not included in search results and results collected by aggregations. */ min_score?: double; post_filter?: QueryDslQueryContainer; profile?: boolean; /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer; rescore?: SearchRescore | SearchRescore[]; /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record; search_after?: SortResults; /** The number of hits to return. By default, you cannot page through more * than 10,000 hits using the from and size parameters. To page through more * hits, use the search_after parameter. */ size?: integer; slice?: SlicedScroll; sort?: Sort; /** Indicates which source fields are returned for matching documents. These * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig; /** Array of wildcard (*) patterns. 
The request returns values for field names * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[]; suggest?: SearchSuggester; /** Maximum number of documents to collect for each shard. If a query reaches this * limit, Elasticsearch terminates the query early. Elasticsearch collects documents * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long; /** Specifies the period of time to wait for a response from each shard. If no response * is received before the timeout expires, the request fails and returns an error. * Defaults to no timeout. */ timeout?: string; /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean; /** If true, returns document version as part of a hit. */ version?: boolean; /** If true, returns sequence number and primary term of the last modification * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean; /** List of stored fields to return as part of a hit. If no fields are specified, * no stored fields are included in the response. If this field is specified, the _source * parameter defaults to false. You can pass _source: true to return both source fields * and stored fields in the search response. */ stored_fields?: Fields; /** Limits the search to a point in time (PIT). If you provide a PIT, you * cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference; /** Defines one or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields; /** Stats groups to associate with the search. Each group maintains a statistics * aggregation for its associated searches. You can retrieve these stats using * the indices stats API. */ stats?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; wait_for_completion_timeout?: never; keep_alive?: never; keep_on_completion?: never; allow_no_indices?: never; allow_partial_search_results?: never; analyzer?: never; analyze_wildcard?: never; batched_reduce_size?: never; ccs_minimize_roundtrips?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; lenient?: never; max_concurrent_shard_requests?: never; preference?: never; request_cache?: never; routing?: never; search_type?: never; suggest_field?: never; suggest_mode?: never; suggest_size?: never; suggest_text?: never; typed_keys?: never; rest_total_hits_as_int?: never; _source_excludes?: never; _source_includes?: never; q?: never; aggregations?: never; aggs?: never; collapse?: never; explain?: never; ext?: never; from?: never; highlight?: never; track_total_hits?: never; indices_boost?: never; docvalue_fields?: never; knn?: never; min_score?: never; post_filter?: never; profile?: never; query?: never; rescore?: never; script_fields?: never; search_after?: never; size?: never; slice?: never; sort?: never; _source?: never; fields?: never; suggest?: never; terminate_after?: never; timeout?: never; track_scores?: never; version?: never; seq_no_primary_term?: never; stored_fields?: never; pit?: never; runtime_mappings?: never; stats?: never; }); /** All values in `querystring` will be added to the request querystring.
*/ querystring?: { [key: string]: any; } & { index?: never; wait_for_completion_timeout?: never; keep_alive?: never; keep_on_completion?: never; allow_no_indices?: never; allow_partial_search_results?: never; analyzer?: never; analyze_wildcard?: never; batched_reduce_size?: never; ccs_minimize_roundtrips?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; lenient?: never; max_concurrent_shard_requests?: never; preference?: never; request_cache?: never; routing?: never; search_type?: never; suggest_field?: never; suggest_mode?: never; suggest_size?: never; suggest_text?: never; typed_keys?: never; rest_total_hits_as_int?: never; _source_excludes?: never; _source_includes?: never; q?: never; aggregations?: never; aggs?: never; collapse?: never; explain?: never; ext?: never; from?: never; highlight?: never; track_total_hits?: never; indices_boost?: never; docvalue_fields?: never; knn?: never; min_score?: never; post_filter?: never; profile?: never; query?: never; rescore?: never; script_fields?: never; search_after?: never; size?: never; slice?: never; sort?: never; _source?: never; fields?: never; suggest?: never; terminate_after?: never; timeout?: never; track_scores?: never; version?: never; seq_no_primary_term?: never; stored_fields?: never; pit?: never; runtime_mappings?: never; stats?: never; }; } export type AsyncSearchSubmitResponse> = AsyncSearchAsyncSearchDocumentResponseBase; export interface AutoscalingAutoscalingPolicy { roles: string[]; /** Decider settings. */ deciders: Record; } export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { /** the name of the autoscaling policy */ name: Name; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase; export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity { node: AutoscalingGetAutoscalingCapacityAutoscalingResources; total: AutoscalingGetAutoscalingCapacityAutoscalingResources; } export interface AutoscalingGetAutoscalingCapacityAutoscalingDecider { required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity; reason_summary?: string; reason_details?: any; } export interface AutoscalingGetAutoscalingCapacityAutoscalingDeciders { required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity; current_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity; current_nodes: AutoscalingGetAutoscalingCapacityAutoscalingNode[]; deciders: Record; } export interface AutoscalingGetAutoscalingCapacityAutoscalingNode { name: NodeName; } export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { storage: integer; memory: integer; } export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { /** Period to wait for a connection to the master node. 
* If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface AutoscalingGetAutoscalingCapacityResponse { policies: Record; } export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { /** the name of the autoscaling policy */ name: Name; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy;
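/*
 * Illustrative usage sketch (not part of the generated types): a hypothetical
 * request body for creating or updating an autoscaling policy. The policy name
 * and the `fixed` decider (with its default settings) are assumptions for the
 * example, not values mandated by the API.
 */
const putAutoscalingPolicyExample: AutoscalingPutAutoscalingPolicyRequest = {
  name: 'ml-policy', // hypothetical policy name
  policy: {
    roles: ['ml'],          // node roles the policy applies to
    deciders: { fixed: {} }, // decider name mapped to its settings object
  },
};
export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { /** the name of the autoscaling policy */ name: Name; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; policy?: AutoscalingAutoscalingPolicy; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; policy?: never; }); /** All values in `querystring` will be added to the request querystring.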
*/ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; policy?: never; }; } export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase; export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's'; export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[]; export type CatCatDatafeedColumn = 'ae' | 
'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state'; export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[]; export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v'; export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[]; export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 
'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string; export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[]; export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v'; export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[]; export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v'; export type CatCatTransformColumns = 
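// Editorial sketch (not generated): CatCatNodeColumn ends in `| string`, so
// column names outside the union are still accepted while the listed values
// get completion and checking. Assuming the same connected `Client`:
//
//   await client.cat.nodes({
//     h: ['name', 'heap.percent', 'load_1m'], // listed CatCatNodeColumn values
//     s: ['name:desc'], // `:desc` suffix reverses the default ascending sort
//     format: 'json',
//   })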
CatCatTransformColumn | CatCatTransformColumn[]; export interface CatAliasesAliasesRecord { /** alias name */ alias?: string; /** alias name * @alias alias */ a?: string; /** index alias points to */ index?: IndexName; /** index alias points to * @alias index */ i?: IndexName; /** index alias points to * @alias index */ idx?: IndexName; /** filter */ filter?: string; /** filter * @alias filter */ f?: string; /** filter * @alias filter */ fi?: string; /** index routing */ 'routing.index'?: string; /** index routing * @alias 'routing.index' */ ri?: string; /** index routing * @alias 'routing.index' */ routingIndex?: string; /** search routing */ 'routing.search'?: string; /** search routing * @alias 'routing.search' */ rs?: string; /** search routing * @alias 'routing.search' */ routingSearch?: string; /** write index */ is_write_index?: string; /** write index * @alias is_write_index */ w?: string; /** write index * @alias is_write_index */ isWriteIndex?: string; } export interface CatAliasesRequest extends CatCatRequestBase { /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never time out, you can set it to `-1`. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; h?: never; s?: never; expand_wildcards?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; h?: never; s?: never; expand_wildcards?: never; master_timeout?: never; }; } export type CatAliasesResponse = CatAliasesAliasesRecord[]; export interface CatAllocationAllocationRecord { /** Number of primary and replica shards assigned to the node. */ shards?: string; /** Number of primary and replica shards assigned to the node. * @alias shards */ s?: string; /** Number of shards that are scheduled to be moved elsewhere in the cluster, or -1 if an allocator other than the desired balance allocator is used */ 'shards.undesired'?: string | null; /** Sum of index write load forecasts */ 'write_load.forecast'?: SpecUtilsStringified<double> | null; /** Sum of index write load forecasts * @alias 'write_load.forecast' */ wlf?: SpecUtilsStringified<double> | null; /** Sum of index write load forecasts * @alias 'write_load.forecast' */ writeLoadForecast?: SpecUtilsStringified<double> | null; /** Sum of shard size forecasts */ 'disk.indices.forecast'?: ByteSize | null; /** Sum of shard size forecasts * @alias 'disk.indices.forecast' */ dif?: ByteSize | null; /** Sum of shard size forecasts * @alias 'disk.indices.forecast' */ diskIndicesForecast?: ByteSize | null; /** Disk space used by the node’s shards.
Does not include disk space for the translog or unassigned shards. * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. */ 'disk.indices'?: ByteSize | null; /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. * @alias 'disk.indices' */ di?: ByteSize | null; /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. * @alias 'disk.indices' */ diskIndices?: ByteSize | null; /** Total disk space in use. * Elasticsearch retrieves this metric from the node’s operating system (OS). * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. */ 'disk.used'?: ByteSize | null; /** Total disk space in use. * Elasticsearch retrieves this metric from the node’s operating system (OS). * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. * @alias 'disk.used' */ du?: ByteSize | null; /** Total disk space in use. * Elasticsearch retrieves this metric from the node’s operating system (OS). * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. * @alias 'disk.used' */ diskUsed?: ByteSize | null; /** Free disk space available to Elasticsearch. * Elasticsearch retrieves this metric from the node’s operating system. * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. */ 'disk.avail'?: ByteSize | null; /** Free disk space available to Elasticsearch. * Elasticsearch retrieves this metric from the node’s operating system. * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. * @alias 'disk.avail' */ da?: ByteSize | null; /** Free disk space available to Elasticsearch. * Elasticsearch retrieves this metric from the node’s operating system. * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. * @alias 'disk.avail' */ diskAvail?: ByteSize | null; /** Total disk space for the node, including in-use and available space. */ 'disk.total'?: ByteSize | null; /** Total disk space for the node, including in-use and available space. * @alias 'disk.total' */ dt?: ByteSize | null; /** Total disk space for the node, including in-use and available space. * @alias 'disk.total' */ diskTotal?: ByteSize | null; /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. */ 'disk.percent'?: Percentage | null; /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. 
* @alias 'disk.percent' */ dp?: Percentage | null; /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. * @alias 'disk.percent' */ diskPercent?: Percentage | null; /** Network host for the node. Set using the `network.host` setting. */ host?: Host | null; /** Network host for the node. Set using the `network.host` setting. * @alias host */ h?: Host | null; /** IP address and port for the node. */ ip?: Ip | null; /** Name for the node. Set using the `node.name` setting. */ node?: string; /** Name for the node. Set using the `node.name` setting. * @alias node */ n?: string; /** Node roles */ 'node.role'?: string | null; /** Node roles * @alias 'node.role' */ r?: string | null; /** Node roles * @alias 'node.role' */ role?: string | null; /** Node roles * @alias 'node.role' */ nodeRole?: string | null; } export interface CatAllocationRequest extends CatCatRequestBase { /** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds; /** The unit used to display byte values. */ bytes?: Bytes; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; bytes?: never; h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; bytes?: never; h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatAllocationResponse = CatAllocationAllocationRecord[]; export interface CatComponentTemplatesComponentTemplate { name: string; version: string | null; alias_count: string; mapping_count: string; settings_count: string; metadata_count: string; included_in: string; } export interface CatComponentTemplatesRequest extends CatCatRequestBase { /** The name of the component template. * It accepts wildcard expressions. * If it is omitted, all component templates are returned. */ name?: string; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** The period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body.
*/ body?: string | ({ [key: string]: any; } & { name?: never; h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[]; export interface CatCountCountRecord { /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** seconds since 1970-01-01 00:00:00 * @alias epoch */ t?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** seconds since 1970-01-01 00:00:00 * @alias epoch */ time?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** time in HH:MM:SS */ timestamp?: TimeOfDay; /** time in HH:MM:SS * @alias timestamp */ ts?: TimeOfDay; /** time in HH:MM:SS * @alias timestamp */ hms?: TimeOfDay; /** time in HH:MM:SS * @alias timestamp */ hhmmss?: TimeOfDay; /** the document count */ count?: string; /** the document count * @alias count */ dc?: string; /** the document count * @alias count */ 'docs.count'?: string; /** the document count * @alias count */ docsCount?: string; } export interface CatCountRequest extends CatCatRequestBase { /** A comma-separated list of data streams, indices, and aliases used to limit the request. * It supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; h?: never; s?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; h?: never; s?: never; }; } export type CatCountResponse = CatCountCountRecord[]; export interface CatFielddataFielddataRecord { /** node id */ id?: string; /** host name */ host?: string; /** host name * @alias host */ h?: string; /** ip address */ ip?: string; /** node name */ node?: string; /** node name * @alias node */ n?: string; /** field name */ field?: string; /** field name * @alias field */ f?: string; /** field data usage */ size?: string; } export interface CatFielddataRequest extends CatCatRequestBase { /** Comma-separated list of fields used to limit returned information. * To retrieve all fields, omit this parameter. */ fields?: Fields; /** The unit used to display byte values. */ bytes?: Bytes; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { fields?: never; bytes?: never; h?: never; s?: never; }); /** All values in `querystring` will be added to the request querystring.
*/ querystring?: { [key: string]: any; } & { fields?: never; bytes?: never; h?: never; s?: never; }; } export type CatFielddataResponse = CatFielddataFielddataRecord[]; export interface CatHealthHealthRecord { /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** seconds since 1970-01-01 00:00:00 * @alias epoch */ time?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** time in HH:MM:SS */ timestamp?: TimeOfDay; /** time in HH:MM:SS * @alias timestamp */ ts?: TimeOfDay; /** time in HH:MM:SS * @alias timestamp */ hms?: TimeOfDay; /** time in HH:MM:SS * @alias timestamp */ hhmmss?: TimeOfDay; /** cluster name */ cluster?: string; /** cluster name * @alias cluster */ cl?: string; /** health status */ status?: string; /** health status * @alias status */ st?: string; /** total number of nodes */ 'node.total'?: string; /** total number of nodes * @alias 'node.total' */ nt?: string; /** total number of nodes * @alias 'node.total' */ nodeTotal?: string; /** number of nodes that can store data */ 'node.data'?: string; /** number of nodes that can store data * @alias 'node.data' */ nd?: string; /** number of nodes that can store data * @alias 'node.data' */ nodeData?: string; /** total number of shards */ shards?: string; /** total number of shards * @alias shards */ t?: string; /** total number of shards * @alias shards */ sh?: string; /** total number of shards * @alias shards */ 'shards.total'?: string; /** total number of shards * @alias shards */ shardsTotal?: string; /** number of primary shards */ pri?: string; /** number of primary shards * @alias pri */ p?: string; /** number of primary shards * @alias pri */ 'shards.primary'?: string; /** number of primary shards * @alias pri */ shardsPrimary?: string; /** number of relocating shards */ relo?: string; /** number of relocating shards * @alias relo */ r?: string; /** number of relocating shards * @alias relo */ 'shards.relocating'?: string; /** number of relocating shards * @alias relo */ shardsRelocating?: string; /** number of initializing shards */ init?: string; /** number of initializing shards * @alias init */ i?: string; /** number of initializing shards * @alias init */ 'shards.initializing'?: string; /** number of initializing shards * @alias init */ shardsInitializing?: string; /** number of unassigned primary shards */ 'unassign.pri'?: string; /** number of unassigned primary shards * @alias 'unassign.pri' */ up?: string; /** number of unassigned primary shards * @alias 'unassign.pri' */ 'shards.unassigned.primary'?: string; /** number of unassigned primary shards * @alias 'unassign.pri' */ shardsUnassignedPrimary?: string; /** number of unassigned shards */ unassign?: string; /** number of unassigned shards * @alias unassign */ u?: string; /** number of unassigned shards * @alias unassign */ 'shards.unassigned'?: string; /** number of unassigned shards * @alias unassign */ shardsUnassigned?: string; /** number of pending tasks */ pending_tasks?: string; /** number of pending tasks * @alias pending_tasks */ pt?: string; /** number of pending tasks * @alias pending_tasks */ pendingTasks?: string; /** wait time of longest task pending */ max_task_wait_time?: string; /** wait time of longest task pending * @alias max_task_wait_time */ mtwt?: string; /** wait time of longest task pending * @alias max_task_wait_time */ maxTaskWaitTime?: string; /** active number of shards in percent */ active_shards_percent?: string; /** active number of shards in percent * @alias active_shards_percent */ asp?: string; /** active number of shards in percent *
@alias active_shards_percent */ activeShardsPercent?: string; } export interface CatHealthRequest extends CatCatRequestBase { /** The unit used to display time values. */ time?: TimeUnit; /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { time?: never; ts?: never; h?: never; s?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { time?: never; ts?: never; h?: never; s?: never; }; } export type CatHealthResponse = CatHealthHealthRecord[]; export interface CatHelpRequest { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface CatHelpResponse { } export interface CatIndicesIndicesRecord { /** current health status */ health?: string; /** current health status * @alias health */ h?: string; /** open/close status */ status?: string; /** open/close status * @alias status */ s?: string; /** index name */ index?: string; /** index name * @alias index */ i?: string; /** index name * @alias index */ idx?: string; /** index uuid */ uuid?: string; /** index uuid * @alias uuid */ id?: string; /** number of primary shards */ pri?: string; /** number of primary shards * @alias pri */ p?: string; /** number of primary shards * @alias pri */ 'shards.primary'?: string; /** number of primary shards * @alias pri */ shardsPrimary?: string; /** number of replica shards */ rep?: string; /** number of replica shards * @alias rep */ r?: string; /** number of replica shards * @alias rep */ 'shards.replica'?: string; /** number of replica shards * @alias rep */ shardsReplica?: string; /** available docs */ 'docs.count'?: string | null; /** available docs * @alias 'docs.count' */ dc?: string | null; /** available docs * @alias 'docs.count' */ docsCount?: string | null; /** deleted docs */ 'docs.deleted'?: string | null; /** deleted docs * @alias 'docs.deleted' */ dd?: string | null; /** deleted docs * @alias 'docs.deleted' */ docsDeleted?: string | null; /** index creation date (millisecond value) */ 'creation.date'?: string; /** index creation date (millisecond value) * @alias 'creation.date' */ cd?: string; /** index creation date (as string) */ 'creation.date.string'?: string; /** index creation date (as string) * @alias 'creation.date.string' */ cds?: string; /** store size of primaries & replicas */ 'store.size'?: string | null; /** store size of primaries & replicas * @alias 'store.size' */ ss?: string | null; /** store size of primaries & replicas * @alias 'store.size' */ storeSize?: string | null; /** store size of primaries */ 'pri.store.size'?: string | null; /** total size of dataset (including the cache for partially mounted indices) */ 'dataset.size'?: string | null; /** size of completion */ 'completion.size'?: string; /** size of completion * @alias 'completion.size' */ cs?: string; /** size of completion * @alias 'completion.size' */ completionSize?: string; /** size of completion */ 'pri.completion.size'?: string; /** used fielddata 
cache */ 'fielddata.memory_size'?: string; /** used fielddata cache * @alias 'fielddata.memory_size' */ fm?: string; /** used fielddata cache * @alias 'fielddata.memory_size' */ fielddataMemory?: string; /** used fielddata cache */ 'pri.fielddata.memory_size'?: string; /** fielddata evictions */ 'fielddata.evictions'?: string; /** fielddata evictions * @alias 'fielddata.evictions' */ fe?: string; /** fielddata evictions * @alias 'fielddata.evictions' */ fielddataEvictions?: string; /** fielddata evictions */ 'pri.fielddata.evictions'?: string; /** used query cache */ 'query_cache.memory_size'?: string; /** used query cache * @alias 'query_cache.memory_size' */ qcm?: string; /** used query cache * @alias 'query_cache.memory_size' */ queryCacheMemory?: string; /** used query cache */ 'pri.query_cache.memory_size'?: string; /** query cache evictions */ 'query_cache.evictions'?: string; /** query cache evictions * @alias 'query_cache.evictions' */ qce?: string; /** query cache evictions * @alias 'query_cache.evictions' */ queryCacheEvictions?: string; /** query cache evictions */ 'pri.query_cache.evictions'?: string; /** used request cache */ 'request_cache.memory_size'?: string; /** used request cache * @alias 'request_cache.memory_size' */ rcm?: string; /** used request cache * @alias 'request_cache.memory_size' */ requestCacheMemory?: string; /** used request cache */ 'pri.request_cache.memory_size'?: string; /** request cache evictions */ 'request_cache.evictions'?: string; /** request cache evictions * @alias 'request_cache.evictions' */ rce?: string; /** request cache evictions * @alias 'request_cache.evictions' */ requestCacheEvictions?: string; /** request cache evictions */ 'pri.request_cache.evictions'?: string; /** request cache hit count */ 'request_cache.hit_count'?: string; /** request cache hit count * @alias 'request_cache.hit_count' */ rchc?: string; /** request cache hit count * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string; /** request cache hit count */ 'pri.request_cache.hit_count'?: string; /** request cache miss count */ 'request_cache.miss_count'?: string; /** request cache miss count * @alias 'request_cache.miss_count' */ rcmc?: string; /** request cache miss count * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string; /** request cache miss count */ 'pri.request_cache.miss_count'?: string; /** number of flushes */ 'flush.total'?: string; /** number of flushes * @alias 'flush.total' */ ft?: string; /** number of flushes * @alias 'flush.total' */ flushTotal?: string; /** number of flushes */ 'pri.flush.total'?: string; /** time spent in flush */ 'flush.total_time'?: string; /** time spent in flush * @alias 'flush.total_time' */ ftt?: string; /** time spent in flush * @alias 'flush.total_time' */ flushTotalTime?: string; /** time spent in flush */ 'pri.flush.total_time'?: string; /** number of current get ops */ 'get.current'?: string; /** number of current get ops * @alias 'get.current' */ gc?: string; /** number of current get ops * @alias 'get.current' */ getCurrent?: string; /** number of current get ops */ 'pri.get.current'?: string; /** time spent in get */ 'get.time'?: string; /** time spent in get * @alias 'get.time' */ gti?: string; /** time spent in get * @alias 'get.time' */ getTime?: string; /** time spent in get */ 'pri.get.time'?: string; /** number of get ops */ 'get.total'?: string; /** number of get ops * @alias 'get.total' */ gto?: string; /** number of get ops * @alias 'get.total' */ getTotal?: string; /** number of 
get ops */ 'pri.get.total'?: string; /** time spent in successful gets */ 'get.exists_time'?: string; /** time spent in successful gets * @alias 'get.exists_time' */ geti?: string; /** time spent in successful gets * @alias 'get.exists_time' */ getExistsTime?: string; /** time spent in successful gets */ 'pri.get.exists_time'?: string; /** number of successful gets */ 'get.exists_total'?: string; /** number of successful gets * @alias 'get.exists_total' */ geto?: string; /** number of successful gets * @alias 'get.exists_total' */ getExistsTotal?: string; /** number of successful gets */ 'pri.get.exists_total'?: string; /** time spent in failed gets */ 'get.missing_time'?: string; /** time spent in failed gets * @alias 'get.missing_time' */ gmti?: string; /** time spent in failed gets * @alias 'get.missing_time' */ getMissingTime?: string; /** time spent in failed gets */ 'pri.get.missing_time'?: string; /** number of failed gets */ 'get.missing_total'?: string; /** number of failed gets * @alias 'get.missing_total' */ gmto?: string; /** number of failed gets * @alias 'get.missing_total' */ getMissingTotal?: string; /** number of failed gets */ 'pri.get.missing_total'?: string; /** number of current deletions */ 'indexing.delete_current'?: string; /** number of current deletions * @alias 'indexing.delete_current' */ idc?: string; /** number of current deletions * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string; /** number of current deletions */ 'pri.indexing.delete_current'?: string; /** time spent in deletions */ 'indexing.delete_time'?: string; /** time spent in deletions * @alias 'indexing.delete_time' */ idti?: string; /** time spent in deletions * @alias 'indexing.delete_time' */ indexingDeleteTime?: string; /** time spent in deletions */ 'pri.indexing.delete_time'?: string; /** number of delete ops */ 'indexing.delete_total'?: string; /** number of delete ops * @alias 'indexing.delete_total' */ idto?: string; /** number of delete ops * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string; /** number of delete ops */ 'pri.indexing.delete_total'?: string; /** number of current indexing ops */ 'indexing.index_current'?: string; /** number of current indexing ops * @alias 'indexing.index_current' */ iic?: string; /** number of current indexing ops * @alias 'indexing.index_current' */ indexingIndexCurrent?: string; /** number of current indexing ops */ 'pri.indexing.index_current'?: string; /** time spent in indexing */ 'indexing.index_time'?: string; /** time spent in indexing * @alias 'indexing.index_time' */ iiti?: string; /** time spent in indexing * @alias 'indexing.index_time' */ indexingIndexTime?: string; /** time spent in indexing */ 'pri.indexing.index_time'?: string; /** number of indexing ops */ 'indexing.index_total'?: string; /** number of indexing ops * @alias 'indexing.index_total' */ iito?: string; /** number of indexing ops * @alias 'indexing.index_total' */ indexingIndexTotal?: string; /** number of indexing ops */ 'pri.indexing.index_total'?: string; /** number of failed indexing ops */ 'indexing.index_failed'?: string; /** number of failed indexing ops * @alias 'indexing.index_failed' */ iif?: string; /** number of failed indexing ops * @alias 'indexing.index_failed' */ indexingIndexFailed?: string; /** number of failed indexing ops */ 'pri.indexing.index_failed'?: string; /** number of current merges */ 'merges.current'?: string; /** number of current merges * @alias 'merges.current' */ mc?: string; /** number of current merges * @alias 
'merges.current' */ mergesCurrent?: string; /** number of current merges */ 'pri.merges.current'?: string; /** number of current merging docs */ 'merges.current_docs'?: string; /** number of current merging docs * @alias 'merges.current_docs' */ mcd?: string; /** number of current merging docs * @alias 'merges.current_docs' */ mergesCurrentDocs?: string; /** number of current merging docs */ 'pri.merges.current_docs'?: string; /** size of current merges */ 'merges.current_size'?: string; /** size of current merges * @alias 'merges.current_size' */ mcs?: string; /** size of current merges * @alias 'merges.current_size' */ mergesCurrentSize?: string; /** size of current merges */ 'pri.merges.current_size'?: string; /** number of completed merge ops */ 'merges.total'?: string; /** number of completed merge ops * @alias 'merges.total' */ mt?: string; /** number of completed merge ops * @alias 'merges.total' */ mergesTotal?: string; /** number of completed merge ops */ 'pri.merges.total'?: string; /** docs merged */ 'merges.total_docs'?: string; /** docs merged * @alias 'merges.total_docs' */ mtd?: string; /** docs merged * @alias 'merges.total_docs' */ mergesTotalDocs?: string; /** docs merged */ 'pri.merges.total_docs'?: string; /** size merged */ 'merges.total_size'?: string; /** size merged * @alias 'merges.total_size' */ mts?: string; /** size merged * @alias 'merges.total_size' */ mergesTotalSize?: string; /** size merged */ 'pri.merges.total_size'?: string; /** time spent in merges */ 'merges.total_time'?: string; /** time spent in merges * @alias 'merges.total_time' */ mtt?: string; /** time spent in merges * @alias 'merges.total_time' */ mergesTotalTime?: string; /** time spent in merges */ 'pri.merges.total_time'?: string; /** total refreshes */ 'refresh.total'?: string; /** total refreshes * @alias 'refresh.total' */ rto?: string; /** total refreshes * @alias 'refresh.total' */ refreshTotal?: string; /** total refreshes */ 'pri.refresh.total'?: string; /** time spent in refreshes */ 'refresh.time'?: string; /** time spent in refreshes * @alias 'refresh.time' */ rti?: string; /** time spent in refreshes * @alias 'refresh.time' */ refreshTime?: string; /** time spent in refreshes */ 'pri.refresh.time'?: string; /** total external refreshes */ 'refresh.external_total'?: string; /** total external refreshes * @alias 'refresh.external_total' */ reto?: string; /** total external refreshes */ 'pri.refresh.external_total'?: string; /** time spent in external refreshes */ 'refresh.external_time'?: string; /** time spent in external refreshes * @alias 'refresh.external_time' */ reti?: string; /** time spent in external refreshes */ 'pri.refresh.external_time'?: string; /** number of pending refresh listeners */ 'refresh.listeners'?: string; /** number of pending refresh listeners * @alias 'refresh.listeners' */ rli?: string; /** number of pending refresh listeners * @alias 'refresh.listeners' */ refreshListeners?: string; /** number of pending refresh listeners */ 'pri.refresh.listeners'?: string; /** current fetch phase ops */ 'search.fetch_current'?: string; /** current fetch phase ops * @alias 'search.fetch_current' */ sfc?: string; /** current fetch phase ops * @alias 'search.fetch_current' */ searchFetchCurrent?: string; /** current fetch phase ops */ 'pri.search.fetch_current'?: string; /** time spent in fetch phase */ 'search.fetch_time'?: string; /** time spent in fetch phase * @alias 'search.fetch_time' */ sfti?: string; /** time spent in fetch phase * @alias 'search.fetch_time' */ 
searchFetchTime?: string; /** time spent in fetch phase */ 'pri.search.fetch_time'?: string; /** total fetch ops */ 'search.fetch_total'?: string; /** total fetch ops * @alias 'search.fetch_total' */ sfto?: string; /** total fetch ops * @alias 'search.fetch_total' */ searchFetchTotal?: string; /** total fetch ops */ 'pri.search.fetch_total'?: string; /** open search contexts */ 'search.open_contexts'?: string; /** open search contexts * @alias 'search.open_contexts' */ so?: string; /** open search contexts * @alias 'search.open_contexts' */ searchOpenContexts?: string; /** open search contexts */ 'pri.search.open_contexts'?: string; /** current query phase ops */ 'search.query_current'?: string; /** current query phase ops * @alias 'search.query_current' */ sqc?: string; /** current query phase ops * @alias 'search.query_current' */ searchQueryCurrent?: string; /** current query phase ops */ 'pri.search.query_current'?: string; /** time spent in query phase */ 'search.query_time'?: string; /** time spent in query phase * @alias 'search.query_time' */ sqti?: string; /** time spent in query phase * @alias 'search.query_time' */ searchQueryTime?: string; /** time spent in query phase */ 'pri.search.query_time'?: string; /** total query phase ops */ 'search.query_total'?: string; /** total query phase ops * @alias 'search.query_total' */ sqto?: string; /** total query phase ops * @alias 'search.query_total' */ searchQueryTotal?: string; /** total query phase ops */ 'pri.search.query_total'?: string; /** open scroll contexts */ 'search.scroll_current'?: string; /** open scroll contexts * @alias 'search.scroll_current' */ scc?: string; /** open scroll contexts * @alias 'search.scroll_current' */ searchScrollCurrent?: string; /** open scroll contexts */ 'pri.search.scroll_current'?: string; /** time scroll contexts held open */ 'search.scroll_time'?: string; /** time scroll contexts held open * @alias 'search.scroll_time' */ scti?: string; /** time scroll contexts held open * @alias 'search.scroll_time' */ searchScrollTime?: string; /** time scroll contexts held open */ 'pri.search.scroll_time'?: string; /** completed scroll contexts */ 'search.scroll_total'?: string; /** completed scroll contexts * @alias 'search.scroll_total' */ scto?: string; /** completed scroll contexts * @alias 'search.scroll_total' */ searchScrollTotal?: string; /** completed scroll contexts */ 'pri.search.scroll_total'?: string; /** number of segments */ 'segments.count'?: string; /** number of segments * @alias 'segments.count' */ sc?: string; /** number of segments * @alias 'segments.count' */ segmentsCount?: string; /** number of segments */ 'pri.segments.count'?: string; /** memory used by segments */ 'segments.memory'?: string; /** memory used by segments * @alias 'segments.memory' */ sm?: string; /** memory used by segments * @alias 'segments.memory' */ segmentsMemory?: string; /** memory used by segments */ 'pri.segments.memory'?: string; /** memory used by index writer */ 'segments.index_writer_memory'?: string; /** memory used by index writer * @alias 'segments.index_writer_memory' */ siwm?: string; /** memory used by index writer * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string; /** memory used by index writer */ 'pri.segments.index_writer_memory'?: string; /** memory used by version map */ 'segments.version_map_memory'?: string; /** memory used by version map * @alias 'segments.version_map_memory' */ svmm?: string; /** memory used by version map * @alias 
'segments.version_map_memory' */ segmentsVersionMapMemory?: string; /** memory used by version map */ 'pri.segments.version_map_memory'?: string; /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields */ 'segments.fixed_bitset_memory'?: string; /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields * @alias 'segments.fixed_bitset_memory' */ sfbm?: string; /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string; /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields */ 'pri.segments.fixed_bitset_memory'?: string; /** current warmer ops */ 'warmer.current'?: string; /** current warmer ops * @alias 'warmer.current' */ wc?: string; /** current warmer ops * @alias 'warmer.current' */ warmerCurrent?: string; /** current warmer ops */ 'pri.warmer.current'?: string; /** total warmer ops */ 'warmer.total'?: string; /** total warmer ops * @alias 'warmer.total' */ wto?: string; /** total warmer ops * @alias 'warmer.total' */ warmerTotal?: string; /** total warmer ops */ 'pri.warmer.total'?: string; /** time spent in warmers */ 'warmer.total_time'?: string; /** time spent in warmers * @alias 'warmer.total_time' */ wtt?: string; /** time spent in warmers * @alias 'warmer.total_time' */ warmerTotalTime?: string; /** time spent in warmers */ 'pri.warmer.total_time'?: string; /** number of current suggest ops */ 'suggest.current'?: string; /** number of current suggest ops * @alias 'suggest.current' */ suc?: string; /** number of current suggest ops * @alias 'suggest.current' */ suggestCurrent?: string; /** number of current suggest ops */ 'pri.suggest.current'?: string; /** time spent in suggest */ 'suggest.time'?: string; /** time spent in suggest * @alias 'suggest.time' */ suti?: string; /** time spent in suggest * @alias 'suggest.time' */ suggestTime?: string; /** time spent in suggest */ 'pri.suggest.time'?: string; /** number of suggest ops */ 'suggest.total'?: string; /** number of suggest ops * @alias 'suggest.total' */ suto?: string; /** number of suggest ops * @alias 'suggest.total' */ suggestTotal?: string; /** number of suggest ops */ 'pri.suggest.total'?: string; /** total used memory */ 'memory.total'?: string; /** total used memory * @alias 'memory.total' */ tm?: string; /** total used memory * @alias 'memory.total' */ memoryTotal?: string; /** total used memory */ 'pri.memory.total'?: string; /** indicates if the index is search throttled */ 'search.throttled'?: string; /** indicates if the index is search throttled * @alias 'search.throttled' */ sth?: string; /** number of bulk shard ops */ 'bulk.total_operations'?: string; /** number of bulk shard ops * @alias 'bulk.total_operations' */ bto?: string; /** number of bulk shard ops * @alias 'bulk.total_operations' */ bulkTotalOperation?: string; /** number of bulk shard ops */ 'pri.bulk.total_operations'?: string; /** time spent in shard bulk */ 'bulk.total_time'?: string; /** time spent in shard bulk * @alias 'bulk.total_time' */ btti?: string; /** time spent in shard bulk * @alias 'bulk.total_time' */ bulkTotalTime?: string; /** time spent in shard bulk */ 'pri.bulk.total_time'?: string; /** total size in bytes of shard bulk */ 'bulk.total_size_in_bytes'?: string; /** total size in bytes of shard
bulk * @alias 'bulk.total_size_in_bytes' */ btsi?: string; /** total size in bytes of shard bulk * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string; /** total size in bytes of shard bulk */ 'pri.bulk.total_size_in_bytes'?: string; /** average time spent in shard bulk */ 'bulk.avg_time'?: string; /** average time spent in shard bulk * @alias 'bulk.avg_time' */ bati?: string; /** average time spent in shard bulk * @alias 'bulk.avg_time' */ bulkAvgTime?: string; /** average time spent in shard bulk */ 'pri.bulk.avg_time'?: string; /** average size in bytes of shard bulk */ 'bulk.avg_size_in_bytes'?: string; /** average size in bytes of shard bulk * @alias 'bulk.avg_size_in_bytes' */ basi?: string; /** average size in bytes of shard bulk * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string; /** average size in bytes of shard bulk */ 'pri.bulk.avg_size_in_bytes'?: string; } export interface CatIndicesRequest extends CatCatRequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** The unit used to display byte values. */ bytes?: Bytes; /** The type of index that wildcard patterns can match. */ expand_wildcards?: ExpandWildcards; /** The health status used to limit returned indices. By default, the response includes indices of any health status. */ health?: HealthStatus; /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean; /** If true, the response only includes information from primary shards. */ pri?: boolean; /** The unit used to display time values. */ time?: TimeUnit; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; bytes?: never; expand_wildcards?: never; health?: never; include_unloaded_segments?: never; pri?: never; time?: never; master_timeout?: never; h?: never; s?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; bytes?: never; expand_wildcards?: never; health?: never; include_unloaded_segments?: never; pri?: never; time?: never; master_timeout?: never; h?: never; s?: never; }; } export type CatIndicesResponse = CatIndicesIndicesRecord[]; export interface CatMasterMasterRecord { /** node id */ id?: string; /** host name */ host?: string; /** host name * @alias host */ h?: string; /** ip address */ ip?: string; /** node name */ node?: string; /** node name * @alias node */ n?: string; } export interface CatMasterRequest extends CatCatRequestBase { /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state.
If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatMasterResponse = CatMasterMasterRecord[]; export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { /** The identifier for the job. */ id?: Id; /** The type of analysis that the job performs. */ type?: string; /** The type of analysis that the job performs. * @alias type */ t?: string; /** The time when the job was created. */ create_time?: string; /** The time when the job was created. * @alias create_time */ ct?: string; /** The time when the job was created. * @alias create_time */ createTime?: string; /** The version of Elasticsearch when the job was created. */ version?: VersionString; /** The version of Elasticsearch when the job was created. * @alias version */ v?: VersionString; /** The name of the source index. */ source_index?: IndexName; /** The name of the source index. * @alias source_index */ si?: IndexName; /** The name of the source index. * @alias source_index */ sourceIndex?: IndexName; /** The name of the destination index. */ dest_index?: IndexName; /** The name of the destination index. * @alias dest_index */ di?: IndexName; /** The name of the destination index. * @alias dest_index */ destIndex?: IndexName; /** A description of the job. */ description?: string; /** A description of the job. * @alias description */ d?: string; /** The approximate maximum amount of memory resources that are permitted for the job. */ model_memory_limit?: string; /** The approximate maximum amount of memory resources that are permitted for the job. * @alias model_memory_limit */ mml?: string; /** The approximate maximum amount of memory resources that are permitted for the job. * @alias model_memory_limit */ modelMemoryLimit?: string; /** The current status of the job. */ state?: string; /** The current status of the job. * @alias state */ s?: string; /** Messages about the reason why the job failed. */ failure_reason?: string; /** Messages about the reason why the job failed. * @alias failure_reason */ fr?: string; /** Messages about the reason why the job failed. * @alias failure_reason */ failureReason?: string; /** The progress report for the job by phase. */ progress?: string; /** The progress report for the job by phase. * @alias progress */ p?: string; /** Messages related to the selection of a node. */ assignment_explanation?: string; /** Messages related to the selection of a node. * @alias assignment_explanation */ ae?: string; /** Messages related to the selection of a node. * @alias assignment_explanation */ assignmentExplanation?: string; /** The unique identifier of the assigned node. */ 'node.id'?: Id; /** The unique identifier of the assigned node. * @alias 'node.id' */ ni?: Id; /** The unique identifier of the assigned node. * @alias 'node.id' */ nodeId?: Id; /** The name of the assigned node. */ 'node.name'?: Name; /** The name of the assigned node.
* @alias 'node.name' */ nn?: Name; /** The name of the assigned node. * @alias 'node.name' */ nodeName?: Name; /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: Id; /** The ephemeral identifier of the assigned node. * @alias 'node.ephemeral_id' */ ne?: Id; /** The ephemeral identifier of the assigned node. * @alias 'node.ephemeral_id' */ nodeEphemeralId?: Id; /** The network address of the assigned node. */ 'node.address'?: string; /** The network address of the assigned node. * @alias 'node.address' */ na?: string; /** The network address of the assigned node. * @alias 'node.address' */ nodeAddress?: string; } export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { /** The ID of the data frame analytics to fetch */ id?: Id; /** Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) */ allow_no_match?: boolean; /** The unit in which to display byte values */ bytes?: Bytes; /** Comma-separated list of column names to display. */ h?: CatCatDfaColumns; /** Comma-separated list of column names or column aliases used to sort the * response. */ s?: CatCatDfaColumns; /** Unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; allow_no_match?: never; bytes?: never; h?: never; s?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; allow_no_match?: never; bytes?: never; h?: never; s?: never; time?: never; }; } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[]; export interface CatMlDatafeedsDatafeedsRecord { /** The datafeed identifier. */ id?: string; /** The status of the datafeed. */ state?: MlDatafeedState; /** The status of the datafeed. * @alias state */ s?: MlDatafeedState; /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string; /** For started datafeeds only, contains messages relating to the selection of a node. * @alias assignment_explanation */ ae?: string; /** The number of buckets processed. */ 'buckets.count'?: string; /** The number of buckets processed. * @alias 'buckets.count' */ bc?: string; /** The number of buckets processed. * @alias 'buckets.count' */ bucketsCount?: string; /** The number of searches run by the datafeed. */ 'search.count'?: string; /** The number of searches run by the datafeed. * @alias 'search.count' */ sc?: string; /** The number of searches run by the datafeed. * @alias 'search.count' */ searchCount?: string; /** The total time the datafeed spent searching, in milliseconds. */ 'search.time'?: string; /** The total time the datafeed spent searching, in milliseconds. * @alias 'search.time' */ st?: string; /** The total time the datafeed spent searching, in milliseconds. * @alias 'search.time' */ searchTime?: string; /** The average search time per bucket, in milliseconds. */ 'search.bucket_avg'?: string; /** The average search time per bucket, in milliseconds. * @alias 'search.bucket_avg' */ sba?: string; /** The average search time per bucket, in milliseconds. * @alias 'search.bucket_avg' */ searchBucketAvg?: string; /** The exponential average search time per hour, in milliseconds. */ 'search.exp_avg_hour'?: string; /** The exponential average search time per hour, in milliseconds. 
* @alias 'search.exp_avg_hour' */ seah?: string; /** The exponential average search time per hour, in milliseconds. * @alias 'search.exp_avg_hour' */ searchExpAvgHour?: string; /** The unique identifier of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.id'?: string; /** The unique identifier of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.id' */ ni?: string; /** The unique identifier of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.id' */ nodeId?: string; /** The name of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.name'?: string; /** The name of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.name' */ nn?: string; /** The name of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.name' */ nodeName?: string; /** The ephemeral identifier of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.ephemeral_id'?: string; /** The ephemeral identifier of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.ephemeral_id' */ ne?: string; /** The ephemeral identifier of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.ephemeral_id' */ nodeEphemeralId?: string; /** The network address of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.address'?: string; /** The network address of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.address' */ na?: string; /** The network address of the assigned node. * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @alias 'node.address' */ nodeAddress?: string; } export interface CatMlDatafeedsRequest extends CatCatRequestBase { /** A numerical character string that uniquely identifies the datafeed. */ datafeed_id?: Id; /** Specifies what to do when the request: * * * Contains wildcard expressions and there are no datafeeds that match. * * Contains the `_all` string or no identifiers and there are no matches. * * Contains wildcard expressions and there are only partial matches. * * If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when * there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only * partial matches. */ allow_no_match?: boolean; /** Comma-separated list of column names to display. */ h?: CatCatDatafeedColumns; /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatDatafeedColumns; /** The unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. 
*/ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; h?: never; s?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; h?: never; s?: never; time?: never; }; } export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[]; export interface CatMlJobsJobsRecord { /** The anomaly detection job identifier. */ id?: Id; /** The status of the anomaly detection job. */ state?: MlJobState; /** The status of the anomaly detection job. * @alias state */ s?: MlJobState; /** For open jobs only, the amount of time the job has been opened. */ opened_time?: string; /** For open jobs only, the amount of time the job has been opened. * @alias opened_time */ ot?: string; /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string; /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. * @alias assignment_explanation */ ae?: string; /** The number of input documents that have been processed by the anomaly detection job. * This value includes documents with missing fields, since they are nonetheless analyzed. * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. */ 'data.processed_records'?: string; /** The number of input documents that have been processed by the anomaly detection job. * This value includes documents with missing fields, since they are nonetheless analyzed. * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. * @alias 'data.processed_records' */ dpr?: string; /** The number of input documents that have been processed by the anomaly detection job. * This value includes documents with missing fields, since they are nonetheless analyzed. * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. * @alias 'data.processed_records' */ dataProcessedRecords?: string; /** The total number of fields in all the documents that have been processed by the anomaly detection job. * Only fields that are specified in the detector configuration object contribute to this count. * The timestamp is not included in this count. */ 'data.processed_fields'?: string; /** The total number of fields in all the documents that have been processed by the anomaly detection job. * Only fields that are specified in the detector configuration object contribute to this count. * The timestamp is not included in this count. * @alias 'data.processed_fields' */ dpf?: string; /** The total number of fields in all the documents that have been processed by the anomaly detection job. * Only fields that are specified in the detector configuration object contribute to this count. * The timestamp is not included in this count. * @alias 'data.processed_fields' */ dataProcessedFields?: string; /** The number of bytes of input data posted to the anomaly detection job. */ 'data.input_bytes'?: ByteSize; /** The number of bytes of input data posted to the anomaly detection job. 
* @alias 'data.input_bytes' */ dib?: ByteSize; /** The number of bytes of input data posted to the anomaly detection job. * @alias 'data.input_bytes' */ dataInputBytes?: ByteSize; /** The number of input documents posted to the anomaly detection job. */ 'data.input_records'?: string; /** The number of input documents posted to the anomaly detection job. * @alias 'data.input_records' */ dir?: string; /** The number of input documents posted to the anomaly detection job. * @alias 'data.input_records' */ dataInputRecords?: string; /** The total number of fields in input documents posted to the anomaly detection job. * This count includes fields that are not used in the analysis. * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. */ 'data.input_fields'?: string; /** The total number of fields in input documents posted to the anomaly detection job. * This count includes fields that are not used in the analysis. * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. * @alias 'data.input_fields' */ dif?: string; /** The total number of fields in input documents posted to the anomaly detection job. * This count includes fields that are not used in the analysis. * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. * @alias 'data.input_fields' */ dataInputFields?: string; /** The number of input documents with either a missing date field or a date that could not be parsed. */ 'data.invalid_dates'?: string; /** The number of input documents with either a missing date field or a date that could not be parsed. * @alias 'data.invalid_dates' */ did?: string; /** The number of input documents with either a missing date field or a date that could not be parsed. * @alias 'data.invalid_dates' */ dataInvalidDates?: string; /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. * Input documents with missing fields are still processed because it is possible that not all fields are missing. * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. * It is not necessarily a cause for concern. */ 'data.missing_fields'?: string; /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. * Input documents with missing fields are still processed because it is possible that not all fields are missing. * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. * It is not necessarily a cause for concern. * @alias 'data.missing_fields' */ dmf?: string; /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. * Input documents with missing fields are still processed because it is possible that not all fields are missing. * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. * It is not necessarily a cause for concern. 
* @alias 'data.missing_fields' */ dataMissingFields?: string; /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. * This information is applicable only when you provide data to the anomaly detection job by using the post data API. * These out-of-order documents are discarded, since jobs require time series data to be in ascending chronological order. */ 'data.out_of_order_timestamps'?: string; /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. * This information is applicable only when you provide data to the anomaly detection job by using the post data API. * These out-of-order documents are discarded, since jobs require time series data to be in ascending chronological order. * @alias 'data.out_of_order_timestamps' */ doot?: string; /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. * This information is applicable only when you provide data to the anomaly detection job by using the post data API. * These out-of-order documents are discarded, since jobs require time series data to be in ascending chronological order. * @alias 'data.out_of_order_timestamps' */ dataOutOfOrderTimestamps?: string; /** The number of buckets which did not contain any data. * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data, such as `mean`, `non_null_sum`, or `non_zero_count`. */ 'data.empty_buckets'?: string; /** The number of buckets which did not contain any data. * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data, such as `mean`, `non_null_sum`, or `non_zero_count`. * @alias 'data.empty_buckets' */ deb?: string; /** The number of buckets which did not contain any data. * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data, such as `mean`, `non_null_sum`, or `non_zero_count`. * @alias 'data.empty_buckets' */ dataEmptyBuckets?: string; /** The number of buckets that contained few data points compared to the expected number of data points. * If your data contains many sparse buckets, consider using a longer `bucket_span`. */ 'data.sparse_buckets'?: string; /** The number of buckets that contained few data points compared to the expected number of data points. * If your data contains many sparse buckets, consider using a longer `bucket_span`. * @alias 'data.sparse_buckets' */ dsb?: string; /** The number of buckets that contained few data points compared to the expected number of data points. * If your data contains many sparse buckets, consider using a longer `bucket_span`. * @alias 'data.sparse_buckets' */ dataSparseBuckets?: string; /** The total number of buckets processed. */ 'data.buckets'?: string; /** The total number of buckets processed. * @alias 'data.buckets' */ db?: string; /** The total number of buckets processed. * @alias 'data.buckets' */ dataBuckets?: string; /** The timestamp of the chronologically earliest input document. */ 'data.earliest_record'?: string; /** The timestamp of the chronologically earliest input document. 
* @alias 'data.earliest_record' */ der?: string; /** The timestamp of the chronologically earliest input document. * @alias 'data.earliest_record' */ dataEarliestRecord?: string; /** The timestamp of the chronologically latest input document. */ 'data.latest_record'?: string; /** The timestamp of the chronologically latest input document. * @alias 'data.latest_record' */ dlr?: string; /** The timestamp of the chronologically latest input document. * @alias 'data.latest_record' */ dataLatestRecord?: string; /** The timestamp at which data was last analyzed, according to server time. */ 'data.last'?: string; /** The timestamp at which data was last analyzed, according to server time. * @alias 'data.last' */ dl?: string; /** The timestamp at which data was last analyzed, according to server time. * @alias 'data.last' */ dataLast?: string; /** The timestamp of the last bucket that did not contain any data. */ 'data.last_empty_bucket'?: string; /** The timestamp of the last bucket that did not contain any data. * @alias 'data.last_empty_bucket' */ dleb?: string; /** The timestamp of the last bucket that did not contain any data. * @alias 'data.last_empty_bucket' */ dataLastEmptyBucket?: string; /** The timestamp of the last bucket that was considered sparse. */ 'data.last_sparse_bucket'?: string; /** The timestamp of the last bucket that was considered sparse. * @alias 'data.last_sparse_bucket' */ dlsb?: string; /** The timestamp of the last bucket that was considered sparse. * @alias 'data.last_sparse_bucket' */ dataLastSparseBucket?: string; /** The number of bytes of memory used by the models. * This is the maximum value since the last time the model was persisted. * If the job is closed, this value indicates the latest size. */ 'model.bytes'?: ByteSize; /** The number of bytes of memory used by the models. * This is the maximum value since the last time the model was persisted. * If the job is closed, this value indicates the latest size. * @alias 'model.bytes' */ mb?: ByteSize; /** The number of bytes of memory used by the models. * This is the maximum value since the last time the model was persisted. * If the job is closed, this value indicates the latest size. * @alias 'model.bytes' */ modelBytes?: ByteSize; /** The status of the mathematical models. */ 'model.memory_status'?: MlMemoryStatus; /** The status of the mathematical models. * @alias 'model.memory_status' */ mms?: MlMemoryStatus; /** The status of the mathematical models. * @alias 'model.memory_status' */ modelMemoryStatus?: MlMemoryStatus; /** The number of bytes over the high limit for memory usage at the last allocation failure. */ 'model.bytes_exceeded'?: ByteSize; /** The number of bytes over the high limit for memory usage at the last allocation failure. * @alias 'model.bytes_exceeded' */ mbe?: ByteSize; /** The number of bytes over the high limit for memory usage at the last allocation failure. * @alias 'model.bytes_exceeded' */ modelBytesExceeded?: ByteSize; /** The upper limit for model memory usage, checked on increasing values. */ 'model.memory_limit'?: string; /** The upper limit for model memory usage, checked on increasing values. * @alias 'model.memory_limit' */ mml?: string; /** The upper limit for model memory usage, checked on increasing values. * @alias 'model.memory_limit' */ modelMemoryLimit?: string; /** The number of `by` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. 
*/ 'model.by_fields'?: string; /** The number of `by` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. * @alias 'model.by_fields' */ mbf?: string; /** The number of `by` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. * @alias 'model.by_fields' */ modelByFields?: string; /** The number of `over` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. */ 'model.over_fields'?: string; /** The number of `over` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. * @alias 'model.over_fields' */ mof?: string; /** The number of `over` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. * @alias 'model.over_fields' */ modelOverFields?: string; /** The number of `partition` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. */ 'model.partition_fields'?: string; /** The number of `partition` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. * @alias 'model.partition_fields' */ mpf?: string; /** The number of `partition` field values that were analyzed by the models. * This value is cumulative for all detectors in the job. * @alias 'model.partition_fields' */ modelPartitionFields?: string; /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. * This situation is also signified by a `memory_status` property value of `hard_limit`. */ 'model.bucket_allocation_failures'?: string; /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. * This situation is also signified by a `memory_status` property value of `hard_limit`. * @alias 'model.bucket_allocation_failures' */ mbaf?: string; /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. * This situation is also signified by a `memory_status` property value of `hard_limit`. * @alias 'model.bucket_allocation_failures' */ modelBucketAllocationFailures?: string; /** The status of categorization for the job. */ 'model.categorization_status'?: MlCategorizationStatus; /** The status of categorization for the job. * @alias 'model.categorization_status' */ mcs?: MlCategorizationStatus; /** The status of categorization for the job. * @alias 'model.categorization_status' */ modelCategorizationStatus?: MlCategorizationStatus; /** The number of documents that have had a field categorized. */ 'model.categorized_doc_count'?: string; /** The number of documents that have had a field categorized. * @alias 'model.categorized_doc_count' */ mcdc?: string; /** The number of documents that have had a field categorized. * @alias 'model.categorized_doc_count' */ modelCategorizedDocCount?: string; /** The number of categories created by categorization. */ 'model.total_category_count'?: string; /** The number of categories created by categorization. * @alias 'model.total_category_count' */ mtcc?: string; /** The number of categories created by categorization. * @alias 'model.total_category_count' */ modelTotalCategoryCount?: string; /** The number of categories that match more than 1% of categorized documents. */ 'model.frequent_category_count'?: string;
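// Illustrative sketch (not part of the generated declarations; assumes a connected `client` instance from this library): because `memory_status` is exposed as a cat column, a typed response can be filtered for jobs that hit their memory limit.
//   const jobs = await client.cat.mlJobs({ h: ['id', 'model.memory_status'] })
//   const overLimit = jobs.filter(j => j['model.memory_status'] === 'hard_limit')
/** The number of categories that match more than 1% of categorized documents. 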
* @alias 'model.frequent_category_count' */ modelFrequentCategoryCount?: string; /** The number of categories that match just one categorized document. */ 'model.rare_category_count'?: string; /** The number of categories that match just one categorized document. * @alias 'model.rare_category_count' */ mrcc?: string; /** The number of categories that match just one categorized document. * @alias 'model.rare_category_count' */ modelRareCategoryCount?: string; /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. * Dead categories are a side effect of the fact that categorization has no prior training. */ 'model.dead_category_count'?: string; /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. * Dead categories are a side effect of the fact that categorization has no prior training. * @alias 'model.dead_category_count' */ mdcc?: string; /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. * Dead categories are a side effect of the fact that categorization has no prior training. * @alias 'model.dead_category_count' */ modelDeadCategoryCount?: string; /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. * This count does not track which specific categories failed to be created. * Therefore, you cannot use this value to determine the number of unique categories that were missed. */ 'model.failed_category_count'?: string; /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. * This count does not track which specific categories failed to be created. * Therefore, you cannot use this value to determine the number of unique categories that were missed. * @alias 'model.failed_category_count' */ mfcc?: string; /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. * This count does not track which specific categories failed to be created. * Therefore, you cannot use this value to determine the number of unique categories that were missed. * @alias 'model.failed_category_count' */ modelFailedCategoryCount?: string; /** The timestamp when the model stats were gathered, according to server time. */ 'model.log_time'?: string; /** The timestamp when the model stats were gathered, according to server time. * @alias 'model.log_time' */ mlt?: string; /** The timestamp when the model stats were gathered, according to server time. * @alias 'model.log_time' */ modelLogTime?: string; /** The timestamp of the last record when the model stats were gathered. */ 'model.timestamp'?: string; /** The timestamp of the last record when the model stats were gathered. * @alias 'model.timestamp' */ mt?: string; /** The timestamp of the last record when the model stats were gathered. * @alias 'model.timestamp' */ modelTimestamp?: string; /** The number of individual forecasts currently available for the job. * A value of one or more indicates that forecasts exist. */ 'forecasts.total'?: string; /** The number of individual forecasts currently available for the job. * A value of one or more indicates that forecasts exist. 
* @alias 'forecasts.total' */ ft?: string; /** The number of individual forecasts currently available for the job. * A value of one or more indicates that forecasts exist. * @alias 'forecasts.total' */ forecastsTotal?: string; /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.min'?: string; /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.min' */ fmmin?: string; /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.min' */ forecastsMemoryMin?: string; /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.max'?: string; /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.max' */ fmmax?: string; /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.max' */ forecastsMemoryMax?: string; /** The average memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.avg'?: string; /** The average memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.avg' */ fmavg?: string; /** The average memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.avg' */ forecastsMemoryAvg?: string; /** The total memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.total'?: string; /** The total memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.total' */ fmt?: string; /** The total memory usage in bytes for forecasts related to the anomaly detection job. * @alias 'forecasts.memory.total' */ forecastsMemoryTotal?: string; /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.min'?: string; /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.min' */ frmin?: string; /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.min' */ forecastsRecordsMin?: string; /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.max'?: string; /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.max' */ frmax?: string; /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.max' */ forecastsRecordsMax?: string; /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.avg'?: string; /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.avg' */ fravg?: string; /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.avg' */ forecastsRecordsAvg?: string; /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. 
*/ 'forecasts.records.total'?: string; /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.total' */ frt?: string; /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @alias 'forecasts.records.total' */ forecastsRecordsTotal?: string; /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.min'?: string; /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.min' */ ftmin?: string; /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.min' */ forecastsTimeMin?: string; /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.max'?: string; /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.max' */ ftmax?: string; /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.max' */ forecastsTimeMax?: string; /** The average runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.avg'?: string; /** The average runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.avg' */ ftavg?: string; /** The average runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.avg' */ forecastsTimeAvg?: string; /** The total runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.total'?: string; /** The total runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.total' */ ftt?: string; /** The total runtime in milliseconds for forecasts related to the anomaly detection job. * @alias 'forecasts.time.total' */ forecastsTimeTotal?: string; /** The unique identifier of the assigned node. */ 'node.id'?: NodeId; /** The unique identifier of the assigned node. * @alias 'node.id' */ ni?: NodeId; /** The unique identifier of the assigned node. * @alias 'node.id' */ nodeId?: NodeId; /** The name of the assigned node. */ 'node.name'?: string; /** The name of the assigned node. * @alias 'node.name' */ nn?: string; /** The name of the assigned node. * @alias 'node.name' */ nodeName?: string; /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: NodeId; /** The ephemeral identifier of the assigned node. * @alias 'node.ephemeral_id' */ ne?: NodeId; /** The ephemeral identifier of the assigned node. * @alias 'node.ephemeral_id' */ nodeEphemeralId?: NodeId; /** The network address of the assigned node. */ 'node.address'?: string; /** The network address of the assigned node. * @alias 'node.address' */ na?: string; /** The network address of the assigned node. * @alias 'node.address' */ nodeAddress?: string; /** The number of bucket results produced by the job. */ 'buckets.count'?: string; /** The number of bucket results produced by the job. * @alias 'buckets.count' */ bc?: string; /** The number of bucket results produced by the job. * @alias 'buckets.count' */ bucketsCount?: string; /** The sum of all bucket processing times, in milliseconds. */ 'buckets.time.total'?: string;
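// Illustrative sketch (assumes a connected `client` instance from this library): every cat column in this record is readable under its dotted name or either alias declared above, so a typed response can be consumed either way.
//   const jobs = await client.cat.mlJobs({ h: ['id', 'state', 'buckets.count'] })
//   for (const job of jobs) console.log(job.id, job.state, job['buckets.count'])
/** The sum of all bucket processing times, in milliseconds. 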
* @alias 'buckets.time.total' */ btt?: string; /** The sum of all bucket processing times, in milliseconds. * @alias 'buckets.time.total' */ bucketsTimeTotal?: string; /** The minimum of all bucket processing times, in milliseconds. */ 'buckets.time.min'?: string; /** The minimum of all bucket processing times, in milliseconds. * @alias 'buckets.time.min' */ btmin?: string; /** The minimum of all bucket processing times, in milliseconds. * @alias 'buckets.time.min' */ bucketsTimeMin?: string; /** The maximum of all bucket processing times, in milliseconds. */ 'buckets.time.max'?: string; /** The maximum of all bucket processing times, in milliseconds. * @alias 'buckets.time.max' */ btmax?: string; /** The maximum of all bucket processing times, in milliseconds. * @alias 'buckets.time.max' */ bucketsTimeMax?: string; /** The exponential moving average of all bucket processing times, in milliseconds. */ 'buckets.time.exp_avg'?: string; /** The exponential moving average of all bucket processing times, in milliseconds. * @alias 'buckets.time.exp_avg' */ btea?: string; /** The exponential moving average of all bucket processing times, in milliseconds. * @alias 'buckets.time.exp_avg' */ bucketsTimeExpAvg?: string; /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. */ 'buckets.time.exp_avg_hour'?: string; /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. * @alias 'buckets.time.exp_avg_hour' */ bteah?: string; /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. * @alias 'buckets.time.exp_avg_hour' */ bucketsTimeExpAvgHour?: string; } export interface CatMlJobsRequest extends CatCatRequestBase { /** Identifier for the anomaly detection job. */ job_id?: Id; /** Specifies what to do when the request: * * * Contains wildcard expressions and there are no jobs that match. * * Contains the `_all` string or no identifiers and there are no matches. * * Contains wildcard expressions and there are only partial matches. * * If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there * are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial * matches. */ allow_no_match?: boolean; /** The unit used to display byte values. */ bytes?: Bytes; /** Comma-separated list of column names to display. */ h?: CatCatAnonalyDetectorColumns; /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatAnonalyDetectorColumns; /** The unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_no_match?: never; bytes?: never; h?: never; s?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; allow_no_match?: never; bytes?: never; h?: never; s?: never; time?: never; }; } export type CatMlJobsResponse = CatMlJobsJobsRecord[]; export interface CatMlTrainedModelsRequest extends CatCatRequestBase { /** A unique identifier for the trained model. 
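* For example (an illustrative value), `model_id: 'lang_ident_model_1'` would target the built-in language identification model.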
*/ model_id?: Id; /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. * If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** The unit used to display byte values. */ bytes?: Bytes; /** A comma-separated list of column names to display. */ h?: CatCatTrainedModelsColumns; /** A comma-separated list of column names or aliases used to sort the response. */ s?: CatCatTrainedModelsColumns; /** Skips the specified number of trained models. */ from?: integer; /** The maximum number of trained models to display. */ size?: integer; /** Unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; allow_no_match?: never; bytes?: never; h?: never; s?: never; from?: never; size?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; allow_no_match?: never; bytes?: never; h?: never; s?: never; from?: never; size?: never; time?: never; }; } export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[]; export interface CatMlTrainedModelsTrainedModelsRecord { /** The model identifier. */ id?: Id; /** Information about the creator of the model. */ created_by?: string; /** Information about the creator of the model. * @alias created_by */ c?: string; /** Information about the creator of the model. * @alias created_by */ createdBy?: string; /** The estimated heap size to keep the model in memory. */ heap_size?: ByteSize; /** The estimated heap size to keep the model in memory. * @alias heap_size */ hs?: ByteSize; /** The estimated heap size to keep the model in memory. * @alias heap_size */ modelHeapSize?: ByteSize; /** The estimated number of operations to use the model. * This number helps to measure the computational complexity of the model. */ operations?: string; /** The estimated number of operations to use the model. * This number helps to measure the computational complexity of the model. * @alias operations */ o?: string; /** The estimated number of operations to use the model. * This number helps to measure the computational complexity of the model. * @alias operations */ modelOperations?: string; /** The license level of the model. */ license?: string; /** The license level of the model. * @alias license */ l?: string; /** The time the model was created. */ create_time?: DateTime; /** The time the model was created. * @alias create_time */ ct?: DateTime; /** The version of Elasticsearch when the model was created. */ version?: VersionString; /** The version of Elasticsearch when the model was created. * @alias version */ v?: VersionString; /** A description of the model. */ description?: string; /** A description of the model. * @alias description */ d?: string; /** The number of pipelines that are referencing the model. */ 'ingest.pipelines'?: string; /** The number of pipelines that are referencing the model. * @alias 'ingest.pipelines' */ ip?: string; /** The number of pipelines that are referencing the model. 
* @alias 'ingest.pipelines' */ ingestPipelines?: string; /** The total number of documents that are processed by the model. */ 'ingest.count'?: string; /** The total number of documents that are processed by the model. * @alias 'ingest.count' */ ic?: string; /** The total number of documents that are processed by the model. * @alias 'ingest.count' */ ingestCount?: string; /** The total time spent processing documents with the model. */ 'ingest.time'?: string; /** The total time spent processing documents with the model. * @alias 'ingest.time' */ it?: string; /** The total time spent processing documents with the model. * @alias 'ingest.time' */ ingestTime?: string; /** The total number of documents that are currently being handled by the model. */ 'ingest.current'?: string; /** The total number of documents that are currently being handled by the model. * @alias 'ingest.current' */ icurr?: string; /** The total number of documents that are currently being handled by the model. * @alias 'ingest.current' */ ingestCurrent?: string; /** The total number of failed ingest attempts with the model. */ 'ingest.failed'?: string; /** The total number of failed ingest attempts with the model. * @alias 'ingest.failed' */ if?: string; /** The total number of failed ingest attempts with the model. * @alias 'ingest.failed' */ ingestFailed?: string; /** The identifier for the data frame analytics job that created the model. * Only displayed if the job is still available. */ 'data_frame.id'?: string; /** The identifier for the data frame analytics job that created the model. * Only displayed if the job is still available. * @alias 'data_frame.id' */ dfid?: string; /** The identifier for the data frame analytics job that created the model. * Only displayed if the job is still available. * @alias 'data_frame.id' */ dataFrameAnalytics?: string; /** The time the data frame analytics job was created. */ 'data_frame.create_time'?: string; /** The time the data frame analytics job was created. * @alias 'data_frame.create_time' */ dft?: string; /** The time the data frame analytics job was created. * @alias 'data_frame.create_time' */ dataFrameAnalyticsTime?: string; /** The source index used to train in the data frame analysis. */ 'data_frame.source_index'?: string; /** The source index used to train in the data frame analysis. * @alias 'data_frame.source_index' */ dfsi?: string; /** The source index used to train in the data frame analysis. * @alias 'data_frame.source_index' */ dataFrameAnalyticsSrcIndex?: string; /** The analysis used by the data frame to build the model. */ 'data_frame.analysis'?: string; /** The analysis used by the data frame to build the model. * @alias 'data_frame.analysis' */ dfa?: string; /** The analysis used by the data frame to build the model. * @alias 'data_frame.analysis' */ dataFrameAnalyticsAnalysis?: string; type?: string; } export interface CatNodeattrsNodeAttributesRecord { /** The node name. */ node?: string; /** The unique node identifier. */ id?: string; /** The process identifier. */ pid?: string; /** The host name. */ host?: string; /** The host name. * @alias host */ h?: string; /** The IP address. */ ip?: string; /** The IP address. * @alias ip */ i?: string; /** The bound transport port. */ port?: string; /** The attribute name. */ attr?: string; /** The attribute value. */ value?: string; } export interface CatNodeattrsRequest extends CatCatRequestBase { /** List of columns to appear in the response. Supports simple wildcards. 
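* For example, `h: 'node,attr,value'` limits the output to those three columns, and a simple wildcard such as `h: 'node,attr*'` (an illustrative pattern) selects every column whose name starts with `attr`.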
*/ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[]; export interface CatNodesNodesRecord { /** The unique node identifier. */ id?: Id; /** The unique node identifier. * @alias id */ nodeId?: Id; /** The process identifier. */ pid?: string; /** The process identifier. * @alias pid */ p?: string; /** The IP address. */ ip?: string; /** The IP address. * @alias ip */ i?: string; /** The bound transport port. */ port?: string; /** The bound transport port. * @alias port */ po?: string; /** The bound HTTP address. */ http_address?: string; /** The bound HTTP address. * @alias http_address */ http?: string; /** The Elasticsearch version. */ version?: VersionString; /** The Elasticsearch version. * @alias version */ v?: VersionString; /** The Elasticsearch distribution flavor. */ flavor?: string; /** The Elasticsearch distribution flavor. * @alias flavor */ f?: string; /** The Elasticsearch distribution type. */ type?: string; /** The Elasticsearch distribution type. * @alias type */ t?: string; /** The Elasticsearch build hash. */ build?: string; /** The Elasticsearch build hash. * @alias build */ b?: string; /** The Java version. */ jdk?: string; /** The Java version. * @alias jdk */ j?: string; /** The total disk space. */ 'disk.total'?: ByteSize; /** The total disk space. * @alias 'disk.total' */ dt?: ByteSize; /** The total disk space. * @alias 'disk.total' */ diskTotal?: ByteSize; /** The used disk space. */ 'disk.used'?: ByteSize; /** The used disk space. * @alias 'disk.used' */ du?: ByteSize; /** The used disk space. * @alias 'disk.used' */ diskUsed?: ByteSize; /** The available disk space. */ 'disk.avail'?: ByteSize; /** The available disk space. * @alias 'disk.avail' */ d?: ByteSize; /** The available disk space. * @alias 'disk.avail' */ da?: ByteSize; /** The available disk space. * @alias 'disk.avail' */ disk?: ByteSize; /** The available disk space. * @alias 'disk.avail' */ diskAvail?: ByteSize; /** The used disk space percentage. */ 'disk.used_percent'?: Percentage; /** The used disk space percentage. * @alias 'disk.used_percent' */ dup?: Percentage; /** The used disk space percentage. * @alias 'disk.used_percent' */ diskUsedPercent?: Percentage; /** The used heap. */ 'heap.current'?: string; /** The used heap. * @alias 'heap.current' */ hc?: string; /** The used heap. * @alias 'heap.current' */ heapCurrent?: string; /** The used heap ratio. */ 'heap.percent'?: Percentage; /** The used heap ratio. * @alias 'heap.percent' */ hp?: Percentage; /** The used heap ratio. 
* @alias 'heap.percent' */ heapPercent?: Percentage; /** The maximum configured heap. */ 'heap.max'?: string; /** The maximum configured heap. * @alias 'heap.max' */ hm?: string; /** The maximum configured heap. * @alias 'heap.max' */ heapMax?: string; /** The used machine memory. */ 'ram.current'?: string; /** The used machine memory. * @alias 'ram.current' */ rc?: string; /** The used machine memory. * @alias 'ram.current' */ ramCurrent?: string; /** The used machine memory ratio. */ 'ram.percent'?: Percentage; /** The used machine memory ratio. * @alias 'ram.percent' */ rp?: Percentage; /** The used machine memory ratio. * @alias 'ram.percent' */ ramPercent?: Percentage; /** The total machine memory. */ 'ram.max'?: string; /** The total machine memory. * @alias 'ram.max' */ rn?: string; /** The total machine memory. * @alias 'ram.max' */ ramMax?: string; /** The used file descriptors. */ 'file_desc.current'?: string; /** The used file descriptors. * @alias 'file_desc.current' */ fdc?: string; /** The used file descriptors. * @alias 'file_desc.current' */ fileDescriptorCurrent?: string; /** The used file descriptor ratio. */ 'file_desc.percent'?: Percentage; /** The used file descriptor ratio. * @alias 'file_desc.percent' */ fdp?: Percentage; /** The used file descriptor ratio. * @alias 'file_desc.percent' */ fileDescriptorPercent?: Percentage; /** The maximum number of file descriptors. */ 'file_desc.max'?: string; /** The maximum number of file descriptors. * @alias 'file_desc.max' */ fdm?: string; /** The maximum number of file descriptors. * @alias 'file_desc.max' */ fileDescriptorMax?: string; /** The recent system CPU usage as a percentage. */ cpu?: string; /** The load average for the most recent minute. */ load_1m?: string; /** The load average for the last five minutes. */ load_5m?: string; /** The load average for the last fifteen minutes. */ load_15m?: string; /** The load average for the last fifteen minutes. * @alias load_15m */ l?: string; /** The node uptime. */ uptime?: string; /** The node uptime. * @alias uptime */ u?: string; /** The roles of the node. * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). */ 'node.role'?: string; /** The roles of the node. * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). * @alias 'node.role' */ r?: string; /** The roles of the node. * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). * @alias 'node.role' */ role?: string; /** The roles of the node. * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). 
* @alias 'node.role' */ nodeRole?: string; /** Indicates whether the node is the elected master node. * Returned values include `*` (elected master) and `-` (not elected master). */ master?: string; /** Indicates whether the node is the elected master node. * Returned values include `*` (elected master) and `-` (not elected master). * @alias master */ m?: string; /** The node name. */ name?: Name; /** The node name. * @alias name */ n?: Name; /** The size of completion. */ 'completion.size'?: string; /** The size of completion. * @alias 'completion.size' */ cs?: string; /** The size of completion. * @alias 'completion.size' */ completionSize?: string; /** The used fielddata cache. */ 'fielddata.memory_size'?: string; /** The used fielddata cache. * @alias 'fielddata.memory_size' */ fm?: string; /** The used fielddata cache. * @alias 'fielddata.memory_size' */ fielddataMemory?: string; /** The fielddata evictions. */ 'fielddata.evictions'?: string; /** The fielddata evictions. * @alias 'fielddata.evictions' */ fe?: string; /** The fielddata evictions. * @alias 'fielddata.evictions' */ fielddataEvictions?: string; /** The used query cache. */ 'query_cache.memory_size'?: string; /** The used query cache. * @alias 'query_cache.memory_size' */ qcm?: string; /** The used query cache. * @alias 'query_cache.memory_size' */ queryCacheMemory?: string; /** The query cache evictions. */ 'query_cache.evictions'?: string; /** The query cache evictions. * @alias 'query_cache.evictions' */ qce?: string; /** The query cache evictions. * @alias 'query_cache.evictions' */ queryCacheEvictions?: string; /** The query cache hit counts. */ 'query_cache.hit_count'?: string; /** The query cache hit counts. * @alias 'query_cache.hit_count' */ qchc?: string; /** The query cache hit counts. * @alias 'query_cache.hit_count' */ queryCacheHitCount?: string; /** The query cache miss counts. */ 'query_cache.miss_count'?: string; /** The query cache miss counts. * @alias 'query_cache.miss_count' */ qcmc?: string; /** The query cache miss counts. * @alias 'query_cache.miss_count' */ queryCacheMissCount?: string; /** The used request cache. */ 'request_cache.memory_size'?: string; /** The used request cache. * @alias 'request_cache.memory_size' */ rcm?: string; /** The used request cache. * @alias 'request_cache.memory_size' */ requestCacheMemory?: string; /** The request cache evictions. */ 'request_cache.evictions'?: string; /** The request cache evictions. * @alias 'request_cache.evictions' */ rce?: string; /** The request cache evictions. * @alias 'request_cache.evictions' */ requestCacheEvictions?: string; /** The request cache hit counts. */ 'request_cache.hit_count'?: string; /** The request cache hit counts. * @alias 'request_cache.hit_count' */ rchc?: string; /** The request cache hit counts. * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string; /** The request cache miss counts. */ 'request_cache.miss_count'?: string; /** The request cache miss counts. * @alias 'request_cache.miss_count' */ rcmc?: string; /** The request cache miss counts. * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string; /** The number of flushes. */ 'flush.total'?: string; /** The number of flushes. * @alias 'flush.total' */ ft?: string; /** The number of flushes. * @alias 'flush.total' */ flushTotal?: string; /** The time spent in flush. */ 'flush.total_time'?: string;
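// Illustrative sketch (assumes a connected `client` instance from this library): the `master` column marks the elected master with `*` and every other node with `-`, so the elected master can be located from a typed response.
//   const nodes = await client.cat.nodes({ h: ['name', 'node.role', 'master'] })
//   const electedMaster = nodes.find(n => n.master === '*')
/** The time spent in flush. 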
* @alias 'flush.total_time' */ flushTotalTime?: string; /** The number of current get ops. */ 'get.current'?: string; /** The number of current get ops. * @alias 'get.current' */ gc?: string; /** The number of current get ops. * @alias 'get.current' */ getCurrent?: string; /** The time spent in get. */ 'get.time'?: string; /** The time spent in get. * @alias 'get.time' */ gti?: string; /** The time spent in get. * @alias 'get.time' */ getTime?: string; /** The number of get ops. */ 'get.total'?: string; /** The number of get ops. * @alias 'get.total' */ gto?: string; /** The number of get ops. * @alias 'get.total' */ getTotal?: string; /** The time spent in successful gets. */ 'get.exists_time'?: string; /** The time spent in successful gets. * @alias 'get.exists_time' */ geti?: string; /** The time spent in successful gets. * @alias 'get.exists_time' */ getExistsTime?: string; /** The number of successful get operations. */ 'get.exists_total'?: string; /** The number of successful get operations. * @alias 'get.exists_total' */ geto?: string; /** The number of successful get operations. * @alias 'get.exists_total' */ getExistsTotal?: string; /** The time spent in failed gets. */ 'get.missing_time'?: string; /** The time spent in failed gets. * @alias 'get.missing_time' */ gmti?: string; /** The time spent in failed gets. * @alias 'get.missing_time' */ getMissingTime?: string; /** The number of failed gets. */ 'get.missing_total'?: string; /** The number of failed gets. * @alias 'get.missing_total' */ gmto?: string; /** The number of failed gets. * @alias 'get.missing_total' */ getMissingTotal?: string; /** The number of current deletions. */ 'indexing.delete_current'?: string; /** The number of current deletions. * @alias 'indexing.delete_current' */ idc?: string; /** The number of current deletions. * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string; /** The time spent in deletions. */ 'indexing.delete_time'?: string; /** The time spent in deletions. * @alias 'indexing.delete_time' */ idti?: string; /** The time spent in deletions. * @alias 'indexing.delete_time' */ indexingDeleteTime?: string; /** The number of delete operations. */ 'indexing.delete_total'?: string; /** The number of delete operations. * @alias 'indexing.delete_total' */ idto?: string; /** The number of delete operations. * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string; /** The number of current indexing operations. */ 'indexing.index_current'?: string; /** The number of current indexing operations. * @alias 'indexing.index_current' */ iic?: string; /** The number of current indexing operations. * @alias 'indexing.index_current' */ indexingIndexCurrent?: string; /** The time spent in indexing. */ 'indexing.index_time'?: string; /** The time spent in indexing. * @alias 'indexing.index_time' */ iiti?: string; /** The time spent in indexing. * @alias 'indexing.index_time' */ indexingIndexTime?: string; /** The number of indexing operations. */ 'indexing.index_total'?: string; /** The number of indexing operations. * @alias 'indexing.index_total' */ iito?: string; /** The number of indexing operations. * @alias 'indexing.index_total' */ indexingIndexTotal?: string; /** The number of failed indexing operations. */ 'indexing.index_failed'?: string; /** The number of failed indexing operations. * @alias 'indexing.index_failed' */ iif?: string; /** The number of failed indexing operations. * @alias 'indexing.index_failed' */ indexingIndexFailed?: string; /** The number of current merges. 
*/ 'merges.current'?: string; /** The number of current merges. * @alias 'merges.current' */ mc?: string; /** The number of current merges. * @alias 'merges.current' */ mergesCurrent?: string; /** The number of current merging docs. */ 'merges.current_docs'?: string; /** The number of current merging docs. * @alias 'merges.current_docs' */ mcd?: string; /** The number of current merging docs. * @alias 'merges.current_docs' */ mergesCurrentDocs?: string; /** The size of current merges. */ 'merges.current_size'?: string; /** The size of current merges. * @alias 'merges.current_size' */ mcs?: string; /** The size of current merges. * @alias 'merges.current_size' */ mergesCurrentSize?: string; /** The number of completed merge operations. */ 'merges.total'?: string; /** The number of completed merge operations. * @alias 'merges.total' */ mt?: string; /** The number of completed merge operations. * @alias 'merges.total' */ mergesTotal?: string; /** The docs merged. */ 'merges.total_docs'?: string; /** The docs merged. * @alias 'merges.total_docs' */ mtd?: string; /** The docs merged. * @alias 'merges.total_docs' */ mergesTotalDocs?: string; /** The size merged. */ 'merges.total_size'?: string; /** The size merged. * @alias 'merges.total_size' */ mts?: string; /** The size merged. * @alias 'merges.total_size' */ mergesTotalSize?: string; /** The time spent in merges. */ 'merges.total_time'?: string; /** The time spent in merges. * @alias 'merges.total_time' */ mtt?: string; /** The time spent in merges. * @alias 'merges.total_time' */ mergesTotalTime?: string; /** The total refreshes. */ 'refresh.total'?: string; /** The time spent in refreshes. */ 'refresh.time'?: string; /** The total external refreshes. */ 'refresh.external_total'?: string; /** The total external refreshes. * @alias 'refresh.external_total' */ rto?: string; /** The total external refreshes. * @alias 'refresh.external_total' */ refreshTotal?: string; /** The time spent in external refreshes. */ 'refresh.external_time'?: string; /** The time spent in external refreshes. * @alias 'refresh.external_time' */ rti?: string; /** The time spent in external refreshes. * @alias 'refresh.external_time' */ refreshTime?: string; /** The number of pending refresh listeners. */ 'refresh.listeners'?: string; /** The number of pending refresh listeners. * @alias 'refresh.listeners' */ rli?: string; /** The number of pending refresh listeners. * @alias 'refresh.listeners' */ refreshListeners?: string; /** The total script compilations. */ 'script.compilations'?: string; /** The total script compilations. * @alias 'script.compilations' */ scrcc?: string; /** The total script compilations. * @alias 'script.compilations' */ scriptCompilations?: string; /** The total compiled scripts evicted from the cache. */ 'script.cache_evictions'?: string; /** The total compiled scripts evicted from the cache. * @alias 'script.cache_evictions' */ scrce?: string; /** The total compiled scripts evicted from the cache. * @alias 'script.cache_evictions' */ scriptCacheEvictions?: string; /** The script cache compilation limit triggered. */ 'script.compilation_limit_triggered'?: string; /** The script cache compilation limit triggered. * @alias 'script.compilation_limit_triggered' */ scrclt?: string; /** The script cache compilation limit triggered. * @alias 'script.compilation_limit_triggered' */ scriptCacheCompilationLimitTriggered?: string; /** The current fetch phase operations. */ 'search.fetch_current'?: string; /** The current fetch phase operations. 
* @alias 'search.fetch_current' */ sfc?: string; /** The current fetch phase operations. * @alias 'search.fetch_current' */ searchFetchCurrent?: string; /** The time spent in fetch phase. */ 'search.fetch_time'?: string; /** The time spent in fetch phase. * @alias 'search.fetch_time' */ sfti?: string; /** The time spent in fetch phase. * @alias 'search.fetch_time' */ searchFetchTime?: string; /** The total fetch operations. */ 'search.fetch_total'?: string; /** The total fetch operations. * @alias 'search.fetch_total' */ sfto?: string; /** The total fetch operations. * @alias 'search.fetch_total' */ searchFetchTotal?: string; /** The open search contexts. */ 'search.open_contexts'?: string; /** The open search contexts. * @alias 'search.open_contexts' */ so?: string; /** The open search contexts. * @alias 'search.open_contexts' */ searchOpenContexts?: string; /** The current query phase operations. */ 'search.query_current'?: string; /** The current query phase operations. * @alias 'search.query_current' */ sqc?: string; /** The current query phase operations. * @alias 'search.query_current' */ searchQueryCurrent?: string; /** The time spent in query phase. */ 'search.query_time'?: string; /** The time spent in query phase. * @alias 'search.query_time' */ sqti?: string; /** The time spent in query phase. * @alias 'search.query_time' */ searchQueryTime?: string; /** The total query phase operations. */ 'search.query_total'?: string; /** The total query phase operations. * @alias 'search.query_total' */ sqto?: string; /** The total query phase operations. * @alias 'search.query_total' */ searchQueryTotal?: string; /** The open scroll contexts. */ 'search.scroll_current'?: string; /** The open scroll contexts. * @alias 'search.scroll_current' */ scc?: string; /** The open scroll contexts. * @alias 'search.scroll_current' */ searchScrollCurrent?: string; /** The time scroll contexts held open. */ 'search.scroll_time'?: string; /** The time scroll contexts held open. * @alias 'search.scroll_time' */ scti?: string; /** The time scroll contexts held open. * @alias 'search.scroll_time' */ searchScrollTime?: string; /** The completed scroll contexts. */ 'search.scroll_total'?: string; /** The completed scroll contexts. * @alias 'search.scroll_total' */ scto?: string; /** The completed scroll contexts. * @alias 'search.scroll_total' */ searchScrollTotal?: string; /** The number of segments. */ 'segments.count'?: string; /** The number of segments. * @alias 'segments.count' */ sc?: string; /** The number of segments. * @alias 'segments.count' */ segmentsCount?: string; /** The memory used by segments. */ 'segments.memory'?: string; /** The memory used by segments. * @alias 'segments.memory' */ sm?: string; /** The memory used by segments. * @alias 'segments.memory' */ segmentsMemory?: string; /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string; /** The memory used by the index writer. * @alias 'segments.index_writer_memory' */ siwm?: string; /** The memory used by the index writer. * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string; /** The memory used by the version map. */ 'segments.version_map_memory'?: string; /** The memory used by the version map. * @alias 'segments.version_map_memory' */ svmm?: string; /** The memory used by the version map. 
* @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string; /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields. */ 'segments.fixed_bitset_memory'?: string; /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields. * @alias 'segments.fixed_bitset_memory' */ sfbm?: string; /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields. * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string; /** The number of current suggest operations. */ 'suggest.current'?: string; /** The number of current suggest operations. * @alias 'suggest.current' */ suc?: string; /** The number of current suggest operations. * @alias 'suggest.current' */ suggestCurrent?: string; /** The time spent in suggest. */ 'suggest.time'?: string; /** The time spent in suggest. * @alias 'suggest.time' */ suti?: string; /** The time spent in suggest. * @alias 'suggest.time' */ suggestTime?: string; /** The number of suggest operations. */ 'suggest.total'?: string; /** The number of suggest operations. * @alias 'suggest.total' */ suto?: string; /** The number of suggest operations. * @alias 'suggest.total' */ suggestTotal?: string; /** The number of bulk shard operations. */ 'bulk.total_operations'?: string; /** The number of bulk shard operations. * @alias 'bulk.total_operations' */ bto?: string; /** The number of bulk shard operations. * @alias 'bulk.total_operations' */ bulkTotalOperations?: string; /** The time spent in shard bulk. */ 'bulk.total_time'?: string; /** The time spent in shard bulk. * @alias 'bulk.total_time' */ btti?: string; /** The time spent in shard bulk. * @alias 'bulk.total_time' */ bulkTotalTime?: string; /** The total size in bytes of shard bulk. */ 'bulk.total_size_in_bytes'?: string; /** The total size in bytes of shard bulk. * @alias 'bulk.total_size_in_bytes' */ btsi?: string; /** The total size in bytes of shard bulk. * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string; /** The average time spent in shard bulk. */ 'bulk.avg_time'?: string; /** The average time spent in shard bulk. * @alias 'bulk.avg_time' */ bati?: string; /** The average time spent in shard bulk. * @alias 'bulk.avg_time' */ bulkAvgTime?: string; /** The average size in bytes of shard bulk. */ 'bulk.avg_size_in_bytes'?: string; /** The average size in bytes of shard bulk. * @alias 'bulk.avg_size_in_bytes' */ basi?: string; /** The average size in bytes of shard bulk. * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string; } export interface CatNodesRequest extends CatCatRequestBase { /** The unit used to display byte values. */ bytes?: Bytes; /** If `true`, return the full node ID. If `false`, return the shortened node ID. */ full_id?: boolean | string; /** If `true`, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean; /** A comma-separated list of column names to display. * It supports simple wildcards. */ h?: CatCatNodeColumns; /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** The period to wait for a connection to the master node. */ master_timeout?: Duration; /** The unit used to display time values. 
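* For example, `time: 'ms'` reports time columns in milliseconds.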
export interface CatPendingTasksPendingTasksRecord { /** The task insertion order. */ insertOrder?: string; /** The task insertion order. * @alias insertOrder */ o?: string; /** Indicates how long the task has been in queue. */ timeInQueue?: string; /** Indicates how long the task has been in queue. * @alias timeInQueue */ t?: string; /** The task priority. */ priority?: string; /** The task priority. * @alias priority */ p?: string; /** The task source. */ source?: string; /** The task source. * @alias source */ s?: string; } export interface CatPendingTasksRequest extends CatCatRequestBase { /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; time?: never; }; } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[];
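/* Usage sketch (illustrative): the pending-tasks columns (`insertOrder`,
 * `timeInQueue`, `priority`, `source`) and their one-letter aliases above are
 * the values accepted by `h` and `s`. Client setup as in the previous sketch.
 *
 *   const pending: CatPendingTasksResponse = await client.cat.pendingTasks({
 *     format: 'json',
 *     h: ['insertOrder', 'timeInQueue', 'priority', 'source'],
 *     s: ['timeInQueue:desc'],  // longest-queued tasks first (`:desc` flips the default)
 *     time: 'ms'                // report queue times in milliseconds
 *   })
 */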
export interface CatPluginsPluginsRecord { /** The unique node identifier. */ id?: NodeId; /** The node name. */ name?: Name; /** The node name. * @alias name */ n?: Name; /** The component name. */ component?: string; /** The component name. * @alias component */ c?: string; /** The component version. */ version?: VersionString; /** The component version. * @alias version */ v?: VersionString; /** The plugin details. */ description?: string; /** The plugin details. * @alias description */ d?: string; /** The plugin type. */ type?: string; /** The plugin type. * @alias type */ t?: string; } export interface CatPluginsRequest extends CatCatRequestBase { /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** Include bootstrap plugins in the response. */ include_bootstrap?: boolean; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { h?: never; s?: never; include_bootstrap?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { h?: never; s?: never; include_bootstrap?: never; local?: never; master_timeout?: never; }; } export type CatPluginsResponse = CatPluginsPluginsRecord[];
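/* Usage sketch (illustrative): listing installed plugins per node with the
 * flags documented on CatPluginsRequest; client setup is assumed as before.
 *
 *   const plugins: CatPluginsResponse = await client.cat.plugins({
 *     format: 'json',
 *     include_bootstrap: false,  // leave bootstrap plugins out of the response
 *     local: true,               // resolve the node list from the local cluster state
 *     s: ['component']           // sort by plugin component name
 *   })
 */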
export interface CatRecoveryRecoveryRecord { /** The index name. */ index?: IndexName; /** The index name. * @alias index */ i?: IndexName; /** The index name. * @alias index */ idx?: IndexName; /** The shard name. */ shard?: string; /** The shard name. * @alias shard */ s?: string; /** The shard name. * @alias shard */ sh?: string; /** The recovery start time. */ start_time?: DateTime; /** The recovery start time. * @alias start_time */ start?: DateTime; /** The recovery start time in epoch milliseconds. */ start_time_millis?: EpochTime<UnitMillis>; /** The recovery start time in epoch milliseconds. * @alias start_time_millis */ start_millis?: EpochTime<UnitMillis>; /** The recovery stop time. */ stop_time?: DateTime; /** The recovery stop time. * @alias stop_time */ stop?: DateTime; /** The recovery stop time in epoch milliseconds. */ stop_time_millis?: EpochTime<UnitMillis>; /** The recovery stop time in epoch milliseconds. * @alias stop_time_millis */ stop_millis?: EpochTime<UnitMillis>; /** The recovery time. */ time?: Duration; /** The recovery time. * @alias time */ t?: Duration; /** The recovery time. * @alias time */ ti?: Duration; /** The recovery type. */ type?: string; /** The recovery type. * @alias type */ ty?: string; /** The recovery stage. */ stage?: string; /** The recovery stage. * @alias stage */ st?: string; /** The source host. */ source_host?: string; /** The source host. * @alias source_host */ shost?: string; /** The source node name. */ source_node?: string; /** The source node name. * @alias source_node */ snode?: string; /** The target host. */ target_host?: string; /** The target host. * @alias target_host */ thost?: string; /** The target node name. */ target_node?: string; /** The target node name. * @alias target_node */ tnode?: string; /** The repository name. */ repository?: string; /** The repository name. * @alias repository */ rep?: string; /** The snapshot name. */ snapshot?: string; /** The snapshot name. * @alias snapshot */ snap?: string; /** The number of files to recover. */ files?: string; /** The number of files to recover. * @alias files */ f?: string; /** The files recovered. */ files_recovered?: string; /** The files recovered. * @alias files_recovered */ fr?: string; /** The ratio of files recovered. */ files_percent?: Percentage; /** The ratio of files recovered. * @alias files_percent */ fp?: Percentage; /** The total number of files. */ files_total?: string; /** The total number of files. * @alias files_total */ tf?: string; /** The number of bytes to recover. */ bytes?: string; /** The number of bytes to recover. * @alias bytes */ b?: string; /** The bytes recovered. */ bytes_recovered?: string; /** The bytes recovered. * @alias bytes_recovered */ br?: string; /** The ratio of bytes recovered. */ bytes_percent?: Percentage; /** The ratio of bytes recovered. * @alias bytes_percent */ bp?: Percentage; /** The total number of bytes. */ bytes_total?: string; /** The total number of bytes. * @alias bytes_total */ tb?: string; /** The number of translog operations to recover. */ translog_ops?: string; /** The number of translog operations to recover. * @alias translog_ops */ to?: string; /** The translog operations recovered. */ translog_ops_recovered?: string; /** The translog operations recovered. * @alias translog_ops_recovered */ tor?: string; /** The ratio of translog operations recovered. */ translog_ops_percent?: Percentage; /** The ratio of translog operations recovered. * @alias translog_ops_percent */ top?: Percentage; } export interface CatRecoveryRequest extends CatCatRequestBase { /** A comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean; /** The unit used to display byte values. */ bytes?: Bytes; /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** Unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; active_only?: never; bytes?: never; detailed?: never; h?: never; s?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; active_only?: never; bytes?: never; detailed?: never; h?: never; s?: never; time?: never; }; } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[];
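/* Usage sketch (illustrative): watching ongoing shard recoveries for an index
 * pattern. The index name is a placeholder; the flags come from
 * CatRecoveryRequest above.
 *
 *   const recoveries: CatRecoveryResponse = await client.cat.recovery({
 *     format: 'json',
 *     index: 'logs-*',    // hypothetical index pattern
 *     active_only: true,  // only recoveries still in flight
 *     detailed: true,     // include detailed recovery information
 *     bytes: 'mb'         // byte columns rendered in megabytes
 *   })
 */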
export interface CatRepositoriesRepositoriesRecord { /** The unique repository identifier. */ id?: string; /** The unique repository identifier. * @alias id */ repoId?: string; /** The repository type. */ type?: string; /** The repository type. * @alias type */ t?: string; } export interface CatRepositoriesRequest extends CatCatRequestBase { /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[]; export interface CatSegmentsRequest extends CatCatRequestBase { /** A comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** The unit used to display byte values. */ bytes?: Bytes; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; bytes?: never; h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; bytes?: never; h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[]; export interface CatSegmentsSegmentsRecord { /** The index name. */ index?: IndexName; /** The index name. * @alias index */ i?: IndexName; /** The index name. * @alias index */ idx?: IndexName; /** The shard name. */ shard?: string; /** The shard name. * @alias shard */ s?: string; /** The shard name. * @alias shard */ sh?: string; /** The shard type: `primary` or `replica`. */ prirep?: string; /** The shard type: `primary` or `replica`. * @alias prirep */ p?: string; /** The shard type: `primary` or `replica`. * @alias prirep */ pr?: string; /** The shard type: `primary` or `replica`. * @alias prirep */ primaryOrReplica?: string; /** The IP address of the node where it lives. */ ip?: string; /** The unique identifier of the node where it lives. */ id?: NodeId; /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. */ segment?: string; /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. * @alias segment */ seg?: string; /** The segment generation number. * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. */ generation?: string; /** The segment generation number. * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. * @alias generation */ g?: string; /** The segment generation number. * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. * @alias generation */ gen?: string; /** The number of documents in the segment. * This excludes deleted documents and counts any nested documents separately from their parents.
* It also excludes documents which were indexed recently and do not yet belong to a segment. */ 'docs.count'?: string; /** The number of documents in the segment. * This excludes deleted documents and counts any nested documents separately from their parents. * It also excludes documents which were indexed recently and do not yet belong to a segment. * @alias 'docs.count' */ dc?: string; /** The number of documents in the segment. * This excludes deleted documents and counts any nested documents separately from their parents. * It also excludes documents which were indexed recently and do not yet belong to a segment. * @alias 'docs.count' */ docsCount?: string; /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. * This number excludes deletes that were performed recently and do not yet belong to a segment. * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. */ 'docs.deleted'?: string; /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. * This number excludes deletes that were performed recently and do not yet belong to a segment. * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. * @alias 'docs.deleted' */ dd?: string; /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. * This number excludes deletes that were performed recently and do not yet belong to a segment. * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. * @alias 'docs.deleted' */ docsDeleted?: string; /** The segment size in bytes. */ size?: ByteSize; /** The segment size in bytes. * @alias size */ si?: ByteSize; /** The segment memory in bytes. * A value of `-1` indicates Elasticsearch was unable to compute this number. */ 'size.memory'?: ByteSize; /** The segment memory in bytes. * A value of `-1` indicates Elasticsearch was unable to compute this number. * @alias 'size.memory' */ sm?: ByteSize; /** The segment memory in bytes. * A value of `-1` indicates Elasticsearch was unable to compute this number. * @alias 'size.memory' */ sizeMemory?: ByteSize; /** If `true`, the segment is synced to disk. * Segments that are synced can survive a hard reboot. * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. */ committed?: string; /** If `true`, the segment is synced to disk. * Segments that are synced can survive a hard reboot. * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. * @alias committed */ ic?: string; /** If `true`, the segment is synced to disk. * Segments that are synced can survive a hard reboot. * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. 
* @alias committed */ isCommitted?: string; /** If `true`, the segment is searchable. * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. */ searchable?: string; /** If `true`, the segment is searchable. * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. * @alias searchable */ is?: string; /** If `true`, the segment is searchable. * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. * @alias searchable */ isSearchable?: string; /** The version of Lucene used to write the segment. */ version?: VersionString; /** The version of Lucene used to write the segment. * @alias version */ v?: VersionString; /** If `true`, the segment is stored in a compound file. * This means Lucene merged all files from the segment in a single file to save file descriptors. */ compound?: string; /** If `true`, the segment is stored in a compound file. * This means Lucene merged all files from the segment in a single file to save file descriptors. * @alias compound */ ico?: string; /** If `true`, the segment is stored in a compound file. * This means Lucene merged all files from the segment in a single file to save file descriptors. * @alias compound */ isCompound?: string; } export interface CatShardsRequest extends CatCatRequestBase { /** A comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** The unit used to display byte values. */ bytes?: Bytes; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; bytes?: never; h?: never; s?: never; master_timeout?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; bytes?: never; h?: never; s?: never; master_timeout?: never; time?: never; }; } export type CatShardsResponse = CatShardsShardsRecord[]; export interface CatShardsShardsRecord { /** The index name. */ index?: string; /** The index name. * @alias index */ i?: string; /** The index name. * @alias index */ idx?: string; /** The shard name. */ shard?: string; /** The shard name. * @alias shard */ s?: string; /** The shard name. * @alias shard */ sh?: string; /** The shard type: `primary` or `replica`. */ prirep?: string; /** The shard type: `primary` or `replica`. * @alias prirep */ p?: string; /** The shard type: `primary` or `replica`. * @alias prirep */ pr?: string; /** The shard type: `primary` or `replica`. * @alias prirep */ primaryOrReplica?: string; /** The shard state. * Returned values include: * `INITIALIZING`: The shard is recovering from a peer shard or gateway. * `RELOCATING`: The shard is relocating. * `STARTED`: The shard has started. * `UNASSIGNED`: The shard is not assigned to any node. */ state?: string; /** The shard state. 
* Returned values include: * `INITIALIZING`: The shard is recovering from a peer shard or gateway. * `RELOCATING`: The shard is relocating. * `STARTED`: The shard has started. * `UNASSIGNED`: The shard is not assigned to any node. * @alias state */ st?: string; /** The number of documents in the shard. */ docs?: string | null; /** The number of documents in the shard. * @alias docs */ d?: string | null; /** The number of documents in the shard. * @alias docs */ dc?: string | null; /** The disk space used by the shard. */ store?: string | null; /** The disk space used by the shard. * @alias store */ sto?: string | null; /** The total size of the dataset (including the cache for partially mounted indices). */ dataset?: string | null; /** The IP address of the node. */ ip?: string | null; /** The unique identifier for the node. */ id?: string; /** The name of the node. */ node?: string | null; /** The name of the node. * @alias node */ n?: string | null; /** The sync identifier. */ sync_id?: string; /** The reason for the last change to the state of an unassigned shard. * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. * Returned values include: * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. * `INDEX_CLOSED`: Unassigned because the index was closed. * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. * `REINITIALIZED`: When a shard moves from started back to initializing. * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. */ 'unassigned.reason'?: string; /** The reason for the last change to the state of an unassigned shard. * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. * Returned values include: * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. * `INDEX_CLOSED`: Unassigned because the index was closed.
* `INDEX_CREATED`: Unassigned as a result of an API creation of an index. * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. * `REINITIALIZED`: When a shard moves from started back to initializing. * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. * @alias 'unassigned.reason' */ ur?: string; /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). */ 'unassigned.at'?: string; /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). * @alias 'unassigned.at' */ ua?: string; /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). */ 'unassigned.for'?: string; /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). * @alias 'unassigned.for' */ uf?: string; /** Additional details as to why the shard became unassigned. * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. */ 'unassigned.details'?: string; /** Additional details as to why the shard became unassigned. * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. * @alias 'unassigned.details' */ ud?: string; /** The type of recovery source. */ 'recoverysource.type'?: string; /** The type of recovery source. * @alias 'recoverysource.type' */ rs?: string; /** The size of completion. */ 'completion.size'?: string; /** The size of completion. * @alias 'completion.size' */ cs?: string; /** The size of completion. * @alias 'completion.size' */ completionSize?: string; /** The used fielddata cache memory. */ 'fielddata.memory_size'?: string; /** The used fielddata cache memory. * @alias 'fielddata.memory_size' */ fm?: string; /** The used fielddata cache memory. * @alias 'fielddata.memory_size' */ fielddataMemory?: string; /** The fielddata cache evictions. */ 'fielddata.evictions'?: string; /** The fielddata cache evictions. * @alias 'fielddata.evictions' */ fe?: string; /** The fielddata cache evictions. * @alias 'fielddata.evictions' */ fielddataEvictions?: string; /** The used query cache memory. */ 'query_cache.memory_size'?: string; /** The used query cache memory. * @alias 'query_cache.memory_size' */ qcm?: string; /** The used query cache memory. * @alias 'query_cache.memory_size' */ queryCacheMemory?: string; /** The query cache evictions. */ 'query_cache.evictions'?: string; /** The query cache evictions. * @alias 'query_cache.evictions' */ qce?: string; /** The query cache evictions. * @alias 'query_cache.evictions' */ queryCacheEvictions?: string; /** The number of flushes. */ 'flush.total'?: string; /** The number of flushes. * @alias 'flush.total' */ ft?: string; /** The number of flushes. 
* @alias 'flush.total' */ flushTotal?: string; /** The time spent in flush. */ 'flush.total_time'?: string; /** The time spent in flush. * @alias 'flush.total_time' */ ftt?: string; /** The time spent in flush. * @alias 'flush.total_time' */ flushTotalTime?: string; /** The number of current get operations. */ 'get.current'?: string; /** The number of current get operations. * @alias 'get.current' */ gc?: string; /** The number of current get operations. * @alias 'get.current' */ getCurrent?: string; /** The time spent in get operations. */ 'get.time'?: string; /** The time spent in get operations. * @alias 'get.time' */ gti?: string; /** The time spent in get operations. * @alias 'get.time' */ getTime?: string; /** The number of get operations. */ 'get.total'?: string; /** The number of get operations. * @alias 'get.total' */ gto?: string; /** The number of get operations. * @alias 'get.total' */ getTotal?: string; /** The time spent in successful get operations. */ 'get.exists_time'?: string; /** The time spent in successful get operations. * @alias 'get.exists_time' */ geti?: string; /** The time spent in successful get operations. * @alias 'get.exists_time' */ getExistsTime?: string; /** The number of successful get operations. */ 'get.exists_total'?: string; /** The number of successful get operations. * @alias 'get.exists_total' */ geto?: string; /** The number of successful get operations. * @alias 'get.exists_total' */ getExistsTotal?: string; /** The time spent in failed get operations. */ 'get.missing_time'?: string; /** The time spent in failed get operations. * @alias 'get.missing_time' */ gmti?: string; /** The time spent in failed get operations. * @alias 'get.missing_time' */ getMissingTime?: string; /** The number of failed get operations. */ 'get.missing_total'?: string; /** The number of failed get operations. * @alias 'get.missing_total' */ gmto?: string; /** The number of failed get operations. * @alias 'get.missing_total' */ getMissingTotal?: string; /** The number of current deletion operations. */ 'indexing.delete_current'?: string; /** The number of current deletion operations. * @alias 'indexing.delete_current' */ idc?: string; /** The number of current deletion operations. * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string; /** The time spent in deletion operations. */ 'indexing.delete_time'?: string; /** The time spent in deletion operations. * @alias 'indexing.delete_time' */ idti?: string; /** The time spent in deletion operations. * @alias 'indexing.delete_time' */ indexingDeleteTime?: string; /** The number of delete operations. */ 'indexing.delete_total'?: string; /** The number of delete operations. * @alias 'indexing.delete_total' */ idto?: string; /** The number of delete operations. * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string; /** The number of current indexing operations. */ 'indexing.index_current'?: string; /** The number of current indexing operations. * @alias 'indexing.index_current' */ iic?: string; /** The number of current indexing operations. * @alias 'indexing.index_current' */ indexingIndexCurrent?: string; /** The time spent in indexing operations. */ 'indexing.index_time'?: string; /** The time spent in indexing operations. * @alias 'indexing.index_time' */ iiti?: string; /** The time spent in indexing operations. * @alias 'indexing.index_time' */ indexingIndexTime?: string; /** The number of indexing operations. */ 'indexing.index_total'?: string; /** The number of indexing operations. 
* @alias 'indexing.index_total' */ iito?: string; /** The number of indexing operations. * @alias 'indexing.index_total' */ indexingIndexTotal?: string; /** The number of failed indexing operations. */ 'indexing.index_failed'?: string; /** The number of failed indexing operations. * @alias 'indexing.index_failed' */ iif?: string; /** The number of failed indexing operations. * @alias 'indexing.index_failed' */ indexingIndexFailed?: string; /** The number of current merge operations. */ 'merges.current'?: string; /** The number of current merge operations. * @alias 'merges.current' */ mc?: string; /** The number of current merge operations. * @alias 'merges.current' */ mergesCurrent?: string; /** The number of current merging documents. */ 'merges.current_docs'?: string; /** The number of current merging documents. * @alias 'merges.current_docs' */ mcd?: string; /** The number of current merging documents. * @alias 'merges.current_docs' */ mergesCurrentDocs?: string; /** The size of current merge operations. */ 'merges.current_size'?: string; /** The size of current merge operations. * @alias 'merges.current_size' */ mcs?: string; /** The size of current merge operations. * @alias 'merges.current_size' */ mergesCurrentSize?: string; /** The number of completed merge operations. */ 'merges.total'?: string; /** The number of completed merge operations. * @alias 'merges.total' */ mt?: string; /** The number of completed merge operations. * @alias 'merges.total' */ mergesTotal?: string; /** The number of merged documents. */ 'merges.total_docs'?: string; /** The number of merged documents. * @alias 'merges.total_docs' */ mtd?: string; /** The number of merged documents. * @alias 'merges.total_docs' */ mergesTotalDocs?: string; /** The size of current merges. */ 'merges.total_size'?: string; /** The size of current merges. * @alias 'merges.total_size' */ mts?: string; /** The size of current merges. * @alias 'merges.total_size' */ mergesTotalSize?: string; /** The time spent merging documents. */ 'merges.total_time'?: string; /** The time spent merging documents. * @alias 'merges.total_time' */ mtt?: string; /** The time spent merging documents. * @alias 'merges.total_time' */ mergesTotalTime?: string; /** The total number of refreshes. */ 'refresh.total'?: string; /** The time spent in refreshes. */ 'refresh.time'?: string; /** The total number of external refreshes. */ 'refresh.external_total'?: string; /** The total number of external refreshes. * @alias 'refresh.external_total' */ rto?: string; /** The total number of external refreshes. * @alias 'refresh.external_total' */ refreshTotal?: string; /** The time spent in external refreshes. */ 'refresh.external_time'?: string; /** The time spent in external refreshes. * @alias 'refresh.external_time' */ rti?: string; /** The time spent in external refreshes. * @alias 'refresh.external_time' */ refreshTime?: string; /** The number of pending refresh listeners. */ 'refresh.listeners'?: string; /** The number of pending refresh listeners. * @alias 'refresh.listeners' */ rli?: string; /** The number of pending refresh listeners. * @alias 'refresh.listeners' */ refreshListeners?: string; /** The current fetch phase operations. */ 'search.fetch_current'?: string; /** The current fetch phase operations. * @alias 'search.fetch_current' */ sfc?: string; /** The current fetch phase operations. * @alias 'search.fetch_current' */ searchFetchCurrent?: string; /** The time spent in fetch phase. */ 'search.fetch_time'?: string; /** The time spent in fetch phase.
* @alias 'search.fetch_time' */ sfti?: string; /** The time spent in fetch phase. * @alias 'search.fetch_time' */ searchFetchTime?: string; /** The total number of fetch operations. */ 'search.fetch_total'?: string; /** The total number of fetch operations. * @alias 'search.fetch_total' */ sfto?: string; /** The total number of fetch operations. * @alias 'search.fetch_total' */ searchFetchTotal?: string; /** The number of open search contexts. */ 'search.open_contexts'?: string; /** The number of open search contexts. * @alias 'search.open_contexts' */ so?: string; /** The number of open search contexts. * @alias 'search.open_contexts' */ searchOpenContexts?: string; /** The current query phase operations. */ 'search.query_current'?: string; /** The current query phase operations. * @alias 'search.query_current' */ sqc?: string; /** The current query phase operations. * @alias 'search.query_current' */ searchQueryCurrent?: string; /** The time spent in query phase. */ 'search.query_time'?: string; /** The time spent in query phase. * @alias 'search.query_time' */ sqti?: string; /** The time spent in query phase. * @alias 'search.query_time' */ searchQueryTime?: string; /** The total number of query phase operations. */ 'search.query_total'?: string; /** The total number of query phase operations. * @alias 'search.query_total' */ sqto?: string; /** The total number of query phase operations. * @alias 'search.query_total' */ searchQueryTotal?: string; /** The open scroll contexts. */ 'search.scroll_current'?: string; /** The open scroll contexts. * @alias 'search.scroll_current' */ scc?: string; /** The open scroll contexts. * @alias 'search.scroll_current' */ searchScrollCurrent?: string; /** The time scroll contexts were held open. */ 'search.scroll_time'?: string; /** The time scroll contexts were held open. * @alias 'search.scroll_time' */ scti?: string; /** The time scroll contexts were held open. * @alias 'search.scroll_time' */ searchScrollTime?: string; /** The number of completed scroll contexts. */ 'search.scroll_total'?: string; /** The number of completed scroll contexts. * @alias 'search.scroll_total' */ scto?: string; /** The number of completed scroll contexts. * @alias 'search.scroll_total' */ searchScrollTotal?: string; /** The number of segments. */ 'segments.count'?: string; /** The number of segments. * @alias 'segments.count' */ sc?: string; /** The number of segments. * @alias 'segments.count' */ segmentsCount?: string; /** The memory used by segments. */ 'segments.memory'?: string; /** The memory used by segments. * @alias 'segments.memory' */ sm?: string; /** The memory used by segments. * @alias 'segments.memory' */ segmentsMemory?: string; /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string; /** The memory used by the index writer. * @alias 'segments.index_writer_memory' */ siwm?: string; /** The memory used by the index writer. * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string; /** The memory used by the version map. */ 'segments.version_map_memory'?: string; /** The memory used by the version map. * @alias 'segments.version_map_memory' */ svmm?: string; /** The memory used by the version map. * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string; /** The memory used by fixed bit sets for nested object field types and type filters for types referred in `_parent` fields.
*/ 'segments.fixed_bitset_memory'?: string; /** The memory used by fixed bit sets for nested object field types and type filters for types referred in `_parent` fields. * @alias 'segments.fixed_bitset_memory' */ sfbm?: string; /** The memory used by fixed bit sets for nested object field types and type filters for types referred in `_parent` fields. * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string; /** The maximum sequence number. */ 'seq_no.max'?: string; /** The maximum sequence number. * @alias 'seq_no.max' */ sqm?: string; /** The maximum sequence number. * @alias 'seq_no.max' */ maxSeqNo?: string; /** The local checkpoint. */ 'seq_no.local_checkpoint'?: string; /** The local checkpoint. * @alias 'seq_no.local_checkpoint' */ sql?: string; /** The local checkpoint. * @alias 'seq_no.local_checkpoint' */ localCheckpoint?: string; /** The global checkpoint. */ 'seq_no.global_checkpoint'?: string; /** The global checkpoint. * @alias 'seq_no.global_checkpoint' */ sqg?: string; /** The global checkpoint. * @alias 'seq_no.global_checkpoint' */ globalCheckpoint?: string; /** The number of current warmer operations. */ 'warmer.current'?: string; /** The number of current warmer operations. * @alias 'warmer.current' */ wc?: string; /** The number of current warmer operations. * @alias 'warmer.current' */ warmerCurrent?: string; /** The total number of warmer operations. */ 'warmer.total'?: string; /** The total number of warmer operations. * @alias 'warmer.total' */ wto?: string; /** The total number of warmer operations. * @alias 'warmer.total' */ warmerTotal?: string; /** The time spent in warmer operations. */ 'warmer.total_time'?: string; /** The time spent in warmer operations. * @alias 'warmer.total_time' */ wtt?: string; /** The time spent in warmer operations. * @alias 'warmer.total_time' */ warmerTotalTime?: string; /** The shard data path. */ 'path.data'?: string; /** The shard data path. * @alias 'path.data' */ pd?: string; /** The shard data path. * @alias 'path.data' */ dataPath?: string; /** The shard state path. */ 'path.state'?: string; /** The shard state path. * @alias 'path.state' */ ps?: string; /** The shard state path. * @alias 'path.state' */ statsPath?: string; /** The number of bulk shard operations. */ 'bulk.total_operations'?: string; /** The number of bulk shard operations. * @alias 'bulk.total_operations' */ bto?: string; /** The number of bulk shard operations. * @alias 'bulk.total_operations' */ bulkTotalOperations?: string; /** The time spent in shard bulk operations. */ 'bulk.total_time'?: string; /** The time spent in shard bulk operations. * @alias 'bulk.total_time' */ btti?: string; /** The time spent in shard bulk operations. * @alias 'bulk.total_time' */ bulkTotalTime?: string; /** The total size in bytes of shard bulk operations. */ 'bulk.total_size_in_bytes'?: string; /** The total size in bytes of shard bulk operations. * @alias 'bulk.total_size_in_bytes' */ btsi?: string; /** The total size in bytes of shard bulk operations. * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string; /** The average time spent in shard bulk operations. */ 'bulk.avg_time'?: string; /** The average time spent in shard bulk operations. * @alias 'bulk.avg_time' */ bati?: string; /** The average time spent in shard bulk operations. * @alias 'bulk.avg_time' */ bulkAvgTime?: string; /** The average size in bytes of shard bulk operations. */ 'bulk.avg_size_in_bytes'?: string; /** The average size in bytes of shard bulk operations.
* @alias 'bulk.avg_size_in_bytes' */ basi?: string; /** The average size in bytes of shard bulk operations. * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string; }
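/* Usage sketch (illustrative): a shard-level overview. `h` accepts the long
 * column names or the aliases documented on CatShardsShardsRecord above (for
 * example `prirep` for the shard type); `store:desc` lists the largest shards
 * first. The index pattern is a placeholder.
 *
 *   const shards: CatShardsResponse = await client.cat.shards({
 *     format: 'json',
 *     index: 'logs-*',
 *     bytes: 'gb',
 *     h: ['index', 'shard', 'prirep', 'state', 'docs', 'store', 'unassigned.reason'],
 *     s: ['store:desc']
 *   })
 */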
export interface CatSnapshotsRequest extends CatCatRequestBase { /** A comma-separated list of snapshot repositories used to limit the request. * Accepts wildcard expressions. * `_all` returns all repositories. * If any repository fails during the request, Elasticsearch returns an error. */ repository?: Names; /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Unit used to display time values. */ time?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; ignore_unavailable?: never; h?: never; s?: never; master_timeout?: never; time?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; ignore_unavailable?: never; h?: never; s?: never; master_timeout?: never; time?: never; }; } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[]; export interface CatSnapshotsSnapshotsRecord { /** The unique identifier for the snapshot. */ id?: string; /** The unique identifier for the snapshot. * @alias id */ snapshot?: string; /** The repository name. */ repository?: string; /** The repository name. * @alias repository */ re?: string; /** The repository name. * @alias repository */ repo?: string; /** The state of the snapshot process. * Returned values include: * `FAILED`: The snapshot process failed. * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. * `IN_PROGRESS`: The snapshot process started but has not completed. * `PARTIAL`: The snapshot process completed with a partial success. * `SUCCESS`: The snapshot process completed with a full success. */ status?: string; /** The state of the snapshot process. * Returned values include: * `FAILED`: The snapshot process failed. * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. * `IN_PROGRESS`: The snapshot process started but has not completed. * `PARTIAL`: The snapshot process completed with a partial success. * `SUCCESS`: The snapshot process completed with a full success. * @alias status */ s?: string; /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. */ start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. * @alias start_epoch */ ste?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. * @alias start_epoch */ startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** The time (HH:MM:SS) at which the snapshot process started. */ start_time?: WatcherScheduleTimeOfDay; /** The time (HH:MM:SS) at which the snapshot process started. * @alias start_time */ sti?: WatcherScheduleTimeOfDay; /** The time (HH:MM:SS) at which the snapshot process started. * @alias start_time */ startTime?: WatcherScheduleTimeOfDay; /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. */ end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. * @alias end_epoch */ ete?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. * @alias end_epoch */ endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>; /** The time (HH:MM:SS) at which the snapshot process ended. */ end_time?: TimeOfDay; /** The time (HH:MM:SS) at which the snapshot process ended. * @alias end_time */ eti?: TimeOfDay; /** The time (HH:MM:SS) at which the snapshot process ended. * @alias end_time */ endTime?: TimeOfDay; /** The time it took the snapshot process to complete, in time units. */ duration?: Duration; /** The time it took the snapshot process to complete, in time units. * @alias duration */ dur?: Duration; /** The number of indices in the snapshot. */ indices?: string; /** The number of indices in the snapshot. * @alias indices */ i?: string; /** The number of successful shards in the snapshot. */ successful_shards?: string; /** The number of successful shards in the snapshot. * @alias successful_shards */ ss?: string; /** The number of failed shards in the snapshot. */ failed_shards?: string; /** The number of failed shards in the snapshot. * @alias failed_shards */ fs?: string; /** The total number of shards in the snapshot. */ total_shards?: string; /** The total number of shards in the snapshot. * @alias total_shards */ ts?: string; /** The reason for any snapshot failures. */ reason?: string; /** The reason for any snapshot failures. * @alias reason */ r?: string; }
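/* Usage sketch (illustrative): listing the snapshots of one repository. The
 * repository name is a placeholder; `ignore_unavailable` is documented on
 * CatSnapshotsRequest above.
 *
 *   const snapshots: CatSnapshotsResponse = await client.cat.snapshots({
 *     format: 'json',
 *     repository: 'my_backup_repo',  // hypothetical repository name
 *     ignore_unavailable: true,      // skip snapshots that cannot be read
 *     s: ['start_epoch']             // oldest snapshots first
 *   })
 */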
export interface CatTasksRequest extends CatCatRequestBase { /** The task action names, which are used to limit the response. */ actions?: string[]; /** If `true`, the response includes detailed information about the running tasks. */ detailed?: boolean; /** Unique node identifiers, which are used to limit the response. */ nodes?: string[]; /** The parent task identifier, which is used to limit the response. */ parent_task_id?: string; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** Unit used to display time values. */ time?: TimeUnit; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { actions?: never; detailed?: never; nodes?: never; parent_task_id?: never; h?: never; s?: never; time?: never; timeout?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { actions?: never; detailed?: never; nodes?: never; parent_task_id?: never; h?: never; s?: never; time?: never; timeout?: never; wait_for_completion?: never; }; } export type CatTasksResponse = CatTasksTasksRecord[];
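/* Usage sketch (illustrative): inspecting currently running tasks. The
 * `actions` wildcard narrows the response to search-related tasks; parameter
 * names come from CatTasksRequest above.
 *
 *   const tasks: CatTasksResponse = await client.cat.tasks({
 *     format: 'json',
 *     actions: ['*search*'],  // only tasks whose action name matches
 *     detailed: true,
 *     time: 'ms'              // running times in milliseconds
 *   })
 */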
export interface CatTasksTasksRecord { /** The identifier of the task with the node. */ id?: Id; /** The task action. */ action?: string; /** The task action. * @alias action */ ac?: string; /** The unique task identifier. */ task_id?: Id; /** The unique task identifier. * @alias task_id */ ti?: Id; /** The parent task identifier. */ parent_task_id?: string; /** The parent task identifier. * @alias parent_task_id */ pti?: string; /** The task type. */ type?: string; /** The task type. * @alias type */ ty?: string; /** The start time in milliseconds. */ start_time?: string; /** The start time in milliseconds. * @alias start_time */ start?: string; /** The start time in `HH:MM:SS` format. */ timestamp?: string; /** The start time in `HH:MM:SS` format. * @alias timestamp */ ts?: string; /** The start time in `HH:MM:SS` format. * @alias timestamp */ hms?: string; /** The start time in `HH:MM:SS` format. * @alias timestamp */ hhmmss?: string; /** The running time in nanoseconds. */ running_time_ns?: string; /** The running time. */ running_time?: string; /** The running time. * @alias running_time */ time?: string; /** The unique node identifier. */ node_id?: NodeId; /** The unique node identifier. * @alias node_id */ ni?: NodeId; /** The IP address for the node. */ ip?: string; /** The IP address for the node. * @alias ip */ i?: string; /** The bound transport port for the node. */ port?: string; /** The bound transport port for the node. * @alias port */ po?: string; /** The node name. */ node?: string; /** The node name. * @alias node */ n?: string; /** The Elasticsearch version. */ version?: VersionString; /** The Elasticsearch version. * @alias version */ v?: VersionString; /** The X-Opaque-ID header. */ x_opaque_id?: string; /** The X-Opaque-ID header. * @alias x_opaque_id */ x?: string; /** The task action description. */ description?: string; /** The task action description. * @alias description */ desc?: string; } export interface CatTemplatesRequest extends CatCatRequestBase { /** The name of the template to return. * Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; h?: never; s?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; h?: never; s?: never; local?: never; master_timeout?: never; }; } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[]; export interface CatTemplatesTemplatesRecord { /** The template name. */ name?: Name; /** The template name. * @alias name */ n?: Name; /** The template index patterns. */ index_patterns?: string; /** The template index patterns. * @alias index_patterns */ t?: string; /** The template application order or priority number. */ order?: string; /** The template application order or priority number. * @alias order */ o?: string; /** The template application order or priority number. * @alias order */ p?: string; /** The template version. */ version?: VersionString | null; /** The template version. * @alias version */ v?: VersionString | null; /** The component templates that comprise the index template. */ composed_of?: string; /** The component templates that comprise the index template. * @alias composed_of */ c?: string; } export interface CatThreadPoolRequest extends CatCatRequestBase { /** A comma-separated list of thread pool names used to limit the request. * Accepts wildcard expressions. */ thread_pool_patterns?: Names; /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names; /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names; /** The unit used to display time values. */ time?: TimeUnit; /** If `true`, the request computes the list of selected nodes from the * local cluster state. If `false`, the list of selected nodes is computed * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { thread_pool_patterns?: never; h?: never; s?: never; time?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { thread_pool_patterns?: never; h?: never; s?: never; time?: never; local?: never; master_timeout?: never; }; } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[];
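/* Usage sketch (illustrative): checking queue pressure on selected thread
 * pools. `thread_pool_patterns` accepts wildcard expressions, as documented
 * on CatThreadPoolRequest above.
 *
 *   const pools: CatThreadPoolResponse = await client.cat.threadPool({
 *     format: 'json',
 *     thread_pool_patterns: ['search', 'write'],
 *     h: ['node_name', 'name', 'active', 'queue', 'rejected'],
 *     s: ['rejected:desc']  // pools with the most rejections first
 *   })
 */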
*/ pool_size?: string; /** The number of threads in the current thread pool. * @alias pool_size */ psz?: string; /** The number of tasks currently in queue. */ queue?: string; /** The number of tasks currently in queue. * @alias queue */ q?: string; /** The maximum number of tasks permitted in the queue. */ queue_size?: string; /** The maximum number of tasks permitted in the queue. * @alias queue_size */ qs?: string; /** The number of rejected tasks. */ rejected?: string; /** The number of rejected tasks. * @alias rejected */ r?: string; /** The highest number of active threads in the current thread pool. */ largest?: string; /** The highest number of active threads in the current thread pool. * @alias largest */ l?: string; /** The number of completed tasks. */ completed?: string; /** The number of completed tasks. * @alias completed */ c?: string; /** The core number of active threads allowed in a scaling thread pool. */ core?: string | null; /** The core number of active threads allowed in a scaling thread pool. * @alias core */ cr?: string | null; /** The maximum number of active threads allowed in a scaling thread pool. */ max?: string | null; /** The maximum number of active threads allowed in a scaling thread pool. * @alias max */ mx?: string | null; /** The number of active threads allowed in a fixed thread pool. */ size?: string | null; /** The number of active threads allowed in a fixed thread pool. * @alias size */ sz?: string | null; /** The thread keep alive time. */ keep_alive?: string | null; /** The thread keep alive time. * @alias keep_alive */ ka?: string | null; } export interface CatTransformsRequest extends CatCatRequestBase { /** A transform identifier or a wildcard expression. * If you do not specify one of these options, the API returns information for all transforms. */ transform_id?: Id; /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. * If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** Skips the specified number of transforms. */ from?: integer; /** Comma-separated list of column names to display. */ h?: CatCatTransformColumns; /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatTransformColumns; /** The unit used to display time values. */ time?: TimeUnit; /** The maximum number of transforms to obtain. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; allow_no_match?: never; from?: never; h?: never; s?: never; time?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; allow_no_match?: never; from?: never; h?: never; s?: never; time?: never; size?: never; }; } export type CatTransformsResponse = CatTransformsTransformsRecord[]; export interface CatTransformsTransformsRecord { /** The transform identifier. */ id?: Id; /** The status of the transform. * Returned values include: * `aborting`: The transform is aborting. * `failed: The transform failed. 
For more information about the failure, check the `reason` field. * `indexing`: The transform is actively processing data and creating new documents. * `started`: The transform is running but not actively indexing data. * `stopped`: The transform is stopped. * `stopping`: The transform is stopping. */ state?: string; /** The status of the transform. * Returned values include: * `aborting`: The transform is aborting. * `failed`: The transform failed. For more information about the failure, check the `reason` field. * `indexing`: The transform is actively processing data and creating new documents. * `started`: The transform is running but not actively indexing data. * `stopped`: The transform is stopped. * `stopping`: The transform is stopping. * @alias state */ s?: string; /** The sequence number for the checkpoint. */ checkpoint?: string; /** The sequence number for the checkpoint. * @alias checkpoint */ c?: string; /** The number of documents that have been processed from the source index of the transform. */ documents_processed?: string; /** The number of documents that have been processed from the source index of the transform. * @alias documents_processed */ docp?: string; /** The number of documents that have been processed from the source index of the transform. * @alias documents_processed */ documentsProcessed?: string; /** The progress of the next checkpoint that is currently in progress. */ checkpoint_progress?: string | null; /** The progress of the next checkpoint that is currently in progress. * @alias checkpoint_progress */ cp?: string | null; /** The progress of the next checkpoint that is currently in progress. * @alias checkpoint_progress */ checkpointProgress?: string | null; /** The timestamp of the last search in the source indices. * This field is shown only if the transform is running. */ last_search_time?: string | null; /** The timestamp of the last search in the source indices. * This field is shown only if the transform is running. * @alias last_search_time */ lst?: string | null; /** The timestamp of the last search in the source indices. * This field is shown only if the transform is running. * @alias last_search_time */ lastSearchTime?: string | null; /** The timestamp when changes were last detected in the source indices. */ changes_last_detection_time?: string | null; /** The timestamp when changes were last detected in the source indices. * @alias changes_last_detection_time */ cldt?: string | null; /** The time the transform was created. */ create_time?: string; /** The time the transform was created. * @alias create_time */ ct?: string; /** The time the transform was created. * @alias create_time */ createTime?: string; /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString; /** The version of Elasticsearch that existed on the node when the transform was created. * @alias version */ v?: VersionString; /** The source indices for the transform. */ source_index?: string; /** The source indices for the transform. * @alias source_index */ si?: string; /** The source indices for the transform. * @alias source_index */ sourceIndex?: string; /** The destination index for the transform. */ dest_index?: string; /** The destination index for the transform. * @alias dest_index */ di?: string; /** The destination index for the transform. * @alias dest_index */ destIndex?: string; /** The unique identifier for the ingest pipeline. */ pipeline?: string; /** The unique identifier for the ingest pipeline.
* @alias pipeline */ p?: string; /** The description of the transform. */ description?: string; /** The description of the transform. * @alias description */ d?: string; /** The type of transform: `batch` or `continuous`. */ transform_type?: string; /** The type of transform: `batch` or `continuous`. * @alias transform_type */ tt?: string; /** The interval between checks for changes in the source indices when the transform is running continuously. */ frequency?: string; /** The interval between checks for changes in the source indices when the transform is running continuously. * @alias frequency */ f?: string; /** The initial page size that is used for the composite aggregation for each checkpoint. */ max_page_search_size?: string; /** The initial page size that is used for the composite aggregation for each checkpoint. * @alias max_page_search_size */ mpsz?: string; /** The number of input documents per second. */ docs_per_second?: string; /** The number of input documents per second. * @alias docs_per_second */ dps?: string; /** If a transform has a `failed` state, these details describe the reason for failure. */ reason?: string; /** If a transform has a `failed` state, these details describe the reason for failure. * @alias reason */ r?: string; /** The total number of search operations on the source index for the transform. */ search_total?: string; /** The total number of search operations on the source index for the transform. * @alias search_total */ st?: string; /** The total number of search failures. */ search_failure?: string; /** The total number of search failures. * @alias search_failure */ sf?: string; /** The total amount of search time, in milliseconds. */ search_time?: string; /** The total amount of search time, in milliseconds. * @alias search_time */ stime?: string; /** The total number of index operations done by the transform. */ index_total?: string; /** The total number of index operations done by the transform. * @alias index_total */ it?: string; /** The total number of indexing failures. */ index_failure?: string; /** The total number of indexing failures. * @alias index_failure */ if?: string; /** The total time spent indexing documents, in milliseconds. */ index_time?: string; /** The total time spent indexing documents, in milliseconds. * @alias index_time */ itime?: string; /** The number of documents that have been indexed into the destination index for the transform. */ documents_indexed?: string; /** The number of documents that have been indexed into the destination index for the transform. * @alias documents_indexed */ doci?: string; /** The total time spent deleting documents, in milliseconds. */ delete_time?: string; /** The total time spent deleting documents, in milliseconds. * @alias delete_time */ dtime?: string; /** The number of documents deleted from the destination index due to the retention policy for the transform. */ documents_deleted?: string; /** The number of documents deleted from the destination index due to the retention policy for the transform. * @alias documents_deleted */ docd?: string; /** The number of times the transform has been triggered by the scheduler. * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. */ trigger_count?: string; /** The number of times the transform has been triggered by the scheduler. 
* For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. * @alias trigger_count */ tc?: string; /** The number of search or bulk index operations processed. * Documents are processed in batches instead of individually. */ pages_processed?: string; /** The number of search or bulk index operations processed. * Documents are processed in batches instead of individually. * @alias pages_processed */ pp?: string; /** The total time spent processing results, in milliseconds. */ processing_time?: string; /** The total time spent processing results, in milliseconds. * @alias processing_time */ pt?: string; /** The exponential moving average of the duration of the checkpoint, in milliseconds. */ checkpoint_duration_time_exp_avg?: string; /** The exponential moving average of the duration of the checkpoint, in milliseconds. * @alias checkpoint_duration_time_exp_avg */ cdtea?: string; /** The exponential moving average of the duration of the checkpoint, in milliseconds. * @alias checkpoint_duration_time_exp_avg */ checkpointTimeExpAvg?: string; /** The exponential moving average of the number of new documents that have been indexed. */ indexed_documents_exp_avg?: string; /** The exponential moving average of the number of new documents that have been indexed. * @alias indexed_documents_exp_avg */ idea?: string; /** The exponential moving average of the number of documents that have been processed. */ processed_documents_exp_avg?: string; /** The exponential moving average of the number of documents that have been processed. * @alias processed_documents_exp_avg */ pdea?: string; } export interface CcrFollowIndexStats { /** The name of the follower index. */ index: IndexName; /** An array of shard-level following task statistics. */ shards: CcrShardStats[]; } export interface CcrReadException { /** The exception that caused the read to fail. */ exception: ErrorCause; /** The starting sequence number of the batch requested from the leader. */ from_seq_no: SequenceNumber; /** The number of times the batch has been retried. */ retries: integer; } export interface CcrShardStats { /** The total number of transferred bytes read from the leader. * This is only an estimate and does not account for compression if enabled. */ bytes_read: long; /** The number of failed reads. */ failed_read_requests: long; /** The number of failed bulk write requests on the follower. */ failed_write_requests: long; fatal_exception?: ErrorCause; /** The index aliases version the follower is synced up to. */ follower_aliases_version: VersionNumber; /** The current global checkpoint on the follower. * The difference between the `leader_global_checkpoint` and the `follower_global_checkpoint` is an indication of how much the follower is lagging the leader. */ follower_global_checkpoint: long; /** The name of the follower index. */ follower_index: string; /** The mapping version the follower is synced up to. */ follower_mapping_version: VersionNumber; /** The current maximum sequence number on the follower. */ follower_max_seq_no: SequenceNumber; /** The index settings version the follower is synced up to. */ follower_settings_version: VersionNumber; /** The starting sequence number of the last batch of operations requested from the leader. */ last_requested_seq_no: SequenceNumber; /** The current global checkpoint on the leader known to the follower task.
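 *
 * Editor's sketch (an assumption, not upstream documentation): combined with
 * `follower_global_checkpoint` above, this value gives a rough replication lag
 * in operations for a CcrShardStats value `shardStats`:
 *
 *   const opsLag = shardStats.leader_global_checkpoint - shardStats.follower_global_checkpoint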
*/ leader_global_checkpoint: long; /** The name of the index in the leader cluster being followed. */ leader_index: string; /** The current maximum sequence number on the leader known to the follower task. */ leader_max_seq_no: SequenceNumber; /** The total number of operations read from the leader. */ operations_read: long; /** The number of operations written on the follower. */ operations_written: long; /** The number of active read requests from the follower. */ outstanding_read_requests: integer; /** The number of active bulk write requests on the follower. */ outstanding_write_requests: integer; /** An array of objects representing failed reads. */ read_exceptions: CcrReadException[]; /** The remote cluster containing the leader index. */ remote_cluster: string; /** The numerical shard ID, with values from 0 to one less than the number of shards. */ shard_id: integer; /** The number of successful fetches. */ successful_read_requests: long; /** The number of bulk write requests run on the follower. */ successful_write_requests: long; time_since_last_read?: Duration; /** The number of milliseconds since a read request was sent to the leader. * When the follower is caught up to the leader, this number will increase up to the configured `read_poll_timeout`, at which point another read request will be sent to the leader. */ time_since_last_read_millis: DurationValue; total_read_remote_exec_time?: Duration; /** The total time reads spent running on the remote cluster. */ total_read_remote_exec_time_millis: DurationValue; total_read_time?: Duration; /** The total time reads were outstanding, measured from the time a read was sent to the leader to the time a reply was returned to the follower. */ total_read_time_millis: DurationValue; total_write_time?: Duration; /** The total time spent writing on the follower. */ total_write_time_millis: DurationValue; /** The number of write operations queued on the follower. */ write_buffer_operation_count: long; /** The total number of bytes of operations currently queued for writing. */ write_buffer_size_in_bytes: ByteSize; } export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { /** The auto-follow pattern collection to delete. */ name: Name; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase; export interface CcrFollowRequest extends RequestBase { /** The name of the follower index. */ index: IndexName; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Specifies the number of shards to wait on being active before responding. By default, the request does not wait for any shards to be * active. * A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the * remote Lucene segment files to the follower index.
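 *
 * For example (editor's sketch; the cluster alias and index names are illustrative,
 * and `client` is assumed to be a configured `@elastic/elasticsearch` Client):
 *
 *   await client.ccr.follow({
 *     index: 'follower-index',
 *     remote_cluster: 'remote_cluster_a',
 *     leader_index: 'leader-index',
 *     wait_for_active_shards: 1,
 *   })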
*/ wait_for_active_shards?: WaitForActiveShards; /** If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. */ data_stream_name?: string; /** The name of the index in the leader cluster to follow. */ leader_index: IndexName; /** The maximum number of outstanding read requests from the remote cluster. */ max_outstanding_read_requests?: long; /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer; /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer; /** The maximum size in bytes per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize; /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when * retrying. */ max_retry_delay?: Duration; /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be * deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer; /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will * be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize; /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer; /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize; /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. * Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration; /** The remote cluster containing the leader index. */ remote_cluster: string; /** Settings to override from the leader index. */ settings?: IndicesIndexSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; master_timeout?: never; wait_for_active_shards?: never; data_stream_name?: never; leader_index?: never; max_outstanding_read_requests?: never; max_outstanding_write_requests?: never; max_read_request_operation_count?: never; max_read_request_size?: never; max_retry_delay?: never; max_write_buffer_count?: never; max_write_buffer_size?: never; max_write_request_operation_count?: never; max_write_request_size?: never; read_poll_timeout?: never; remote_cluster?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring.
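 *
 * Editor's note on the catch-all pattern (applies to `body` above as well): the
 * `{ param?: never }` intersection makes any parameter that is already modeled on
 * this interface a compile-time error when passed through the catch-all, while
 * unmodeled parameters still pass through. For instance:
 *
 *   // Type error: `remote_cluster` is declared `never` in the catch-all.
 *   // querystring: { remote_cluster: 'remote_cluster_a' }
 *   // OK: `error_trace` is a standard, unmodeled query parameter.
 *   // querystring: { error_trace: true }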
*/ querystring?: { [key: string]: any; } & { index?: never; master_timeout?: never; wait_for_active_shards?: never; data_stream_name?: never; leader_index?: never; max_outstanding_read_requests?: never; max_outstanding_write_requests?: never; max_read_request_operation_count?: never; max_read_request_size?: never; max_retry_delay?: never; max_write_buffer_count?: never; max_write_buffer_size?: never; max_write_request_operation_count?: never; max_write_request_size?: never; read_poll_timeout?: never; remote_cluster?: never; settings?: never; }; } export interface CcrFollowResponse { follow_index_created: boolean; follow_index_shards_acked: boolean; index_following_started: boolean; } export interface CcrFollowInfoFollowerIndex { /** The name of the follower index. */ follower_index: IndexName; /** The name of the index in the leader cluster that is followed. */ leader_index: IndexName; /** An object that encapsulates cross-cluster replication parameters. If the follower index's status is paused, this object is omitted. */ parameters?: CcrFollowInfoFollowerIndexParameters; /** The remote cluster that contains the leader index. */ remote_cluster: Name; /** The status of the index following: `active` or `paused`. */ status: CcrFollowInfoFollowerIndexStatus; } export interface CcrFollowInfoFollowerIndexParameters { /** The maximum number of outstanding read requests from the remote cluster. */ max_outstanding_read_requests?: long; /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer; /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer; /** The maximum size in bytes per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize; /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when * retrying. */ max_retry_delay?: Duration; /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be * deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer; /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will * be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize; /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer; /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize; /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. * Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration; } export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused'; export interface CcrFollowInfoRequest extends RequestBase { /** A comma-delimited list of follower index patterns. */ index: Indices; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error.
* It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; master_timeout?: never; }; } export interface CcrFollowInfoResponse { follower_indices: CcrFollowInfoFollowerIndex[]; } export interface CcrFollowStatsRequest extends RequestBase { /** A comma-delimited list of index patterns. */ index: Indices; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; timeout?: never; }; } export interface CcrFollowStatsResponse { /** An array of follower index statistics. */ indices: CcrFollowIndexStats[]; } export interface CcrForgetFollowerRequest extends RequestBase { /** The name of the leader index for which specified follower retention leases should be removed. */ index: IndexName; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; follower_cluster?: string; follower_index?: IndexName; follower_index_uuid?: Uuid; leader_remote_cluster?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; timeout?: never; follower_cluster?: never; follower_index?: never; follower_index_uuid?: never; leader_remote_cluster?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; timeout?: never; follower_cluster?: never; follower_index?: never; follower_index_uuid?: never; leader_remote_cluster?: never; }; } export interface CcrForgetFollowerResponse { _shards: ShardStatistics; } export interface CcrGetAutoFollowPatternAutoFollowPattern { name: Name; pattern: CcrGetAutoFollowPatternAutoFollowPatternSummary; } export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { active: boolean; /** The remote cluster containing the leader indices to match against. */ remote_cluster: string; /** The name of the follower index. */ follow_index_pattern?: IndexPattern; /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ leader_index_patterns: IndexPatterns; /** An array of simple index patterns that can be used to exclude indices from being auto-followed. */ leader_index_exclusion_patterns: IndexPatterns; /** The maximum number of outstanding read requests from the remote cluster. */ max_outstanding_read_requests: integer; } export interface CcrGetAutoFollowPatternRequest extends RequestBase { /** The auto-follow pattern collection that you want to retrieve. * If you do not specify a name, the API returns information for all collections. */ name?: Name; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error.
* It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export interface CcrGetAutoFollowPatternResponse { patterns: CcrGetAutoFollowPatternAutoFollowPattern[]; } export interface CcrPauseAutoFollowPatternRequest extends RequestBase { /** The name of the auto-follow pattern to pause. */ name: Name; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase; export interface CcrPauseFollowRequest extends RequestBase { /** The name of the follower index. */ index: IndexName; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; master_timeout?: never; }; } export type CcrPauseFollowResponse = AcknowledgedResponseBase; export interface CcrPutAutoFollowPatternRequest extends RequestBase { /** The name of the collection of auto-follow patterns. */ name: Name; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** The remote cluster containing the leader indices to match against. */ remote_cluster: string; /** The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices. */ follow_index_pattern?: IndexPattern; /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ leader_index_patterns?: IndexPatterns; /** An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. */ leader_index_exclusion_patterns?: IndexPatterns; /** The maximum number of outstanding read requests from the remote cluster. */ max_outstanding_read_requests?: integer; /** Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
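 *
 * For example (editor's sketch; pattern, cluster, and setting values are illustrative,
 * and `client` is assumed to be a configured `@elastic/elasticsearch` Client):
 *
 *   await client.ccr.putAutoFollowPattern({
 *     name: 'logs-pattern',
 *     remote_cluster: 'remote_cluster_a',
 *     leader_index_patterns: ['logs-*'],
 *     follow_index_pattern: '{{leader_index}}-copy',
 *     settings: { 'index.number_of_replicas': 0 }, // overridable, unlike index.number_of_shards
 *   })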
*/ settings?: Record; /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer; /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration; /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer; /** The maximum size in bytes per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize; /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. */ max_retry_delay?: Duration; /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer; /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize; /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer; /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; remote_cluster?: never; follow_index_pattern?: never; leader_index_patterns?: never; leader_index_exclusion_patterns?: never; max_outstanding_read_requests?: never; settings?: never; max_outstanding_write_requests?: never; read_poll_timeout?: never; max_read_request_operation_count?: never; max_read_request_size?: never; max_retry_delay?: never; max_write_buffer_count?: never; max_write_buffer_size?: never; max_write_request_operation_count?: never; max_write_request_size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; remote_cluster?: never; follow_index_pattern?: never; leader_index_patterns?: never; leader_index_exclusion_patterns?: never; max_outstanding_read_requests?: never; settings?: never; max_outstanding_write_requests?: never; read_poll_timeout?: never; max_read_request_operation_count?: never; max_read_request_size?: never; max_retry_delay?: never; max_write_buffer_count?: never; max_write_buffer_size?: never; max_write_request_operation_count?: never; max_write_request_size?: never; }; } export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase; export interface CcrResumeAutoFollowPatternRequest extends RequestBase { /** The name of the auto-follow pattern to resume. */ name: Name; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body.
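 *
 * For example (editor's sketch; the pattern name is illustrative):
 *
 *   await client.ccr.resumeAutoFollowPattern({ name: 'logs-pattern' })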
*/ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase; export interface CcrResumeFollowRequest extends RequestBase { /** The name of the follow index to resume following. */ index: IndexName; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; max_outstanding_read_requests?: long; max_outstanding_write_requests?: long; max_read_request_operation_count?: long; max_read_request_size?: string; max_retry_delay?: Duration; max_write_buffer_count?: long; max_write_buffer_size?: string; max_write_request_operation_count?: long; max_write_request_size?: string; read_poll_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; master_timeout?: never; max_outstanding_read_requests?: never; max_outstanding_write_requests?: never; max_read_request_operation_count?: never; max_read_request_size?: never; max_retry_delay?: never; max_write_buffer_count?: never; max_write_buffer_size?: never; max_write_request_operation_count?: never; max_write_request_size?: never; read_poll_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; master_timeout?: never; max_outstanding_read_requests?: never; max_outstanding_write_requests?: never; max_read_request_operation_count?: never; max_read_request_size?: never; max_retry_delay?: never; max_write_buffer_count?: never; max_write_buffer_size?: never; max_write_request_operation_count?: never; max_write_request_size?: never; read_poll_timeout?: never; }; } export type CcrResumeFollowResponse = AcknowledgedResponseBase; export interface CcrStatsAutoFollowStats { auto_followed_clusters: CcrStatsAutoFollowedCluster[]; /** The number of indices that the auto-follow coordinator failed to automatically follow. * The causes of recent failures are captured in the logs of the elected master node and in the `auto_follow_stats.recent_auto_follow_errors` field. */ number_of_failed_follow_indices: long; /** The number of times that the auto-follow coordinator failed to retrieve the cluster state from a remote cluster registered in a collection of auto-follow patterns. */ number_of_failed_remote_cluster_state_requests: long; /** The number of indices that the auto-follow coordinator successfully followed. */ number_of_successful_follow_indices: long; /** An array of objects representing failures by the auto-follow coordinator. */ recent_auto_follow_errors: ErrorCause[]; } export interface CcrStatsAutoFollowedCluster { cluster_name: Name; last_seen_metadata_version: VersionNumber; time_since_last_check_millis: DurationValue; } export interface CcrStatsFollowStats { indices: CcrFollowIndexStats[]; } export interface CcrStatsRequest extends RequestBase { /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
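 *
 * For example (editor's sketch):
 *
 *   const stats = await client.ccr.stats({ timeout: '30s' })
 *   console.log(stats.auto_follow_stats.number_of_failed_follow_indices)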
*/ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export interface CcrStatsResponse { /** Statistics for the auto-follow coordinator. */ auto_follow_stats: CcrStatsAutoFollowStats; /** Shard-level statistics for follower indices. */ follow_stats: CcrStatsFollowStats; } export interface CcrUnfollowRequest extends RequestBase { /** The name of the follower index. */ index: IndexName; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; master_timeout?: never; }; } export type CcrUnfollowResponse = AcknowledgedResponseBase; export interface ClusterComponentTemplate { name: Name; component_template: ClusterComponentTemplateNode; } export interface ClusterComponentTemplateNode { template: ClusterComponentTemplateSummary; version?: VersionNumber; _meta?: Metadata; deprecated?: boolean; } export interface ClusterComponentTemplateSummary { _meta?: Metadata; version?: VersionNumber; settings?: Record; mappings?: MappingTypeMapping; aliases?: Record; lifecycle?: IndicesDataStreamLifecycleWithRollover; } export interface ClusterAllocationExplainAllocationDecision { decider: string; decision: ClusterAllocationExplainAllocationExplainDecision; explanation: string; } export type ClusterAllocationExplainAllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS'; export interface ClusterAllocationExplainAllocationStore { allocation_id: string; found: boolean; in_sync: boolean; matching_size_in_bytes: long; matching_sync_id: boolean; store_exception: string; } export interface ClusterAllocationExplainClusterInfo { nodes: Record; shard_sizes: Record; shard_data_set_sizes?: Record; shard_paths: Record; reserved_sizes: ClusterAllocationExplainReservedSize[]; } export interface ClusterAllocationExplainCurrentNode { id: Id; name: Name; roles: NodeRoles; attributes: Record; transport_address: TransportAddress; weight_ranking: integer; } export type ClusterAllocationExplainDecision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt'; export interface ClusterAllocationExplainDiskUsage { path: string; total_bytes: long; used_bytes: long; free_bytes: long; free_disk_percent: double; used_disk_percent: double; } export interface ClusterAllocationExplainNodeAllocationExplanation { deciders: ClusterAllocationExplainAllocationDecision[]; node_attributes: Record; node_decision: ClusterAllocationExplainDecision; node_id: Id; node_name: Name; roles: NodeRoles; store?: ClusterAllocationExplainAllocationStore; transport_address: TransportAddress; weight_ranking: integer; } export interface ClusterAllocationExplainNodeDiskUsage { node_name: Name; least_available: ClusterAllocationExplainDiskUsage; most_available: ClusterAllocationExplainDiskUsage; } export 
interface ClusterAllocationExplainRequest extends RequestBase { /** If true, returns information about disk usage and shard sizes. */ include_disk_info?: boolean; /** If true, returns YES decisions in explanation. */ include_yes_decisions?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Specifies the node ID or the name of the node so that only a shard currently located on the specified node is explained. */ current_node?: string; /** Specifies the name of the index that you would like an explanation for. */ index?: IndexName; /** If true, returns the explanation for the primary shard for the given shard ID. */ primary?: boolean; /** Specifies the ID of the shard that you would like an explanation for. */ shard?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { include_disk_info?: never; include_yes_decisions?: never; master_timeout?: never; current_node?: never; index?: never; primary?: never; shard?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { include_disk_info?: never; include_yes_decisions?: never; master_timeout?: never; current_node?: never; index?: never; primary?: never; shard?: never; }; } export interface ClusterAllocationExplainReservedSize { node_id: Id; path: string; total: long; shards: string[]; } export interface ClusterAllocationExplainResponse { allocate_explanation?: string; allocation_delay?: Duration; allocation_delay_in_millis?: DurationValue; can_allocate?: ClusterAllocationExplainDecision; can_move_to_other_node?: ClusterAllocationExplainDecision; can_rebalance_cluster?: ClusterAllocationExplainDecision; can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[]; can_rebalance_to_other_node?: ClusterAllocationExplainDecision; can_remain_decisions?: ClusterAllocationExplainAllocationDecision[]; can_remain_on_current_node?: ClusterAllocationExplainDecision; cluster_info?: ClusterAllocationExplainClusterInfo; configured_delay?: Duration; configured_delay_in_millis?: DurationValue; current_node?: ClusterAllocationExplainCurrentNode; current_state: string; index: IndexName; move_explanation?: string; node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[]; primary: boolean; rebalance_explanation?: string; remaining_delay?: Duration; remaining_delay_in_millis?: DurationValue; shard: integer; unassigned_info?: ClusterAllocationExplainUnassignedInformation; note?: string; } export interface ClusterAllocationExplainUnassignedInformation { at: DateTime; last_allocation_status?: string; reason: ClusterAllocationExplainUnassignedInformationReason; details?: string; failed_allocation_attempts?: integer; delayed?: boolean; allocation_status?: string; } export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION'; export interface ClusterDeleteComponentTemplateRequest extends RequestBase { /** Comma-separated list or wildcard expression of component template names used to limit the request. */ name: Names; /** Period to wait for a connection to the master node.
* If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase; export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Specifies whether to wait for all excluded nodes to be removed from the * cluster before clearing the voting configuration exclusions list. * Defaults to true, meaning that all excluded nodes must be removed from * the cluster before this API takes any action. If set to false then the * voting configuration exclusions list is cleared even if some excluded * nodes are still in the cluster. */ wait_for_removal?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; wait_for_removal?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; wait_for_removal?: never; }; } export type ClusterDeleteVotingConfigExclusionsResponse = boolean; export interface ClusterExistsComponentTemplateRequest extends RequestBase { /** Comma-separated list of component template names used to limit the request. * Wildcard (*) expressions are supported. */ name: Names; /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an * error. */ master_timeout?: Duration; /** If true, the request retrieves information from the local node only. * Defaults to false, which means information is retrieved from the master node. */ local?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; local?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; local?: never; }; } export type ClusterExistsComponentTemplateResponse = boolean; export interface ClusterGetComponentTemplateRequest extends RequestBase { /** Comma-separated list of component template names used to limit the request. * Wildcard (`*`) expressions are supported. */ name?: Name; /** If `true`, returns settings in flat format. */ flat_settings?: boolean; /** Return all default configurations for the component template (default: false) */ include_defaults?: boolean; /** If `true`, the request retrieves information from the local node only. * If `false`, information is retrieved from the master node. */ local?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. 
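 *
 * For example (editor's sketch; the wildcard name is illustrative):
 *
 *   const { component_templates } = await client.cluster.getComponentTemplate({ name: 'logs-*' })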
*/ body?: string | ({ [key: string]: any; } & { name?: never; flat_settings?: never; include_defaults?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; flat_settings?: never; include_defaults?: never; local?: never; master_timeout?: never; }; } export interface ClusterGetComponentTemplateResponse { component_templates: ClusterComponentTemplate[]; } export interface ClusterGetSettingsRequest extends RequestBase { /** If `true`, returns settings in flat format. */ flat_settings?: boolean; /** If `true`, returns default cluster settings from the local node. */ include_defaults?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { flat_settings?: never; include_defaults?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { flat_settings?: never; include_defaults?: never; master_timeout?: never; timeout?: never; }; } export interface ClusterGetSettingsResponse { /** The settings that persist after the cluster restarts. */ persistent: Record; /** The settings that do not persist after the cluster restarts. */ transient: Record; /** The default setting values. */ defaults?: Record; } export interface ClusterHealthHealthResponseBody { /** The number of active primary shards. */ active_primary_shards: integer; /** The total number of active primary and replica shards. */ active_shards: integer; /** The ratio of active shards in the cluster expressed as a string formatted percentage. */ active_shards_percent?: string; /** The ratio of active shards in the cluster expressed as a percentage. */ active_shards_percent_as_number: double; /** The name of the cluster. */ cluster_name: Name; /** The number of shards whose allocation has been delayed by the timeout settings. */ delayed_unassigned_shards: integer; indices?: Record; /** The number of shards that are under initialization. */ initializing_shards: integer; /** The number of nodes that are dedicated data nodes. */ number_of_data_nodes: integer; /** The number of unfinished fetches. */ number_of_in_flight_fetch: integer; /** The number of nodes within the cluster. */ number_of_nodes: integer; /** The number of cluster-level changes that have not yet been executed. */ number_of_pending_tasks: integer; /** The number of shards that are under relocation. */ relocating_shards: integer; status: HealthStatus; /** The time the earliest initiated task has been waiting to be performed. */ task_max_waiting_in_queue?: Duration; /** The time, expressed in milliseconds, that the earliest initiated task has been waiting to be performed. */ task_max_waiting_in_queue_millis: DurationValue; /** If `false`, the response was returned within the period of time specified by the `timeout` parameter (`30s` by default). */ timed_out: boolean; /** The number of primary shards that are not allocated. */ unassigned_primary_shards: integer; /** The number of shards that are not allocated.
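 *
 * Editor's sketch: a response body of this shape is returned by the cluster health
 * API, for example:
 *
 *   const health = await client.cluster.health({ wait_for_status: 'yellow', timeout: '30s' })
 *   if (health.unassigned_shards > 0) console.warn('unassigned shards:', health.unassigned_shards)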
*/ unassigned_shards: integer; } export interface ClusterHealthIndexHealthStats { active_primary_shards: integer; active_shards: integer; initializing_shards: integer; number_of_replicas: integer; number_of_shards: integer; relocating_shards: integer; shards?: Record; status: HealthStatus; unassigned_shards: integer; unassigned_primary_shards: integer; } export interface ClusterHealthRequest extends RequestBase { /** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. */ index?: Indices; /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards; /** Can be one of cluster, indices or shards. Controls the details level of the health information returned. */ level?: Level; /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** A number controlling how many active shards to wait for, `all` to wait for all shards in the cluster to be active, or `0` to not wait. */ wait_for_active_shards?: WaitForActiveShards; /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */ wait_for_events?: WaitForEvents; /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. */ wait_for_nodes?: ClusterHealthWaitForNodes; /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. */ wait_for_no_initializing_shards?: boolean; /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. */ wait_for_no_relocating_shards?: boolean; /** One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. */ wait_for_status?: HealthStatus; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; expand_wildcards?: never; level?: never; local?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; wait_for_events?: never; wait_for_nodes?: never; wait_for_no_initializing_shards?: never; wait_for_no_relocating_shards?: never; wait_for_status?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; expand_wildcards?: never; level?: never; local?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; wait_for_events?: never; wait_for_nodes?: never; wait_for_no_initializing_shards?: never; wait_for_no_relocating_shards?: never; wait_for_status?: never; }; } export type ClusterHealthResponse = ClusterHealthHealthResponseBody; export interface ClusterHealthShardHealthStats { active_shards: integer; initializing_shards: integer; primary_active: boolean; relocating_shards: integer; status: HealthStatus; unassigned_shards: integer; unassigned_primary_shards: integer; } export type ClusterHealthWaitForNodes = string | integer; export interface ClusterInfoRequest extends RequestBase { /** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */ target: ClusterInfoTargets; /** All values in `body` will be added to the request body.
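 *
 * For example (editor's sketch):
 *
 *   const info = await client.cluster.info({ target: '_all' })
 *   console.log(info.cluster_name)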
*/ body?: string | ({ [key: string]: any; } & { target?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { target?: never; }; } export interface ClusterInfoResponse { cluster_name: Name; http?: NodesHttp; ingest?: NodesIngest; thread_pool?: Record; script?: NodesScripting; } export interface ClusterPendingTasksPendingTask { /** Indicates whether the pending task is currently executing. */ executing: boolean; /** The number that represents when the task was inserted into the task queue. */ insert_order: integer; /** The priority of the pending task. * The valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. */ priority: string; /** A general description of the cluster task that may include a reason and origin. */ source: string; /** The time the task has been waiting to be performed. */ time_in_queue?: Duration; /** The time, expressed in milliseconds, that the task has been waiting to be performed. */ time_in_queue_millis: DurationValue; } export interface ClusterPendingTasksRequest extends RequestBase { /** If `true`, the request retrieves information from the local node only. * If `false`, information is retrieved from the master node. */ local?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { local?: never; master_timeout?: never; }; } export interface ClusterPendingTasksResponse { tasks: ClusterPendingTasksPendingTask[]; } export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { /** A comma-separated list of the names of the nodes to exclude from the * voting configuration. If specified, you may not also specify node_ids. */ node_names?: Names; /** A comma-separated list of the persistent ids of the nodes to exclude * from the voting configuration. If specified, you may not also specify node_names. */ node_ids?: Ids; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** When adding a voting configuration exclusion, the API waits for the * specified nodes to be excluded from the voting configuration before * returning. If the timeout expires before the appropriate condition * is satisfied, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_names?: never; node_ids?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_names?: never; node_ids?: never; master_timeout?: never; timeout?: never; }; } export type ClusterPostVotingConfigExclusionsResponse = boolean; export interface ClusterPutComponentTemplateRequest extends RequestBase { /** Name of the component template to create. * Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mappings`; `synthetics-settings`.
* Elastic Agent uses these templates to configure backing indices for its data streams. * If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. * If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ name: Name; /** If `true`, this request cannot replace or update existing component templates. */ create?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The template to be applied which includes mappings, settings, or aliases configuration. */ template: IndicesIndexState; /** Version number used to manage component templates externally. * This number isn't automatically generated or incremented by Elasticsearch. * To unset a version, replace the template without specifying a version. */ version?: VersionNumber; /** Optional user metadata about the component template. * It may have any contents. This map is not automatically generated by Elasticsearch. * This information is stored in the cluster state, so keeping it short is preferable. * To unset `_meta`, replace the template without specifying this information. */ _meta?: Metadata; /** Marks this index template as deprecated. When creating or updating a non-deprecated index template * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; create?: never; master_timeout?: never; template?: never; version?: never; _meta?: never; deprecated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; create?: never; master_timeout?: never; template?: never; version?: never; _meta?: never; deprecated?: never; }; } export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase; export interface ClusterPutSettingsRequest extends RequestBase { /** Return settings in flat format (default: false) */ flat_settings?: boolean; /** Explicit operation timeout for connection to master node */ master_timeout?: Duration; /** Explicit operation timeout */ timeout?: Duration; /** The settings that persist after the cluster restarts. */ persistent?: Record; /** The settings that do not persist after the cluster restarts. */ transient?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { flat_settings?: never; master_timeout?: never; timeout?: never; persistent?: never; transient?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { flat_settings?: never; master_timeout?: never; timeout?: never; persistent?: never; transient?: never; }; } export interface ClusterPutSettingsResponse { acknowledged: boolean; persistent: Record; transient: Record; } export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo; export interface ClusterRemoteInfoClusterRemoteProxyInfo { /** The connection mode for the remote cluster. */ mode: 'proxy'; /** If it is `true`, there is at least one open connection to the remote cluster. 
* If `false`, the cluster no longer has an open connection to the remote cluster. * This does not necessarily mean that the remote cluster is down or unavailable, only that a connection was lost at some point. */ connected: boolean; /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration; /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean; /** The address for remote connections when proxy mode is configured. */ proxy_address: string; server_name: string; /** The number of open socket connections to the remote cluster when proxy mode is configured. */ num_proxy_sockets_connected: integer; /** The maximum number of socket connections to the remote cluster when proxy mode is configured. */ max_proxy_socket_connections: integer; /** This field is present and has a value of `::es_redacted::` only when the remote cluster is configured with the API key based model. Otherwise, the field is not present. */ cluster_credentials?: string; } export interface ClusterRemoteInfoClusterRemoteSniffInfo { /** The connection mode for the remote cluster. */ mode: 'sniff'; /** If `true`, there is at least one open connection to the remote cluster. * If `false`, the cluster no longer has an open connection to the remote cluster. * This does not necessarily mean that the remote cluster is down or unavailable, only that a connection was lost at some point. */ connected: boolean; /** The maximum number of connections maintained for the remote cluster when sniff mode is configured. */ max_connections_per_cluster: integer; /** The number of connected nodes in the remote cluster when sniff mode is configured. */ num_nodes_connected: long; /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration; /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean; /** The initial seed transport addresses of the remote cluster when sniff mode is configured. */ seeds: string[]; } export interface ClusterRemoteInfoRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export type ClusterRemoteInfoResponse = Record<string, ClusterRemoteInfoClusterRemoteInfo>; export interface ClusterRerouteCommand { /** Cancel allocation of a shard (or recovery). Accepts index and shard for index name and shard number, and node for the node to cancel the shard allocation on. This can be used to force resynchronization of existing replicas from the primary shard by cancelling them and allowing them to be reinitialized through the standard recovery process. By default only replica shard allocations can be cancelled. If it is necessary to cancel the allocation of a primary shard then the allow_primary flag must also be included in the request. */ cancel?: ClusterRerouteCommandCancelAction; /** Move a started shard from one node to another node. Accepts index and shard for index name and shard number, from_node for the node to move the shard from, and to_node for the node to move the shard to. */ move?: ClusterRerouteCommandMoveAction; /** Allocate an unassigned replica shard to a node.
Accepts index and shard for index name and shard number, and node to allocate the shard to. Takes allocation deciders into account. */ allocate_replica?: ClusterRerouteCommandAllocateReplicaAction; /** Allocate a primary shard to a node that holds a stale copy. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command may lead to data loss for the provided shard id. If a node which has the good copy of the data rejoins the cluster later on, that data will be deleted or overwritten with the data of the stale copy that was forcefully allocated with this command. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction; /** Allocate an empty primary shard to a node. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command leads to a complete loss of all data that was indexed into this shard, if it was previously started. If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction; } export interface ClusterRerouteCommandAllocatePrimaryAction { index: IndexName; shard: integer; node: string; /** If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true */ accept_data_loss: boolean; } export interface ClusterRerouteCommandAllocateReplicaAction { index: IndexName; shard: integer; node: string; } export interface ClusterRerouteCommandCancelAction { index: IndexName; shard: integer; node: string; allow_primary?: boolean; } export interface ClusterRerouteCommandMoveAction { index: IndexName; shard: integer; /** The node to move the shard from */ from_node: string; /** The node to move the shard to */ to_node: string; } export interface ClusterRerouteRequest extends RequestBase { /** If true, then the request simulates the operation. * It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ dry_run?: boolean; /** If true, then the response contains an explanation of why the commands can or cannot run. */ explain?: boolean; /** Limits the information returned to the specified metrics. */ metric?: Metrics; /** If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. */ retry_failed?: boolean; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** Defines the commands to perform. */ commands?: ClusterRerouteCommand[]; /** All values in `body` will be added to the request body. 
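*
* For example, a move command can be simulated with `dry_run` before committing to it.
* This is an illustrative sketch, assuming a configured `Client` instance named
* `client` from this package, with made-up index and node names:
*
*   const result = await client.cluster.reroute({
*     dry_run: true,
*     commands: [
*       { move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }
*     ]
*   })
*   console.log(result.acknowledged)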
*/ body?: string | ({ [key: string]: any; } & { dry_run?: never; explain?: never; metric?: never; retry_failed?: never; master_timeout?: never; timeout?: never; commands?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { dry_run?: never; explain?: never; metric?: never; retry_failed?: never; master_timeout?: never; timeout?: never; commands?: never; }; } export interface ClusterRerouteRerouteDecision { decider: string; decision: string; explanation: string; } export interface ClusterRerouteRerouteExplanation { command: string; decisions: ClusterRerouteRerouteDecision[]; parameters: ClusterRerouteRerouteParameters; } export interface ClusterRerouteRerouteParameters { allow_primary: boolean; index: IndexName; node: NodeName; shard: integer; from_node?: NodeName; to_node?: NodeName; } export interface ClusterRerouteResponse { acknowledged: boolean; explanations?: ClusterRerouteRerouteExplanation[]; /** There aren't any guarantees on the output/structure of the raw cluster state. * Here you will find the internal representation of the cluster, which can * differ from the external representation. */ state?: any; } export interface ClusterStateRequest extends RequestBase { /** Limit the information returned to the specified metrics */ metric?: Metrics; /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices; /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean; /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards; /** Return settings in flat format (default: false) */ flat_settings?: boolean; /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean; /** Return local information, do not retrieve the state from master node (default: false) */ local?: boolean; /** Specify timeout for connection to master */ master_timeout?: Duration; /** Wait for the metadata version to be equal to or greater than the specified metadata version */ wait_for_metadata_version?: VersionNumber; /** The maximum time to wait for wait_for_metadata_version before timing out */ wait_for_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { metric?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; local?: never; master_timeout?: never; wait_for_metadata_version?: never; wait_for_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { metric?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; local?: never; master_timeout?: never; wait_for_metadata_version?: never; wait_for_timeout?: never; }; } export type ClusterStateResponse = any; export interface ClusterStatsCCSStats { /** Contains remote cluster settings and metrics collected from them. * The keys are cluster names, and the values are per-cluster data. * Only present if the `include_remotes` option is set to `true`. */ clusters?: Record<string, ClusterStatsRemoteClusterInfo>; /** Information about cross-cluster search usage.
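*
* A minimal reading sketch, assuming a configured `Client` instance named `client`
* from this package (property access is illustrative):
*
*   const stats = await client.cluster.stats({ include_remotes: true })
*   const ccs = stats.ccs
*   console.log(ccs._search.total, ccs._search.success, ccs._search.skipped)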
*/ _search: ClusterStatsCCSUsageStats; /** Information about ES|QL cross-cluster query usage. */ _esql?: ClusterStatsCCSUsageStats; } export interface ClusterStatsCCSUsageClusterStats { /** The total number of successful (not skipped) cross-cluster search requests that were executed against this cluster. This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. */ total: integer; /** The total number of cross-cluster search requests for which this cluster was skipped. */ skipped: integer; /** Statistics about the time taken to execute requests against this cluster. */ took: ClusterStatsCCSUsageTimeValue; } export interface ClusterStatsCCSUsageStats { /** The total number of cross-cluster search requests that have been executed by the cluster. */ total: integer; /** The total number of cross-cluster search requests that have been successfully executed by the cluster. */ success: integer; /** The total number of cross-cluster search requests (successful or failed) that had at least one remote cluster skipped. */ skipped: integer; /** Statistics about the time taken to execute cross-cluster search requests. */ took: ClusterStatsCCSUsageTimeValue; /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `true`. */ took_mrt_true?: ClusterStatsCCSUsageTimeValue; /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `false`. */ took_mrt_false?: ClusterStatsCCSUsageTimeValue; /** The maximum number of remote clusters that were queried in a single cross-cluster search request. */ remotes_per_search_max: integer; /** The average number of remote clusters that were queried in a single cross-cluster search request. */ remotes_per_search_avg: double; /** Statistics about the reasons for cross-cluster search request failures. The keys are the failure reason names and the values are the number of requests that failed for that reason. */ failure_reasons: Record<string, integer>; /** The keys are the names of the search features, and the values are the number of requests that used that feature. A single request can use more than one feature (e.g. both `async` and `wildcard`). */ features: Record<string, integer>; /** Statistics about the clients that executed cross-cluster search requests. The keys are the names of the clients, and the values are the number of requests that were executed by that client. Only known clients (such as `kibana` or `elasticsearch`) are counted. */ clients: Record<string, integer>; /** Statistics about the clusters that were queried in cross-cluster search requests. The keys are cluster names, and the values are per-cluster telemetry data. This also includes the local cluster itself, which uses the name `(local)`. */ clusters: Record<string, ClusterStatsCCSUsageClusterStats>; } export interface ClusterStatsCCSUsageTimeValue { /** The maximum time taken to execute a request, in milliseconds. */ max: DurationValue<UnitMillis>; /** The average time taken to execute a request, in milliseconds. */ avg: DurationValue<UnitMillis>; /** The 90th percentile of the time taken to execute requests, in milliseconds. */ p90: DurationValue<UnitMillis>; } export interface ClusterStatsCharFilterTypes { /** Contains statistics about analyzer types used in selected nodes. */ analyzer_types: ClusterStatsFieldTypes[]; /** Contains statistics about built-in analyzers used in selected nodes.
*/ built_in_analyzers: ClusterStatsFieldTypes[]; /** Contains statistics about built-in character filters used in selected nodes. */ built_in_char_filters: ClusterStatsFieldTypes[]; /** Contains statistics about built-in token filters used in selected nodes. */ built_in_filters: ClusterStatsFieldTypes[]; /** Contains statistics about built-in tokenizers used in selected nodes. */ built_in_tokenizers: ClusterStatsFieldTypes[]; /** Contains statistics about character filter types used in selected nodes. */ char_filter_types: ClusterStatsFieldTypes[]; /** Contains statistics about token filter types used in selected nodes. */ filter_types: ClusterStatsFieldTypes[]; /** Contains statistics about tokenizer types used in selected nodes. */ tokenizer_types: ClusterStatsFieldTypes[]; } export interface ClusterStatsClusterFileSystem { /** Total number of bytes available to JVM in file stores across all selected nodes. * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_bytes`. * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ available_in_bytes: long; /** Total number of unallocated bytes in file stores across all selected nodes. */ free_in_bytes: long; /** Total size, in bytes, of all file stores across all selected nodes. */ total_in_bytes: long; } export interface ClusterStatsClusterIndices { /** Contains statistics about analyzers and analyzer components used in selected nodes. */ analysis: ClusterStatsCharFilterTypes; /** Contains statistics about memory used for completion in selected nodes. */ completion: CompletionStats; /** Total number of indices with shards assigned to selected nodes. */ count: long; /** Contains counts for documents in selected nodes. */ docs: DocStats; /** Contains statistics about the field data cache of selected nodes. */ fielddata: FielddataStats; /** Contains statistics about the query cache of selected nodes. */ query_cache: QueryCacheStats; /** Contains statistics about segments in selected nodes. */ segments: SegmentsStats; /** Contains statistics about indices with shards assigned to selected nodes. */ shards: ClusterStatsClusterIndicesShards; /** Contains statistics about the size of shards assigned to selected nodes. */ store: StoreStats; /** Contains statistics about field mappings in selected nodes. */ mappings: ClusterStatsFieldTypesMappings; /** Contains statistics about the Elasticsearch versions used by indices with shards assigned to selected nodes. */ versions?: ClusterStatsIndicesVersions[]; } export interface ClusterStatsClusterIndicesShards { /** Contains statistics about shards assigned to selected nodes. */ index?: ClusterStatsClusterIndicesShardsIndex; /** Number of primary shards assigned to selected nodes. */ primaries?: double; /** Ratio of replica shards to primary shards across all selected nodes. */ replication?: double; /** Total number of shards assigned to selected nodes. */ total?: double; } export interface ClusterStatsClusterIndicesShardsIndex { /** Contains statistics about the number of primary shards assigned to selected nodes. */ primaries: ClusterStatsClusterShardMetrics; /** Contains statistics about the number of replication shards assigned to selected nodes. */ replication: ClusterStatsClusterShardMetrics; /** Contains statistics about the number of shards assigned to selected nodes.
*/ shards: ClusterStatsClusterShardMetrics; } export interface ClusterStatsClusterIngest { number_of_pipelines: integer; processor_stats: Record<string, ClusterStatsClusterProcessor>; } export interface ClusterStatsClusterJvm { /** Uptime duration, in milliseconds, since JVM last started. */ max_uptime_in_millis: DurationValue<UnitMillis>; /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsClusterJvmMemory; /** Number of active threads in use by JVM across all selected nodes. */ threads: long; /** Contains statistics about the JVM versions used by selected nodes. */ versions: ClusterStatsClusterJvmVersion[]; } export interface ClusterStatsClusterJvmMemory { /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ heap_max_in_bytes: long; /** Memory, in bytes, currently in use by the heap across all selected nodes. */ heap_used_in_bytes: long; } export interface ClusterStatsClusterJvmVersion { /** Always `true`. All distributions come with a bundled Java Development Kit (JDK). */ bundled_jdk: boolean; /** Total number of selected nodes using JVM. */ count: integer; /** If `true`, a bundled JDK is in use by JVM. */ using_bundled_jdk: boolean; /** Version of JVM used by one or more selected nodes. */ version: VersionString; /** Name of the JVM. */ vm_name: string; /** Vendor of the JVM. */ vm_vendor: string; /** Full version number of JVM. * The full version number includes a plus sign (+) followed by the build number. */ vm_version: VersionString; } export interface ClusterStatsClusterNetworkTypes { /** Contains statistics about the HTTP network types used by selected nodes. */ http_types: Record<string, integer>; /** Contains statistics about the transport network types used by selected nodes. */ transport_types: Record<string, integer>; } export interface ClusterStatsClusterNodeCount { coordinating_only: integer; data: integer; data_cold: integer; data_content: integer; data_frozen?: integer; data_hot: integer; data_warm: integer; ingest: integer; master: integer; ml: integer; remote_cluster_client: integer; total: integer; transform: integer; voting_only: integer; } export interface ClusterStatsClusterNodes { /** Contains counts for nodes selected by the request’s node filters. */ count: ClusterStatsClusterNodeCount; /** Contains statistics about the discovery types used by selected nodes. */ discovery_types: Record<string, integer>; /** Contains statistics about file stores used by selected nodes. */ fs: ClusterStatsClusterFileSystem; indexing_pressure: ClusterStatsIndexingPressure; ingest: ClusterStatsClusterIngest; /** Contains statistics about the Java Virtual Machines (JVMs) used by selected nodes. */ jvm: ClusterStatsClusterJvm; /** Contains statistics about the transport and HTTP networks used by selected nodes. */ network_types: ClusterStatsClusterNetworkTypes; /** Contains statistics about the operating systems used by selected nodes. */ os: ClusterStatsClusterOperatingSystem; /** Contains statistics about Elasticsearch distributions installed on selected nodes. */ packaging_types: ClusterStatsNodePackagingType[]; /** Contains statistics about plugins and modules installed on selected nodes. * If no plugins or modules are installed, this array is empty. */ plugins: PluginStats[]; /** Contains statistics about processes used by selected nodes. */ process: ClusterStatsClusterProcess; /** Array of Elasticsearch versions used on selected nodes. */ versions: VersionString[]; } export interface ClusterStatsClusterOperatingSystem { /** Number of processors used to calculate thread pool size across all selected nodes.
* This number can be set with the `processors` setting of a node and defaults to the number of processors reported by the operating system. * In both cases, this number will never be larger than 32. */ allocated_processors: integer; /** Contains statistics about processor architectures (for example, x86_64 or aarch64) used by selected nodes. */ architectures?: ClusterStatsClusterOperatingSystemArchitecture[]; /** Number of processors available to JVM across all selected nodes. */ available_processors: integer; /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsOperatingSystemMemoryInfo; /** Contains statistics about operating systems used by selected nodes. */ names: ClusterStatsClusterOperatingSystemName[]; /** Contains statistics about operating systems used by selected nodes. */ pretty_names: ClusterStatsClusterOperatingSystemPrettyName[]; } export interface ClusterStatsClusterOperatingSystemArchitecture { /** Name of an architecture used by one or more selected nodes. */ arch: string; /** Number of selected nodes using the architecture. */ count: integer; } export interface ClusterStatsClusterOperatingSystemName { /** Number of selected nodes using the operating system. */ count: integer; /** Name of an operating system used by one or more selected nodes. */ name: Name; } export interface ClusterStatsClusterOperatingSystemPrettyName { /** Number of selected nodes using the operating system. */ count: integer; /** Human-readable name of an operating system used by one or more selected nodes. */ pretty_name: Name; } export interface ClusterStatsClusterProcess { /** Contains statistics about CPU used by selected nodes. */ cpu: ClusterStatsClusterProcessCpu; /** Contains statistics about open file descriptors in selected nodes. */ open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors; } export interface ClusterStatsClusterProcessCpu { /** Percentage of CPU used across all selected nodes. * Returns `-1` if not supported. */ percent: integer; } export interface ClusterStatsClusterProcessOpenFileDescriptors { /** Average number of concurrently open file descriptors. * Returns `-1` if not supported. */ avg: long; /** Maximum number of concurrently open file descriptors allowed across all selected nodes. * Returns `-1` if not supported. */ max: long; /** Minimum number of concurrently open file descriptors across all selected nodes. * Returns `-1` if not supported. */ min: long; } export interface ClusterStatsClusterProcessor { count: long; current: long; failed: long; time?: Duration; time_in_millis: DurationValue<UnitMillis>; } export interface ClusterStatsClusterShardMetrics { /** Mean number of shards in an index, counting only shards assigned to selected nodes. */ avg: double; /** Maximum number of shards in an index, counting only shards assigned to selected nodes. */ max: double; /** Minimum number of shards in an index, counting only shards assigned to selected nodes. */ min: double; } export interface ClusterStatsFieldTypes { /** The name for the field type in selected nodes. */ name: Name; /** The number of occurrences of the field type in selected nodes. */ count: integer; /** The number of indices containing the field type in selected nodes. */ index_count: integer; /** For dense_vector field types, number of indexed vector types in selected nodes. */ indexed_vector_count?: long; /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes.
*/ indexed_vector_dim_max?: long; /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_min?: long; /** The number of fields that declare a script. */ script_count?: integer; } export interface ClusterStatsFieldTypesMappings { /** Contains statistics about field data types used in selected nodes. */ field_types: ClusterStatsFieldTypes[]; /** Contains statistics about runtime field data types used in selected nodes. */ runtime_field_types?: ClusterStatsRuntimeFieldTypes[]; /** Total number of fields in all non-system indices. */ total_field_count?: integer; /** Total number of fields in all non-system indices, accounting for mapping deduplication. */ total_deduplicated_field_count?: integer; /** Total size of all mappings after deduplication and compression. */ total_deduplicated_mapping_size?: ByteSize; /** Total size of all mappings, in bytes, after deduplication and compression. */ total_deduplicated_mapping_size_in_bytes?: long; } export interface ClusterStatsIndexingPressure { memory: ClusterStatsIndexingPressureMemory; } export interface ClusterStatsIndexingPressureMemory { current: ClusterStatsIndexingPressureMemorySummary; limit_in_bytes: long; total: ClusterStatsIndexingPressureMemorySummary; } export interface ClusterStatsIndexingPressureMemorySummary { all_in_bytes: long; combined_coordinating_and_primary_in_bytes: long; coordinating_in_bytes: long; coordinating_rejections?: long; primary_in_bytes: long; primary_rejections?: long; replica_in_bytes: long; replica_rejections?: long; } export interface ClusterStatsIndicesVersions { index_count: integer; primary_shard_count: integer; total_primary_bytes: long; version: VersionString; } export interface ClusterStatsNodePackagingType { /** Number of selected nodes using the distribution flavor and file type. */ count: integer; /** Type of Elasticsearch distribution. This is always `default`. */ flavor: string; /** File type (such as `tar` or `zip`) used for the distribution package. */ type: string; } export interface ClusterStatsOperatingSystemMemoryInfo { /** Total amount, in bytes, of memory across all selected nodes, using the value of the `es.total_memory_bytes` system property, when set, in place of the measured total memory. */ adjusted_total_in_bytes?: long; /** Amount, in bytes, of free physical memory across all selected nodes. */ free_in_bytes: long; /** Percentage of free physical memory across all selected nodes. */ free_percent: integer; /** Total amount, in bytes, of physical memory across all selected nodes. */ total_in_bytes: long; /** Amount, in bytes, of physical memory in use across all selected nodes. */ used_in_bytes: long; /** Percentage of physical memory in use across all selected nodes. */ used_percent: integer; } export interface ClusterStatsRemoteClusterInfo { /** The UUID of the remote cluster. */ cluster_uuid: string; /** The connection mode used to communicate with the remote cluster. */ mode: string; /** The `skip_unavailable` setting used for this remote cluster. */ skip_unavailable: boolean; /** Transport compression setting used for this remote cluster. */ transport_compress: string; /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus; /** The list of Elasticsearch versions used by the nodes on the remote cluster. */ version: VersionString[]; /** The total count of nodes in the remote cluster.
*/ nodes_count: integer; /** The total number of shards in the remote cluster. */ shards_count: integer; /** The total number of indices in the remote cluster. */ indices_count: integer; /** Total data set size, in bytes, of all shards assigned to selected nodes. */ indices_total_size_in_bytes: long; /** Total data set size of all shards assigned to selected nodes, as a human-readable string. */ indices_total_size?: string; /** Maximum amount of memory, in bytes, available for use by the heap across the nodes of the remote cluster. */ max_heap_in_bytes: long; /** Maximum amount of memory available for use by the heap across the nodes of the remote cluster, as a human-readable string. */ max_heap?: string; /** Total amount, in bytes, of physical memory across the nodes of the remote cluster. */ mem_total_in_bytes: long; /** Total amount of physical memory across the nodes of the remote cluster, as a human-readable string. */ mem_total?: string; } export interface ClusterStatsRequest extends RequestBase { /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds; /** Include remote cluster data in the response. */ include_remotes?: boolean; /** Period to wait for each node to respond. * If a node does not respond before its timeout expires, the response does not include its stats. * However, timed-out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; include_remotes?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; include_remotes?: never; timeout?: never; }; } export type ClusterStatsResponse = ClusterStatsStatsResponseBase; export interface ClusterStatsRuntimeFieldTypes { /** Maximum number of characters for a single runtime field script. */ chars_max: integer; /** Total number of characters for the scripts that define the current runtime field data type. */ chars_total: integer; /** Number of runtime fields mapped to the field data type in selected nodes. */ count: integer; /** Maximum number of accesses to doc_values for a single runtime field script. */ doc_max: integer; /** Total number of accesses to doc_values for the scripts that define the current runtime field data type. */ doc_total: integer; /** Number of indices containing a mapping of the runtime field data type in selected nodes. */ index_count: integer; /** Script languages used for the runtime fields scripts. */ lang: string[]; /** Maximum number of lines for a single runtime field script. */ lines_max: integer; /** Total number of lines for the scripts that define the current runtime field data type. */ lines_total: integer; /** Field data type used in selected nodes. */ name: Name; /** Number of runtime fields that don’t declare a script. */ scriptless_count: integer; /** Number of runtime fields that shadow an indexed field. */ shadowed_count: integer; /** Maximum number of accesses to _source for a single runtime field script. */ source_max: integer; /** Total number of accesses to _source for the scripts that define the current runtime field data type. */ source_total: integer; } export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { /** Name of the cluster, based on the cluster name setting.
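*
* Usage sketch (assuming a configured `Client` instance named `client` from this
* package; the camel-cased method name follows this client's conventions):
*
*   const stats = await client.cluster.stats()
*   console.log(stats.cluster_name, stats.status, stats.nodes.count.total)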
*/ cluster_name: Name; /** Unique identifier for the cluster. */ cluster_uuid: Uuid; /** Contains statistics about indices with shards assigned to selected nodes. */ indices: ClusterStatsClusterIndices; /** Contains statistics about nodes selected by the request’s node filters. */ nodes: ClusterStatsClusterNodes; /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus; /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */ timestamp: long; /** Cross-cluster stats. */ ccs: ClusterStatsCCSStats; } export interface ConnectorConnector { api_key_id?: string; api_key_secret_id?: string; configuration: ConnectorConnectorConfiguration; custom_scheduling: ConnectorConnectorCustomScheduling; deleted: boolean; description?: string; error?: string | null; features?: ConnectorConnectorFeatures; filtering: ConnectorFilteringConfig[]; id?: Id; index_name?: IndexName | null; is_native: boolean; language?: string; last_access_control_sync_error?: string; last_access_control_sync_scheduled_at?: DateTime; last_access_control_sync_status?: ConnectorSyncStatus; last_deleted_document_count?: long; last_incremental_sync_scheduled_at?: DateTime; last_indexed_document_count?: long; last_seen?: DateTime; last_sync_error?: string; last_sync_scheduled_at?: DateTime; last_sync_status?: ConnectorSyncStatus; last_synced?: DateTime; name?: string; pipeline?: ConnectorIngestPipelineParams; scheduling: ConnectorSchedulingConfiguration; service_type?: string; status: ConnectorConnectorStatus; sync_cursor?: any; sync_now: boolean; } export interface ConnectorConnectorConfigProperties { category?: string; default_value: ScalarValue; depends_on: ConnectorDependency[]; display: ConnectorDisplayType; label: string; options: ConnectorSelectOption[]; order?: integer; placeholder?: string; required: boolean; sensitive: boolean; tooltip?: string | null; type?: ConnectorConnectorFieldType; ui_restrictions?: string[]; validations?: ConnectorValidation[]; value: any; } export type ConnectorConnectorConfiguration = Record<string, ConnectorConnectorConfigProperties>; export type ConnectorConnectorCustomScheduling = Record<string, ConnectorCustomScheduling>; export interface ConnectorConnectorFeatures { /** Indicates whether document-level security is enabled. */ document_level_security?: ConnectorFeatureEnabled; /** Indicates whether incremental syncs are enabled. */ incremental_sync?: ConnectorFeatureEnabled; /** Indicates whether managed connector API keys are enabled.
*/ native_connector_api_keys?: ConnectorFeatureEnabled; sync_rules?: ConnectorSyncRulesFeature; } export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool'; export interface ConnectorConnectorScheduling { enabled: boolean; /** The interval is expressed using the crontab syntax. */ interval: string; } export type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error'; export interface ConnectorConnectorSyncJob { cancelation_requested_at?: DateTime; canceled_at?: DateTime; completed_at?: DateTime; connector: ConnectorSyncJobConnectorReference; created_at: DateTime; deleted_document_count: long; error?: string; id: Id; indexed_document_count: long; indexed_document_volume: long; job_type: ConnectorSyncJobType; last_seen?: DateTime; metadata: Record<string, any>; started_at?: DateTime; status: ConnectorSyncStatus; total_document_count: long; trigger_method: ConnectorSyncJobTriggerMethod; worker_hostname?: string; } export interface ConnectorCustomScheduling { configuration_overrides: ConnectorCustomSchedulingConfigurationOverrides; enabled: boolean; interval: string; last_synced?: DateTime; name: string; } export interface ConnectorCustomSchedulingConfigurationOverrides { max_crawl_depth?: integer; sitemap_discovery_disabled?: boolean; domain_allowlist?: string[]; sitemap_urls?: string[]; seed_urls?: string[]; } export interface ConnectorDependency { field: string; value: ScalarValue; } export type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown'; export interface ConnectorFeatureEnabled { enabled: boolean; } export interface ConnectorFilteringAdvancedSnippet { created_at?: DateTime; updated_at?: DateTime; value: any; } export interface ConnectorFilteringConfig { active: ConnectorFilteringRules; domain?: string; draft: ConnectorFilteringRules; } export type ConnectorFilteringPolicy = 'exclude' | 'include'; export interface ConnectorFilteringRule { created_at?: DateTime; field: Field; id: Id; order: integer; policy: ConnectorFilteringPolicy; rule: ConnectorFilteringRuleRule; updated_at?: DateTime; value: string; } export type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<'; export interface ConnectorFilteringRules { advanced_snippet: ConnectorFilteringAdvancedSnippet; rules: ConnectorFilteringRule[]; validation: ConnectorFilteringRulesValidation; } export interface ConnectorFilteringRulesValidation { errors: ConnectorFilteringValidation[]; state: ConnectorFilteringValidationState; } export interface ConnectorFilteringValidation { ids: Id[]; messages: string[]; } export type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid'; export interface ConnectorGreaterThanValidation { type: 'greater_than'; constraint: double; } export interface ConnectorIncludedInValidation { type: 'included_in'; constraint: ScalarValue[]; } export interface ConnectorIngestPipelineParams { extract_binary_content: boolean; name: string; reduce_whitespace: boolean; run_ml_inference: boolean; } export interface ConnectorLessThanValidation { type: 'less_than'; constraint: double; } export interface ConnectorListTypeValidation { type: 'list_type'; constraint: string; } export interface ConnectorRegexValidation { type: 'regex'; constraint: string; } export interface ConnectorSchedulingConfiguration { access_control?: ConnectorConnectorScheduling; full?: ConnectorConnectorScheduling; incremental?: ConnectorConnectorScheduling; } export interface ConnectorSelectOption { label:
string; value: ScalarValue; } export interface ConnectorSyncJobConnectorReference { configuration: ConnectorConnectorConfiguration; filtering: ConnectorFilteringRules; id: Id; index_name: string; language?: string; pipeline?: ConnectorIngestPipelineParams; service_type: string; sync_cursor?: any; } export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled'; export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control'; export interface ConnectorSyncRulesFeature { /** Indicates whether advanced sync rules are enabled. */ advanced?: ConnectorFeatureEnabled; /** Indicates whether basic sync rules are enabled. */ basic?: ConnectorFeatureEnabled; } export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended'; export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation; export interface ConnectorCheckInRequest extends RequestBase { /** The unique identifier of the connector to be checked in */ connector_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; }; } export interface ConnectorCheckInResponse { result: Result; } export interface ConnectorDeleteRequest extends RequestBase { /** The unique identifier of the connector to be deleted */ connector_id: Id; /** A flag indicating if associated sync jobs should also be removed. Defaults to `false`. */ delete_sync_jobs?: boolean; /** A flag indicating if the connector should be hard deleted. */ hard?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; delete_sync_jobs?: never; hard?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; delete_sync_jobs?: never; hard?: never; }; } export type ConnectorDeleteResponse = AcknowledgedResponseBase; export interface ConnectorGetRequest extends RequestBase { /** The unique identifier of the connector */ connector_id: Id; /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. */ include_deleted?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; include_deleted?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; include_deleted?: never; }; } export type ConnectorGetResponse = ConnectorConnector; export interface ConnectorLastSyncRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; last_access_control_sync_error?: string; last_access_control_sync_scheduled_at?: DateTime; last_access_control_sync_status?: ConnectorSyncStatus; last_deleted_document_count?: long; last_incremental_sync_scheduled_at?: DateTime; last_indexed_document_count?: long; last_seen?: DateTime; last_sync_error?: string; last_sync_scheduled_at?: DateTime; last_sync_status?: ConnectorSyncStatus; last_synced?: DateTime; sync_cursor?: any; /** All values in `body` will be added to the request body.
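*
* For example, recording the outcome of a completed sync (illustrative sketch;
* assumes a configured `Client` instance named `client` from this package, and
* the connector ID and counts are made up):
*
*   await client.connector.lastSync({
*     connector_id: 'my-connector',
*     last_sync_status: 'completed',
*     last_indexed_document_count: 1205
*   })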
*/ body?: string | ({ [key: string]: any; } & { connector_id?: never; last_access_control_sync_error?: never; last_access_control_sync_scheduled_at?: never; last_access_control_sync_status?: never; last_deleted_document_count?: never; last_incremental_sync_scheduled_at?: never; last_indexed_document_count?: never; last_seen?: never; last_sync_error?: never; last_sync_scheduled_at?: never; last_sync_status?: never; last_synced?: never; sync_cursor?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; last_access_control_sync_error?: never; last_access_control_sync_scheduled_at?: never; last_access_control_sync_status?: never; last_deleted_document_count?: never; last_incremental_sync_scheduled_at?: never; last_indexed_document_count?: never; last_seen?: never; last_sync_error?: never; last_sync_scheduled_at?: never; last_sync_status?: never; last_synced?: never; sync_cursor?: never; }; } export interface ConnectorLastSyncResponse { result: Result; } export interface ConnectorListRequest extends RequestBase { /** Starting offset (default: 0) */ from?: integer; /** Specifies a max number of results to get */ size?: integer; /** A comma-separated list of connector index names to fetch connector documents for */ index_name?: Indices; /** A comma-separated list of connector names to fetch connector documents for */ connector_name?: Names; /** A comma-separated list of connector service types to fetch connector documents for */ service_type?: Names; /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. */ include_deleted?: boolean; /** A wildcard query string that filters connectors with matching name, description or index name */ query?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { from?: never; size?: never; index_name?: never; connector_name?: never; service_type?: never; include_deleted?: never; query?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { from?: never; size?: never; index_name?: never; connector_name?: never; service_type?: never; include_deleted?: never; query?: never; }; } export interface ConnectorListResponse { count: long; results: ConnectorConnector[]; } export interface ConnectorPostRequest extends RequestBase { description?: string; index_name?: IndexName; is_native?: boolean; language?: string; name?: string; service_type?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { description?: never; index_name?: never; is_native?: never; language?: never; name?: never; service_type?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { description?: never; index_name?: never; is_native?: never; language?: never; name?: never; service_type?: never; }; } export interface ConnectorPostResponse { result: Result; id: Id; } export interface ConnectorPutRequest extends RequestBase { /** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ connector_id?: Id; description?: string; index_name?: IndexName; is_native?: boolean; language?: string; name?: string; service_type?: string; /** All values in `body` will be added to the request body. 
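*
* For example, creating or updating a connector (illustrative sketch; assumes a
* configured `Client` instance named `client` from this package, and the field
* values are made up):
*
*   const created = await client.connector.put({
*     connector_id: 'my-connector',
*     index_name: 'search-content',
*     name: 'My connector',
*     service_type: 'google_drive'
*   })
*   console.log(created.result, created.id)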
*/ body?: string | ({ [key: string]: any; } & { connector_id?: never; description?: never; index_name?: never; is_native?: never; language?: never; name?: never; service_type?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; description?: never; index_name?: never; is_native?: never; language?: never; name?: never; service_type?: never; }; } export interface ConnectorPutResponse { result: Result; id: Id; } export interface ConnectorSyncJobCancelRequest extends RequestBase { /** The unique identifier of the connector sync job */ connector_sync_job_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; }; } export interface ConnectorSyncJobCancelResponse { result: Result; } export interface ConnectorSyncJobCheckInRequest extends RequestBase { /** The unique identifier of the connector sync job to be checked in. */ connector_sync_job_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; }; } export interface ConnectorSyncJobCheckInResponse { } export interface ConnectorSyncJobClaimRequest extends RequestBase { /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id; /** The cursor object from the last incremental sync job. * This should reference the `sync_cursor` field in the connector state for which the job runs. */ sync_cursor?: any; /** The host name of the current system that will run the job. */ worker_hostname: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; sync_cursor?: never; worker_hostname?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; sync_cursor?: never; worker_hostname?: never; }; } export interface ConnectorSyncJobClaimResponse { } export interface ConnectorSyncJobDeleteRequest extends RequestBase { /** The unique identifier of the connector sync job to be deleted */ connector_sync_job_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; }; } export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase; export interface ConnectorSyncJobErrorRequest extends RequestBase { /** The unique identifier for the connector sync job. */ connector_sync_job_id: Id; /** The error for the connector sync job error field. */ error: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; error?: never; }); /** All values in `querystring` will be added to the request querystring. 
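*
* Usage sketch (assumes a configured `Client` instance named `client` from this
* package; the sync job ID and error message are illustrative):
*
*   await client.connector.syncJobError({
*     connector_sync_job_id: 'my-sync-job-id',
*     error: 'Authentication against the remote source failed'
*   })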
*/ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; error?: never; }; } export interface ConnectorSyncJobErrorResponse { } export interface ConnectorSyncJobGetRequest extends RequestBase { /** The unique identifier of the connector sync job */ connector_sync_job_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; }; } export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob; export interface ConnectorSyncJobListRequest extends RequestBase { /** Starting offset (default: 0) */ from?: integer; /** Specifies a max number of results to get */ size?: integer; /** A sync job status to fetch connector sync jobs for */ status?: ConnectorSyncStatus; /** A connector id to fetch connector sync jobs for */ connector_id?: Id; /** A comma-separated list of job types to fetch the sync jobs for */ job_type?: ConnectorSyncJobType | ConnectorSyncJobType[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { from?: never; size?: never; status?: never; connector_id?: never; job_type?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { from?: never; size?: never; status?: never; connector_id?: never; job_type?: never; }; } export interface ConnectorSyncJobListResponse { count: long; results: ConnectorConnectorSyncJob[]; } export interface ConnectorSyncJobPostRequest extends RequestBase { /** The id of the associated connector */ id: Id; job_type?: ConnectorSyncJobType; trigger_method?: ConnectorSyncJobTriggerMethod; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; job_type?: never; trigger_method?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; job_type?: never; trigger_method?: never; }; } export interface ConnectorSyncJobPostResponse { id: Id; } export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id; /** The number of documents the sync job deleted. */ deleted_document_count: long; /** The number of documents the sync job indexed. */ indexed_document_count: long; /** The total size of the data (in MiB) the sync job indexed. */ indexed_document_volume: long; /** The timestamp to use in the `last_seen` property for the connector sync job. */ last_seen?: Duration; /** The connector-specific metadata. */ metadata?: Metadata; /** The total number of documents in the target index after the sync job finished. */ total_document_count?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_sync_job_id?: never; deleted_document_count?: never; indexed_document_count?: never; indexed_document_volume?: never; last_seen?: never; metadata?: never; total_document_count?: never; }); /** All values in `querystring` will be added to the request querystring. 
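*
* For example, reporting progress from a custom connector worker (illustrative
* sketch; assumes a configured `Client` instance named `client` from this package,
* with made-up IDs and counts):
*
*   await client.connector.syncJobUpdateStats({
*     connector_sync_job_id: 'my-sync-job-id',
*     deleted_document_count: 10,
*     indexed_document_count: 2000,
*     indexed_document_volume: 150
*   })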
*/ querystring?: { [key: string]: any; } & { connector_sync_job_id?: never; deleted_document_count?: never; indexed_document_count?: never; indexed_document_volume?: never; last_seen?: never; metadata?: never; total_document_count?: never; }; } export interface ConnectorSyncJobUpdateStatsResponse { } export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; }; } export interface ConnectorUpdateActiveFilteringResponse { result: Result; } export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; api_key_id?: string; api_key_secret_id?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; api_key_id?: never; api_key_secret_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; api_key_id?: never; api_key_secret_id?: never; }; } export interface ConnectorUpdateApiKeyIdResponse { result: Result; } export interface ConnectorUpdateConfigurationRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; configuration?: ConnectorConnectorConfiguration; values?: Record<string, any>; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; configuration?: never; values?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; configuration?: never; values?: never; }; } export interface ConnectorUpdateConfigurationResponse { result: Result; } export interface ConnectorUpdateErrorRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; error: SpecUtilsWithNullValue<string>; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; error?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; error?: never; }; } export interface ConnectorUpdateErrorResponse { result: Result; } export interface ConnectorUpdateFeaturesRequest extends RequestBase { /** The unique identifier of the connector to be updated. */ connector_id: Id; features: ConnectorConnectorFeatures; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; features?: never; }); /** All values in `querystring` will be added to the request querystring.
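*
* For example, enabling incremental syncs and document-level security
* (illustrative sketch; assumes a configured `Client` instance named `client`
* from this package and a connector client version that exposes this API):
*
*   await client.connector.updateFeatures({
*     connector_id: 'my-connector',
*     features: {
*       incremental_sync: { enabled: true },
*       document_level_security: { enabled: true }
*     }
*   })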
*/ querystring?: { [key: string]: any; } & { connector_id?: never; features?: never; }; } export interface ConnectorUpdateFeaturesResponse { result: Result; } export interface ConnectorUpdateFilteringRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; filtering?: ConnectorFilteringConfig[]; rules?: ConnectorFilteringRule[]; advanced_snippet?: ConnectorFilteringAdvancedSnippet; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; filtering?: never; rules?: never; advanced_snippet?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; filtering?: never; rules?: never; advanced_snippet?: never; }; } export interface ConnectorUpdateFilteringResponse { result: Result; } export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; validation: ConnectorFilteringRulesValidation; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; validation?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; validation?: never; }; } export interface ConnectorUpdateFilteringValidationResponse { result: Result; } export interface ConnectorUpdateIndexNameRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; index_name: SpecUtilsWithNullValue<IndexName>; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; index_name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; index_name?: never; }; } export interface ConnectorUpdateIndexNameResponse { result: Result; } export interface ConnectorUpdateNameRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; name?: string; description?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; name?: never; description?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; name?: never; description?: never; }; } export interface ConnectorUpdateNameResponse { result: Result; } export interface ConnectorUpdateNativeRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; is_native: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; is_native?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; is_native?: never; }; } export interface ConnectorUpdateNativeResponse { result: Result; } export interface ConnectorUpdatePipelineRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; pipeline: ConnectorIngestPipelineParams; /** All values in `body` will be added to the request body.
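*
* Usage sketch (assumes a configured `Client` instance named `client` from this
* package; the pipeline values mirror the `ConnectorIngestPipelineParams` fields
* above and are illustrative):
*
*   await client.connector.updatePipeline({
*     connector_id: 'my-connector',
*     pipeline: {
*       name: 'my-ingest-pipeline',
*       extract_binary_content: true,
*       reduce_whitespace: true,
*       run_ml_inference: false
*     }
*   })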
*/ body?: string | ({ [key: string]: any; } & { connector_id?: never; pipeline?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; pipeline?: never; }; } export interface ConnectorUpdatePipelineResponse { result: Result; } export interface ConnectorUpdateSchedulingRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; scheduling: ConnectorSchedulingConfiguration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; scheduling?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; scheduling?: never; }; } export interface ConnectorUpdateSchedulingResponse { result: Result; } export interface ConnectorUpdateServiceTypeRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; service_type: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; service_type?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; service_type?: never; }; } export interface ConnectorUpdateServiceTypeResponse { result: Result; } export interface ConnectorUpdateStatusRequest extends RequestBase { /** The unique identifier of the connector to be updated */ connector_id: Id; status: ConnectorConnectorStatus; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { connector_id?: never; status?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { connector_id?: never; status?: never; }; } export interface ConnectorUpdateStatusResponse { result: Result; } export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { /** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ index_uuid: Uuid; /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. */ accept_data_loss: boolean; /** Specify timeout for connection to master */ master_timeout?: Duration; /** Explicit operation timeout */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index_uuid?: never; accept_data_loss?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index_uuid?: never; accept_data_loss?: never; master_timeout?: never; timeout?: never; }; } export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase; export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { /** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ index_uuid: Uuid; /** This parameter must be set to true to import a dangling index. * Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster.
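 *
 * @example
 * A minimal sketch (assuming a reachable cluster with at least one dangling
 * index) that lists dangling indices and imports each one, explicitly
 * acknowledging that the recovered data may be stale:
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
 * for (const idx of dangling_indices) {
 *   await client.danglingIndices.importDanglingIndex({
 *     index_uuid: idx.index_uuid,
 *     accept_data_loss: true, // required acknowledgement, see above
 *   })
 * }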
*/ accept_data_loss: boolean; /** Specify timeout for connection to master */ master_timeout?: Duration; /** Explicit operation timeout */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index_uuid?: never; accept_data_loss?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index_uuid?: never; accept_data_loss?: never; master_timeout?: never; timeout?: never; }; } export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase; export interface DanglingIndicesListDanglingIndicesDanglingIndex { index_name: string; index_uuid: string; creation_date_millis: EpochTime; node_ids: Ids; } export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface DanglingIndicesListDanglingIndicesResponse { dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[]; } export interface EnrichPolicy { enrich_fields: Fields; indices: Indices; match_field: Field; query?: QueryDslQueryContainer; name?: Name; elasticsearch_version?: string; } export type EnrichPolicyType = 'geo_match' | 'match' | 'range'; export interface EnrichSummary { config: Partial>; } export interface EnrichDeletePolicyRequest extends RequestBase { /** Enrich policy to delete. */ name: Name; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export type EnrichDeletePolicyResponse = AcknowledgedResponseBase; export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED'; export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { phase: EnrichExecutePolicyEnrichPolicyPhase; step?: string; } export interface EnrichExecutePolicyRequest extends RequestBase { /** Enrich policy to execute. */ name: Name; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** If `true`, the request blocks other enrich policy execution requests until complete. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; wait_for_completion?: never; }; } export interface EnrichExecutePolicyResponse { status?: EnrichExecutePolicyExecuteEnrichPolicyStatus; task?: TaskId; } export interface EnrichGetPolicyRequest extends RequestBase { /** Comma-separated list of enrich policy names used to limit the request. * To return information for all enrich policies, omit this parameter. */ name?: Names; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. 
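 *
 * @example
 * An illustrative sketch of the enrich policy lifecycle: create a `match`
 * policy, then execute it to build the enrich index. The policy name, source
 * index, and field names are hypothetical.
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * await client.enrich.putPolicy({
 *   name: 'users-policy',
 *   match: {
 *     indices: 'users',
 *     match_field: 'email',
 *     enrich_fields: ['first_name', 'last_name'],
 *   },
 * })
 * const exec = await client.enrich.executePolicy({
 *   name: 'users-policy',
 *   wait_for_completion: true,
 * })
 * console.log(exec.status?.phase) // 'COMPLETE' once the enrich index is built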
*/ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export interface EnrichGetPolicyResponse { policies: EnrichSummary[]; } export interface EnrichPutPolicyRequest extends RequestBase { /** Name of the enrich policy to create or update. */ name: Name; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Matches enrich data to incoming documents based on a `geo_shape` query. */ geo_match?: EnrichPolicy; /** Matches enrich data to incoming documents based on a `term` query. */ match?: EnrichPolicy; /** Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. */ range?: EnrichPolicy; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; geo_match?: never; match?: never; range?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; geo_match?: never; match?: never; range?: never; }; } export type EnrichPutPolicyResponse = AcknowledgedResponseBase; export interface EnrichStatsCacheStats { node_id: Id; count: integer; hits: integer; hits_time_in_millis: DurationValue; misses: integer; misses_time_in_millis: DurationValue; evictions: integer; size_in_bytes: long; } export interface EnrichStatsCoordinatorStats { executed_searches_total: long; node_id: Id; queue_size: integer; remote_requests_current: integer; remote_requests_total: long; } export interface EnrichStatsExecutingPolicy { name: Name; task: TasksTaskInfo; } export interface EnrichStatsRequest extends RequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface EnrichStatsResponse { /** Objects containing information about each coordinating ingest node for configured enrich processors. */ coordinator_stats: EnrichStatsCoordinatorStats[]; /** Objects containing information about each enrich policy that is currently executing. */ executing_policies: EnrichStatsExecutingPolicy[]; /** Objects containing information about the enrich cache stats on each ingest node. */ cache_stats?: EnrichStatsCacheStats[]; } export interface EqlEqlHits { /** Metadata about the number of matching events or sequences. */ total?: SearchTotalHits; /** Contains events matching the query. Each object represents a matching event. */ events?: EqlHitsEvent[]; /** Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence. */ sequences?: EqlHitsSequence[]; } export interface EqlEqlSearchResponseBase { /** Identifier for the search. */ id?: Id; /** If true, the response does not contain complete search results. */ is_partial?: boolean; /** If true, the search request is still executing. */ is_running?: boolean; /** Milliseconds it took Elasticsearch to execute the request. 
*/ took?: DurationValue; /** If true, the request timed out before completion. */ timed_out?: boolean; /** Contains matching events and sequences. Also contains related metadata. */ hits: EqlEqlHits; /** Contains information about shard failures (if any), in case allow_partial_search_results=true */ shard_failures?: ShardFailure[]; } export interface EqlHitsEvent { /** Name of the index containing the event. */ _index: IndexName; /** Unique identifier for the event. This ID is only unique within the index. */ _id: Id; /** Original JSON body passed for the event at index time. */ _source: TEvent; /** Set to `true` for events in a timespan-constrained sequence that do not meet a given condition. */ missing?: boolean; fields?: Record; } export interface EqlHitsSequence { /** Contains events matching the query. Each object represents a matching event. */ events: EqlHitsEvent[]; /** Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax. */ join_keys?: any[]; } export interface EqlDeleteRequest extends RequestBase { /** Identifier for the search to delete. * A search ID is provided in the EQL search API's response for an async search. * A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type EqlDeleteResponse = AcknowledgedResponseBase; export interface EqlGetRequest extends RequestBase { /** Identifier for the search. */ id: Id; /** Period for which the search and its results are stored on the cluster. * Defaults to the keep_alive value set by the search’s EQL search API request. */ keep_alive?: Duration; /** Timeout duration to wait for the request to finish. * Defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; keep_alive?: never; wait_for_completion_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; keep_alive?: never; wait_for_completion_timeout?: never; }; } export type EqlGetResponse = EqlEqlSearchResponseBase; export interface EqlGetStatusRequest extends RequestBase { /** Identifier for the search. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface EqlGetStatusResponse { /** Identifier for the search. */ id: Id; /** If true, the response does not contain complete search results. This could be because either the search is still running (is_running status is true), or because it is already completed (is_running status is false) and results are partial due to failures or timeouts. */ is_partial: boolean; /** If true, the search request is still executing. If false, the search is completed. */ is_running: boolean; /** For a running search shows a timestamp when the eql search started, in milliseconds since the Unix epoch.
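 *
 * @example
 * A sketch of polling an async EQL search; the search ID is a hypothetical
 * value returned by an earlier `eql.search` call:
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const searchId = 'my-async-eql-search-id' // hypothetical
 * const status = await client.eql.getStatus({ id: searchId })
 * if (status.is_running) {
 *   // Still executing: poll again later.
 * } else if (status.is_partial) {
 *   // Completed, but results are partial (shard failures or a timeout).
 * } else {
 *   const results = await client.eql.get({ id: searchId })
 *   console.log(results.hits.events?.length)
 * }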
*/ start_time_in_millis?: EpochTime; /** Shows a timestamp when the eql search will be expired, in milliseconds since the Unix epoch. When this time is reached, the search and its results are deleted, even if the search is still ongoing. */ expiration_time_in_millis?: EpochTime; /** For a completed search shows the http status code of the completed search. */ completion_status?: integer; } export interface EqlSearchRequest extends RequestBase { /** The name of the index to scope the operation */ index: Indices; allow_no_indices?: boolean; expand_wildcards?: ExpandWildcards; /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean; /** EQL query you wish to run. */ query: string; case_sensitive?: boolean; /** Field containing the event classification, such as process, file, or network. */ event_category_field?: Field; /** Field used to sort hits with the same timestamp in ascending order */ tiebreaker_field?: Field; /** Field containing event timestamp. Default "@timestamp" */ timestamp_field?: Field; /** Maximum number of events to search at a time for sequence queries. */ fetch_size?: uint; /** Query, written in Query DSL, used to filter the events on which the EQL query runs. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[]; keep_alive?: Duration; keep_on_completion?: boolean; wait_for_completion_timeout?: Duration; /** Allow query execution also in case of shard failures. * If true, the query will keep running and will return results based on the available shards. * For sequences, the behavior can be further refined using allow_partial_sequence_results */ allow_partial_search_results?: boolean; /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. * If true, the sequence query will return results based on the available shards, ignoring the others. * If false, the sequence query will return successfully, but will always have empty results. */ allow_partial_sequence_results?: boolean; /** For basic queries, the maximum number of matching events to return. Defaults to 10 */ size?: uint; /** Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. */ fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[]; result_position?: EqlSearchResultPosition; runtime_mappings?: MappingRuntimeFields; /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` * parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; query?: never; case_sensitive?: never; event_category_field?: never; tiebreaker_field?: never; timestamp_field?: never; fetch_size?: never; filter?: never; keep_alive?: never; keep_on_completion?: never; wait_for_completion_timeout?: never; allow_partial_search_results?: never; allow_partial_sequence_results?: never; size?: never; fields?: never; result_position?: never; runtime_mappings?: never; max_samples_per_key?: never; }); /** All values in `querystring` will be added to the request querystring. 
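 *
 * @example
 * An illustrative EQL sequence search against a hypothetical `my-logs` index:
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const res = await client.eql.search({
 *   index: 'my-logs',
 *   query: 'sequence by host.name [process where process.name == "cmd.exe"] [network where true]',
 *   size: 10,
 * })
 * // Sequence queries populate `hits.sequences`; simple event queries populate `hits.events`.
 * console.log(res.hits.sequences?.[0]?.join_keys)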
*/ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; query?: never; case_sensitive?: never; event_category_field?: never; tiebreaker_field?: never; timestamp_field?: never; fetch_size?: never; filter?: never; keep_alive?: never; keep_on_completion?: never; wait_for_completion_timeout?: never; allow_partial_search_results?: never; allow_partial_sequence_results?: never; size?: never; fields?: never; result_position?: never; runtime_mappings?: never; max_samples_per_key?: never; }; } export type EqlSearchResponse = EqlEqlSearchResponseBase; export type EqlSearchResultPosition = 'tail' | 'head'; export interface EsqlAsyncEsqlResult extends EsqlEsqlResult { id?: string; is_running: boolean; } export interface EsqlEsqlClusterDetails { status: EsqlEsqlClusterStatus; indices: string; took?: DurationValue; _shards?: EsqlEsqlShardInfo; failures?: EsqlEsqlShardFailure[]; } export interface EsqlEsqlClusterInfo { total: integer; successful: integer; running: integer; skipped: integer; partial: integer; failed: integer; details: Record; } export type EsqlEsqlClusterStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed'; export interface EsqlEsqlColumnInfo { name: string; type: string; } export type EsqlEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow'; export interface EsqlEsqlResult { took?: DurationValue; is_partial?: boolean; all_columns?: EsqlEsqlColumnInfo[]; columns: EsqlEsqlColumnInfo[]; values: FieldValue[][]; /** Cross-cluster search information. Present if `include_ccs_metadata` was `true` in the request * and a cross-cluster search was performed. */ _clusters?: EsqlEsqlClusterInfo; /** Profiling information. Present if `profile` was `true` in the request. * The contents of this field are currently unstable. */ profile?: any; } export interface EsqlEsqlShardFailure { shard: integer; index: IndexName | null; node?: NodeId; reason: ErrorCause; } export interface EsqlEsqlShardInfo { total: integer; successful?: integer; skipped?: integer; failed?: integer; } export interface EsqlTableValuesContainer { integer?: EsqlTableValuesIntegerValue[]; keyword?: EsqlTableValuesKeywordValue[]; long?: EsqlTableValuesLongValue[]; double?: EsqlTableValuesLongDouble[]; } export type EsqlTableValuesIntegerValue = integer | integer[]; export type EsqlTableValuesKeywordValue = string | string[]; export type EsqlTableValuesLongDouble = double | double[]; export type EsqlTableValuesLongValue = long | long[]; export interface EsqlAsyncQueryRequest extends RequestBase { /** The character to use between values within a CSV row. * It is valid only for the CSV format. */ delimiter?: string; /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean; /** A short version of the Accept header, for example `json` or `yaml`. */ format?: EsqlEsqlFormat; /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean; /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. 
*/ filter?: QueryDslQueryContainer; locale?: string; /** To avoid any attempts at hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ params?: FieldValue[]; /** If provided and `true`, the response will include an extra `profile` object * with information on how the query was executed. This information is for human debugging * and its format can change at any time but it can give some insight into the performance * of each part of the query. */ profile?: boolean; /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ query: string; /** Tables to use with the LOOKUP operation. The top level key is the table * name and the next level key is the column name. */ tables?: Record>; /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` * object with information about the clusters that participated in the search along with info such as shards * count. */ include_ccs_metadata?: boolean; /** The period to wait for the request to finish. * By default, the request waits for 1 second for the query results. * If the query completes during this period, results are returned. * Otherwise, a query ID is returned that can later be used to retrieve the results. */ wait_for_completion_timeout?: Duration; /** The period for which the query and its results are stored in the cluster. * The default period is five days. * When this period expires, the query and its results are deleted, even if the query is still ongoing. * If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ keep_alive?: Duration; /** Indicates whether the query and its results are stored in the cluster. * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ keep_on_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { delimiter?: never; drop_null_columns?: never; format?: never; columnar?: never; filter?: never; locale?: never; params?: never; profile?: never; query?: never; tables?: never; include_ccs_metadata?: never; wait_for_completion_timeout?: never; keep_alive?: never; keep_on_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { delimiter?: never; drop_null_columns?: never; format?: never; columnar?: never; filter?: never; locale?: never; params?: never; profile?: never; query?: never; tables?: never; include_ccs_metadata?: never; wait_for_completion_timeout?: never; keep_alive?: never; keep_on_completion?: never; }; } export type EsqlAsyncQueryResponse = EsqlAsyncEsqlResult; export interface EsqlAsyncQueryDeleteRequest extends RequestBase { /** The unique identifier of the query. * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id; /** All values in `body` will be added to the request body.
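 *
 * @example
 * A sketch of the async ES|QL flow: submit, then fetch and clean up once an
 * ID is available. The index name and query are hypothetical.
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const submitted = await client.esql.asyncQuery({
 *   query: 'FROM my-index | STATS count = COUNT(*) BY host.name | LIMIT 10',
 *   wait_for_completion_timeout: '2s',
 *   keep_on_completion: true,
 * })
 * if (submitted.id) {
 *   const result = await client.esql.asyncQueryGet({ id: submitted.id })
 *   console.log(result.columns, result.values)
 *   await client.esql.asyncQueryDelete({ id: submitted.id }) // free the stored results
 * }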
*/ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase; export interface EsqlAsyncQueryGetRequest extends RequestBase { /** The unique identifier of the query. * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id; /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean; /** The period for which the query and its results are stored in the cluster. * When this period expires, the query and its results are deleted, even if the query is still ongoing. */ keep_alive?: Duration; /** The period to wait for the request to finish. * By default, the request waits for complete query results. * If the request completes during the period specified in this parameter, complete query results are returned. * Otherwise, the response returns an `is_running` value of `true` and no results. */ wait_for_completion_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; drop_null_columns?: never; keep_alive?: never; wait_for_completion_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; drop_null_columns?: never; keep_alive?: never; wait_for_completion_timeout?: never; }; } export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult; export interface EsqlAsyncQueryStopRequest extends RequestBase { /** The unique identifier of the query. * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id; /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; drop_null_columns?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; drop_null_columns?: never; }; } export type EsqlAsyncQueryStopResponse = EsqlEsqlResult; export interface EsqlQueryRequest extends RequestBase { /** A short version of the Accept header, e.g. json, yaml. */ format?: EsqlEsqlFormat; /** The character to use between values within a CSV row. Only valid for the CSV format. */ delimiter?: string; /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */ drop_null_columns?: boolean; /** By default, ES|QL returns results as rows. 
For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean; /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ filter?: QueryDslQueryContainer; locale?: string; /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ params?: FieldValue[]; /** If provided and `true` the response will include an extra `profile` object * with information on how the query was executed. This information is for human debugging * and its format can change at any time but it can give some insight into the performance * of each part of the query. */ profile?: boolean; /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ query: string; /** Tables to use with the LOOKUP operation. The top level key is the table * name and the next level key is the column name. */ tables?: Record>; /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` * object with information about the clusters that participated in the search along with info such as shards * count. */ include_ccs_metadata?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { format?: never; delimiter?: never; drop_null_columns?: never; columnar?: never; filter?: never; locale?: never; params?: never; profile?: never; query?: never; tables?: never; include_ccs_metadata?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { format?: never; delimiter?: never; drop_null_columns?: never; columnar?: never; filter?: never; locale?: never; params?: never; profile?: never; query?: never; tables?: never; include_ccs_metadata?: never; }; } export type EsqlQueryResponse = EsqlEsqlResult; export interface FeaturesFeature { name: string; description: string; } export interface FeaturesGetFeaturesRequest extends RequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface FeaturesGetFeaturesResponse { features: FeaturesFeature[]; } export interface FeaturesResetFeaturesRequest extends RequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface FeaturesResetFeaturesResponse { features: FeaturesFeature[]; } export type FleetCheckpoint = long; export interface FleetGlobalCheckpointsRequest extends RequestBase { /** A single index or index alias that resolves to a single index. 
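 *
 * @example
 * A sketch of waiting for global checkpoints to advance; the index name and
 * checkpoint values are hypothetical:
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const res = await client.fleet.globalCheckpoints({
 *   index: '.fleet-actions-results', // must resolve to a single index
 *   wait_for_advance: true,
 *   checkpoints: [42],               // previous checkpoints to advance past
 *   timeout: '30s',
 * })
 * console.log(res.global_checkpoints, res.timed_out)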
*/ index: IndexName | IndexAlias; /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints * to advance past the provided `checkpoints`. */ wait_for_advance?: boolean; /** A boolean value which controls whether to wait (until the timeout) for the target index to exist * and all primary shards to be active. Can only be true when `wait_for_advance` is true. */ wait_for_index?: boolean; /** A comma-separated list of previous global checkpoints. When used in combination with `wait_for_advance`, * the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list * will cause Elasticsearch to immediately return the current global checkpoints. */ checkpoints?: FleetCheckpoint[]; /** Period to wait for the global checkpoints to advance past `checkpoints`. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; wait_for_advance?: never; wait_for_index?: never; checkpoints?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; wait_for_advance?: never; wait_for_index?: never; checkpoints?: never; timeout?: never; }; } export interface FleetGlobalCheckpointsResponse { global_checkpoints: FleetCheckpoint[]; timed_out: boolean; } export interface FleetMsearchRequest extends RequestBase { /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index?: IndexName | IndexAlias; /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean; /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean; /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards; /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean; /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean; /** Maximum number of concurrent searches the multi search API can execute. */ max_concurrent_searches?: integer; /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ max_concurrent_shard_requests?: integer; /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long; /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ search_type?: SearchType; /** If true, hits.total is returned as an integer in the response. Defaults to false, which returns an object.
*/ rest_total_hits_as_int?: boolean; /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean; /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[]; /** If true, returns partial results if there are shard request timeouts or shard failures. * If false, returns an error with no partial results. * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ allow_partial_search_results?: boolean; searches?: MsearchRequestItem[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; ccs_minimize_roundtrips?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; max_concurrent_searches?: never; max_concurrent_shard_requests?: never; pre_filter_shard_size?: never; search_type?: never; rest_total_hits_as_int?: never; typed_keys?: never; wait_for_checkpoints?: never; allow_partial_search_results?: never; searches?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; ccs_minimize_roundtrips?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; max_concurrent_searches?: never; max_concurrent_shard_requests?: never; pre_filter_shard_size?: never; search_type?: never; rest_total_hits_as_int?: never; typed_keys?: never; wait_for_checkpoints?: never; allow_partial_search_results?: never; searches?: never; }; } export interface FleetMsearchResponse { docs: MsearchResponseItem[]; } export interface FleetSearchRequest extends RequestBase { /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index: IndexName | IndexAlias; allow_no_indices?: boolean; analyzer?: string; analyze_wildcard?: boolean; batched_reduce_size?: long; ccs_minimize_roundtrips?: boolean; default_operator?: QueryDslOperator; df?: string; expand_wildcards?: ExpandWildcards; ignore_throttled?: boolean; ignore_unavailable?: boolean; lenient?: boolean; max_concurrent_shard_requests?: integer; preference?: string; pre_filter_shard_size?: long; request_cache?: boolean; routing?: Routing; scroll?: Duration; search_type?: SearchType; /** Specifies which field to use for suggestions. */ suggest_field?: Field; suggest_mode?: SuggestMode; suggest_size?: long; /** The source text for which the suggestions should be returned. */ suggest_text?: string; typed_keys?: boolean; rest_total_hits_as_int?: boolean; _source_excludes?: Fields; _source_includes?: Fields; q?: string; /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[]; /** If true, returns partial results if there are shard request timeouts or shard failures. * If false, returns an error with no partial results. * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. 
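 *
 * @example
 * A sketch of a checkpoint-gated fleet search; the index, checkpoint, and
 * query below are hypothetical:
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const res = await client.fleet.search({
 *   index: '.fleet-actions-results', // must resolve to a single index
 *   wait_for_checkpoints: [117],     // run only once this checkpoint is searchable
 *   allow_partial_search_results: false,
 *   query: { term: { agent_id: 'agent-1' } },
 * })
 * console.log(res.hits.total)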
*/ allow_partial_search_results?: boolean; aggregations?: Record; /** @alias aggregations */ aggs?: Record; collapse?: SearchFieldCollapse; /** If true, returns detailed information about score computation as part of a hit. */ explain?: boolean; /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record; /** Starting document offset. By default, you cannot page through more than 10,000 * hits using the from and size parameters. To page through more hits, use the * search_after parameter. */ from?: integer; highlight?: SearchHighlight; /** Number of hits matching the query to count accurately. If true, the exact * number of hits is returned at the cost of some performance. If false, the * response does not include the total number of hits matching the query. * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits; /** Boosts the _score of documents from specified indices. */ indices_boost?: Partial>[]; /** Array of wildcard (*) patterns. The request returns doc values for field * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[]; /** Minimum _score for matching documents. Documents with a lower _score are * not included in search results and results collected by aggregations. */ min_score?: double; post_filter?: QueryDslQueryContainer; profile?: boolean; /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer; rescore?: SearchRescore | SearchRescore[]; /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record; search_after?: SortResults; /** The number of hits to return. By default, you cannot page through more * than 10,000 hits using the from and size parameters. To page through more * hits, use the search_after parameter. */ size?: integer; slice?: SlicedScroll; sort?: Sort; /** Indicates which source fields are returned for matching documents. These * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig; /** Array of wildcard (*) patterns. The request returns values for field names * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[]; suggest?: SearchSuggester; /** Maximum number of documents to collect for each shard. If a query reaches this * limit, Elasticsearch terminates the query early. Elasticsearch collects documents * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long; /** Specifies the period of time to wait for a response from each shard. If no response * is received before the timeout expires, the request fails and returns an error. * Defaults to no timeout. */ timeout?: string; /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean; /** If true, returns document version as part of a hit. */ version?: boolean; /** If true, returns sequence number and primary term of the last modification * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean; /** List of stored fields to return as part of a hit. If no fields are specified, * no stored fields are included in the response. If this field is specified, the _source * parameter defaults to false. You can pass _source: true to return both source fields * and stored fields in the search response. */ stored_fields?: Fields; /** Limits the search to a point in time (PIT). 
If you provide a PIT, you * cannot specify an index in the request path. */ pit?: SearchPointInTimeReference; /** Defines one or more runtime fields in the search request. These fields take * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields; /** Stats groups to associate with the search. Each group maintains a statistics * aggregation for its associated searches. You can retrieve these stats using * the indices stats API. */ stats?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; batched_reduce_size?: never; ccs_minimize_roundtrips?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; lenient?: never; max_concurrent_shard_requests?: never; preference?: never; pre_filter_shard_size?: never; request_cache?: never; routing?: never; scroll?: never; search_type?: never; suggest_field?: never; suggest_mode?: never; suggest_size?: never; suggest_text?: never; typed_keys?: never; rest_total_hits_as_int?: never; _source_excludes?: never; _source_includes?: never; q?: never; wait_for_checkpoints?: never; allow_partial_search_results?: never; aggregations?: never; aggs?: never; collapse?: never; explain?: never; ext?: never; from?: never; highlight?: never; track_total_hits?: never; indices_boost?: never; docvalue_fields?: never; min_score?: never; post_filter?: never; profile?: never; query?: never; rescore?: never; script_fields?: never; search_after?: never; size?: never; slice?: never; sort?: never; _source?: never; fields?: never; suggest?: never; terminate_after?: never; timeout?: never; track_scores?: never; version?: never; seq_no_primary_term?: never; stored_fields?: never; pit?: never; runtime_mappings?: never; stats?: never; }); /** All values in `querystring` will be added to the request querystring.
*/ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; analyzer?: never; analyze_wildcard?: never; batched_reduce_size?: never; ccs_minimize_roundtrips?: never; default_operator?: never; df?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; lenient?: never; max_concurrent_shard_requests?: never; preference?: never; pre_filter_shard_size?: never; request_cache?: never; routing?: never; scroll?: never; search_type?: never; suggest_field?: never; suggest_mode?: never; suggest_size?: never; suggest_text?: never; typed_keys?: never; rest_total_hits_as_int?: never; _source_excludes?: never; _source_includes?: never; q?: never; wait_for_checkpoints?: never; allow_partial_search_results?: never; aggregations?: never; aggs?: never; collapse?: never; explain?: never; ext?: never; from?: never; highlight?: never; track_total_hits?: never; indices_boost?: never; docvalue_fields?: never; min_score?: never; post_filter?: never; profile?: never; query?: never; rescore?: never; script_fields?: never; search_after?: never; size?: never; slice?: never; sort?: never; _source?: never; fields?: never; suggest?: never; terminate_after?: never; timeout?: never; track_scores?: never; version?: never; seq_no_primary_term?: never; stored_fields?: never; pit?: never; runtime_mappings?: never; stats?: never; }; } export interface FleetSearchResponse { took: long; timed_out: boolean; _shards: ShardStatistics; hits: SearchHitsMetadata; aggregations?: Record; _clusters?: ClusterStatistics; fields?: Record; max_score?: double; num_reduce_phases?: long; profile?: SearchProfile; pit_id?: Id; _scroll_id?: ScrollId; suggest?: Record[]>; terminated_early?: boolean; } export interface GraphConnection { doc_count: long; source: long; target: long; weight: double; } export interface GraphExploreControls { /** To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample. * You can do this by selecting a single-value field and setting a maximum number of documents per value for that field. */ sample_diversity?: GraphSampleDiversity; /** Each hop considers a sample of the best-matching documents on each shard. * Using samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms. * Very small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms. * Very large sample sizes can dilute the quality of the results and increase execution times. */ sample_size?: integer; /** The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned. * This timeout is honored on a best-effort basis. * Execution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field. */ timeout?: Duration; /** Filters associated terms so only those that are significantly associated with your query are included. */ use_significance: boolean; } export interface GraphHop { /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop; /** An optional guiding query that constrains the Graph API as it explores connected terms. */ query?: QueryDslQueryContainer; /** Contains the fields you are interested in. 
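 *
 * @example
 * An illustrative graph exploration: start from `product` vertices and hop to
 * related `query.raw` terms. The index and field names are hypothetical.
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const res = await client.graph.explore({
 *   index: 'clicklogs',
 *   query: { match: { 'query.raw': 'midi' } },
 *   vertices: [{ field: 'product' }],
 *   connections: { vertices: [{ field: 'query.raw' }] },
 *   controls: { use_significance: true, sample_size: 2000 },
 * })
 * console.log(res.vertices.length, res.connections.length)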
*/ vertices: GraphVertexDefinition[]; } export interface GraphSampleDiversity { field: Field; max_docs_per_value: integer; } export interface GraphVertex { depth: long; field: Field; term: string; weight: double; } export interface GraphVertexDefinition { /** Prevents the specified terms from being included in the results. */ exclude?: string[]; /** Identifies a field in the documents of interest. */ field: Field; /** Identifies the terms of interest that form the starting points from which you want to spider out. */ include?: (GraphVertexInclude | string)[]; /** Specifies how many documents must contain a pair of terms before it is considered to be a useful connection. * This setting acts as a certainty threshold. */ min_doc_count?: long; /** Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration. */ shard_min_doc_count?: long; /** Specifies the maximum number of vertex terms returned for each field. */ size?: integer; } export interface GraphVertexInclude { boost?: double; term: string; } export interface GraphExploreRequest extends RequestBase { /** Name of the index. */ index: Indices; /** Custom value used to route operations to a specific shard. */ routing?: Routing; /** Specifies the period of time to wait for a response from each shard. * If no response is received before the timeout expires, the request fails and returns an error. * Defaults to no timeout. */ timeout?: Duration; /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop; /** Direct the Graph API how to build the graph. */ controls?: GraphExploreControls; /** A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. */ query?: QueryDslQueryContainer; /** Specifies one or more fields that contain the terms you want to include in the graph as vertices. */ vertices?: GraphVertexDefinition[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; routing?: never; timeout?: never; connections?: never; controls?: never; query?: never; vertices?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; routing?: never; timeout?: never; connections?: never; controls?: never; query?: never; vertices?: never; }; } export interface GraphExploreResponse { connections: GraphConnection[]; failures: ShardFailure[]; timed_out: boolean; took: long; vertices: GraphVertex[]; } export interface IlmActions { /** Phases allowed: warm, cold. */ allocate?: IlmAllocateAction; /** Phases allowed: delete. */ delete?: IlmDeleteAction; /** Phases allowed: hot, warm, cold. */ downsample?: IlmDownsampleAction; /** The freeze action is a noop in 8.x */ freeze?: EmptyObject; /** Phases allowed: hot, warm. */ forcemerge?: IlmForceMergeAction; /** Phases allowed: warm, cold. */ migrate?: IlmMigrateAction; /** Phases allowed: hot, warm, cold. */ readonly?: EmptyObject; /** Phases allowed: hot. */ rollover?: IlmRolloverAction; /** Phases allowed: hot, warm, cold. */ set_priority?: IlmSetPriorityAction; /** Phases allowed: hot, cold, frozen. */ searchable_snapshot?: IlmSearchableSnapshotAction; /** Phases allowed: hot, warm. */ shrink?: IlmShrinkAction; /** Phases allowed: hot, warm, cold, frozen. */ unfollow?: EmptyObject; /** Phases allowed: delete.
*/ wait_for_snapshot?: IlmWaitForSnapshotAction; } export interface IlmAllocateAction { number_of_replicas?: integer; total_shards_per_node?: integer; include?: Record; exclude?: Record; require?: Record; } export interface IlmDeleteAction { delete_searchable_snapshot?: boolean; } export interface IlmDownsampleAction { fixed_interval: DurationLarge; wait_timeout?: Duration; } export interface IlmForceMergeAction { max_num_segments: integer; index_codec?: string; } export interface IlmMigrateAction { enabled?: boolean; } export interface IlmPhase { actions?: IlmActions; min_age?: Duration; } export interface IlmPhases { cold?: IlmPhase; delete?: IlmPhase; frozen?: IlmPhase; hot?: IlmPhase; warm?: IlmPhase; } export interface IlmPolicy { phases: IlmPhases; /** Arbitrary metadata that is not automatically generated or used by Elasticsearch. */ _meta?: Metadata; } export interface IlmRolloverAction { max_size?: ByteSize; max_primary_shard_size?: ByteSize; max_age?: Duration; max_docs?: long; max_primary_shard_docs?: long; min_size?: ByteSize; min_primary_shard_size?: ByteSize; min_age?: Duration; min_docs?: long; min_primary_shard_docs?: long; } export interface IlmSearchableSnapshotAction { snapshot_repository: string; force_merge_index?: boolean; } export interface IlmSetPriorityAction { priority?: integer; } export interface IlmShrinkAction { number_of_shards?: integer; max_primary_shard_size?: ByteSize; allow_write_after_shrink?: boolean; } export interface IlmWaitForSnapshotAction { policy: string; } export interface IlmDeleteLifecycleRequest extends RequestBase { /** Identifier for the policy. */ name: Name; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase; export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged; export interface IlmExplainLifecycleLifecycleExplainManaged { action?: Name; action_time?: DateTime; action_time_millis?: EpochTime; age?: Duration; failed_step?: Name; failed_step_retry_count?: integer; index: IndexName; index_creation_date?: DateTime; index_creation_date_millis?: EpochTime; is_auto_retryable_error?: boolean; lifecycle_date?: DateTime; lifecycle_date_millis?: EpochTime; managed: true; phase?: Name; phase_time?: DateTime; phase_time_millis?: EpochTime; policy?: Name; previous_step_info?: Record; repository_name?: string; snapshot_name?: string; shrink_index_name?: string; step?: Name; step_info?: Record; step_time?: DateTime; step_time_millis?: EpochTime; phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution; time_since_index_creation?: Duration; } export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { phase_definition?: IlmPhase; policy: Name; version: VersionNumber; modified_date_in_millis: EpochTime; } export interface IlmExplainLifecycleLifecycleExplainUnmanaged { index: IndexName; managed: false; } export interface IlmExplainLifecycleRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). * To target all data streams and indices, use `*` or `_all`. */ index: IndexName; /** Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist. */ only_errors?: boolean; /** Filters the returned indices to only indices that are managed by ILM. */ only_managed?: boolean; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; only_errors?: never; only_managed?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; only_errors?: never; only_managed?: never; master_timeout?: never; }; } export interface IlmExplainLifecycleResponse { indices: Record; } export interface IlmGetLifecycleLifecycle { modified_date: DateTime; policy: IlmPolicy; version: VersionNumber; } export interface IlmGetLifecycleRequest extends RequestBase { /** Identifier for the policy. */ name?: Name; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring.
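 *
 * @example
 * A sketch that inspects ILM progress for a hypothetical index pattern, using
 * the `managed` discriminator of the explain response above:
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * const res = await client.ilm.explainLifecycle({
 *   index: 'my-index-*',
 *   only_errors: true, // only indices stuck in an error step
 * })
 * for (const [name, detail] of Object.entries(res.indices)) {
 *   if (detail.managed) console.log(name, detail.phase, detail.failed_step)
 * }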
*/ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type IlmGetLifecycleResponse = Record; export interface IlmGetStatusRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface IlmGetStatusResponse { operation_mode: LifecycleOperationMode; } export interface IlmMigrateToDataTiersRequest extends RequestBase { /** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. * This provides a way to retrieve the indices and ILM policies that need to be migrated. */ dry_run?: boolean; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never time out. */ master_timeout?: Duration; legacy_template_to_delete?: string; node_attribute?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { dry_run?: never; master_timeout?: never; legacy_template_to_delete?: never; node_attribute?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { dry_run?: never; master_timeout?: never; legacy_template_to_delete?: never; node_attribute?: never; }; } export interface IlmMigrateToDataTiersResponse { dry_run: boolean; /** The name of the legacy index template that was deleted. * This information is missing if no legacy index templates were deleted. */ removed_legacy_template: string; /** The ILM policies that were updated. */ migrated_ilm_policies: string[]; /** The indices that were migrated to tier preference routing. */ migrated_indices: Indices; /** The legacy index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_legacy_templates: string[]; /** The composable index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_composable_templates: string[]; /** The component templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_component_templates: string[]; } export interface IlmMoveToStepRequest extends RequestBase { /** The name of the index whose lifecycle step is to change */ index: IndexName; /** The step that the index is expected to be in. */ current_step: IlmMoveToStepStepKey; /** The step that you want to run. */ next_step: IlmMoveToStepStepKey; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; current_step?: never; next_step?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; current_step?: never; next_step?: never; }; } export type IlmMoveToStepResponse = AcknowledgedResponseBase; export interface IlmMoveToStepStepKey { /** The optional action to which the index will be moved. */ action?: string; /** The optional step name to which the index will be moved. */ name?: string; phase: string; } export interface IlmPutLifecycleRequest extends RequestBase { /** Identifier for the policy.
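 *
 * @example
 * An illustrative policy: roll over in the hot phase, delete after 90 days.
 * The policy name and thresholds are hypothetical.
 *
 * import { Client } from '@elastic/elasticsearch'
 *
 * const client = new Client({ node: 'http://localhost:9200' })
 * await client.ilm.putLifecycle({
 *   name: 'logs-policy',
 *   policy: {
 *     phases: {
 *       hot: { actions: { rollover: { max_primary_shard_size: '50gb', max_age: '30d' } } },
 *       delete: { min_age: '90d', actions: { delete: {} } },
 *     },
 *   },
 * })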
*/ name: Name; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; policy?: IlmPolicy; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; policy?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; policy?: never; }; } export type IlmPutLifecycleResponse = AcknowledgedResponseBase; export interface IlmRemovePolicyRequest extends RequestBase { /** The name of the index to remove the policy from */ index: IndexName; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; }; } export interface IlmRemovePolicyResponse { failed_indexes: IndexName[]; has_failures: boolean; } export interface IlmRetryRequest extends RequestBase { /** The names of the indices (comma-separated) whose failed lifecycle step is to be retried */ index: IndexName; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; }; } export type IlmRetryResponse = AcknowledgedResponseBase; export interface IlmStartRequest extends RequestBase { /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export type IlmStartResponse = AcknowledgedResponseBase; export interface IlmStopRequest extends RequestBase { /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export type IlmStopResponse = AcknowledgedResponseBase; export interface IndicesAlias { /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer; /** Value used to route indexing operations to a specific shard. 
 * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: Routing; /** If `true`, the alias is hidden. * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean; /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean; /** Value used to route indexing and search operations to a specific shard. */ routing?: Routing; /** Value used to route search operations to a specific shard. * If specified, this overwrites the `routing` value for search operations. */ search_routing?: Routing; } export interface IndicesAliasDefinition { /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer; /** Value used to route indexing operations to a specific shard. * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: string; /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean; /** Value used to route indexing and search operations to a specific shard. */ routing?: string; /** Value used to route search operations to a specific shard. * If specified, this overwrites the `routing` value for search operations. */ search_routing?: string; /** If `true`, the alias is hidden. * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean; } export interface IndicesCacheQueries { enabled: boolean; } export interface IndicesDataStream { /** Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template. * If empty, the response omits this property. */ _meta?: Metadata; /** If `true`, the data stream allows custom routing on write requests. */ allow_custom_routing?: boolean; /** Information about failure store backing indices. */ failure_store?: IndicesFailureStore; /** Current generation for the data stream. This number acts as a cumulative count of the stream’s rollovers, starting at 1. */ generation: integer; /** If `true`, the data stream is hidden. */ hidden: boolean; /** Name of the current ILM lifecycle policy in the stream’s matching index template. * This lifecycle policy is set in the `index.lifecycle.name` setting. * If the template does not include a lifecycle policy, this property is not included in the response. * NOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API. */ ilm_policy?: Name; /** Name of the lifecycle system that will manage the next generation of the data stream. */ next_generation_managed_by: IndicesManagedBy; /** Indicates if ILM should take precedence over DSL in case both are configured to manage this data stream. */ prefer_ilm: boolean; /** Array of objects containing information about the data stream’s backing indices. * The last item in this array contains information about the stream’s current write index. */ indices: IndicesDataStreamIndex[]; /** Contains the configuration for the data stream lifecycle of this data stream. */ lifecycle?: IndicesDataStreamLifecycleWithRollover; /** Name of the data stream. */ name: DataStreamName; /** If `true`, the data stream is created and managed by cross-cluster replication and the local cluster cannot write into this data stream or change its mappings. */ replicated?: boolean; /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. 
If the rollover fails the indexing request will fail too. */ rollover_on_write: boolean; /** Health status of the data stream. * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */ status: HealthStatus; /** If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. */ system?: boolean; /** Name of the index template used to create the data stream’s backing indices. * The template’s index pattern must match the name of this data stream. */ template: Name; /** Information about the `@timestamp` field in the data stream. */ timestamp_field: IndicesDataStreamTimestampField; } export interface IndicesDataStreamIndex { /** Name of the backing index. */ index_name: IndexName; /** Universally unique identifier (UUID) for the index. */ index_uuid: Uuid; /** Name of the current ILM lifecycle policy configured for this backing index. */ ilm_policy?: Name; /** Name of the lifecycle system that's currently managing this backing index. */ managed_by?: IndicesManagedBy; /** Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */ prefer_ilm?: boolean; } export interface IndicesDataStreamLifecycle { /** If defined, every document added to this data stream will be stored at least for this time frame. * Any time after this duration the document could be deleted. * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration; /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling; /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean; } export interface IndicesDataStreamLifecycleDownsampling { /** The list of downsampling rounds to execute as part of this downsampling configuration */ rounds: IndicesDownsamplingRound[]; } export interface IndicesDataStreamLifecycleRolloverConditions { min_age?: Duration; max_age?: string; min_docs?: long; max_docs?: long; min_size?: ByteSize; max_size?: ByteSize; min_primary_shard_size?: ByteSize; max_primary_shard_size?: ByteSize; min_primary_shard_docs?: long; max_primary_shard_docs?: long; } export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle { /** The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`. * This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true. * The contents of this field are subject to change. */ rollover?: IndicesDataStreamLifecycleRolloverConditions; } export interface IndicesDataStreamTimestampField { /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */ name: Field; } export interface IndicesDataStreamVisibility { hidden?: boolean; allow_custom_routing?: boolean; } export interface IndicesDownsampleConfig { /** The interval at which to aggregate the original time series index. 
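 *
 * A hedged sketch with illustrative values: a data stream lifecycle that retains documents for 90 days and downsamples in two rounds after rollover could be declared as:
 *
 * const lifecycle: IndicesDataStreamLifecycle = {
 *   data_retention: '90d',
 *   downsampling: {
 *     rounds: [
 *       { after: '1d', config: { fixed_interval: '1h' } },
 *       { after: '7d', config: { fixed_interval: '1d' } },
 *     ],
 *   },
 * }
 *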
*/ fixed_interval: DurationLarge; } export interface IndicesDownsamplingRound { /** The duration since rollover when this downsampling round should execute */ after: Duration; /** The downsample configuration to execute. */ config: IndicesDownsampleConfig; } export interface IndicesFailureStore { enabled: boolean; indices: IndicesDataStreamIndex[]; rollover_on_write: boolean; } export interface IndicesFielddataFrequencyFilter { max: double; min: double; min_segment_size: integer; } export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum'; export interface IndicesIndexRouting { allocation?: IndicesIndexRoutingAllocation; rebalance?: IndicesIndexRoutingRebalance; } export interface IndicesIndexRoutingAllocation { enable?: IndicesIndexRoutingAllocationOptions; include?: IndicesIndexRoutingAllocationInclude; initial_recovery?: IndicesIndexRoutingAllocationInitialRecovery; disk?: IndicesIndexRoutingAllocationDisk; } export interface IndicesIndexRoutingAllocationDisk { threshold_enabled?: boolean | string; } export interface IndicesIndexRoutingAllocationInclude { _tier_preference?: string; _id?: Id; } export interface IndicesIndexRoutingAllocationInitialRecovery { _id?: Id; } export type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none'; export interface IndicesIndexRoutingRebalance { enable: IndicesIndexRoutingRebalanceOptions; } export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none'; export interface IndicesIndexSegmentSort { field?: Fields; order?: IndicesSegmentSortOrder | IndicesSegmentSortOrder[]; mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[]; missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[]; } export interface IndicesIndexSettingBlocks { read_only?: SpecUtilsStringified; read_only_allow_delete?: SpecUtilsStringified; read?: SpecUtilsStringified; write?: SpecUtilsStringified; metadata?: SpecUtilsStringified; } export interface IndicesIndexSettingsKeys { index?: IndicesIndexSettings; mode?: string; routing_path?: string | string[]; soft_deletes?: IndicesSoftDeletes; sort?: IndicesIndexSegmentSort; /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_shards?: integer | string; /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_replicas?: integer | string; number_of_routing_shards?: integer; check_on_startup?: IndicesIndexCheckOnStartup; codec?: string; routing_partition_size?: SpecUtilsStringified; load_fixed_bitset_filters_eagerly?: boolean; hidden?: boolean | string; auto_expand_replicas?: SpecUtilsWithNullValue; merge?: IndicesMerge; search?: IndicesSettingsSearch; refresh_interval?: Duration; max_result_window?: integer; max_inner_result_window?: integer; max_rescore_window?: integer; max_docvalue_fields_search?: integer; max_script_fields?: integer; max_ngram_diff?: integer; max_shingle_diff?: integer; blocks?: IndicesIndexSettingBlocks; max_refresh_listeners?: integer; /** Settings to define analyzers, tokenizers, token filters and character filters. * Refer to the linked documentation for step-by-step examples of updating analyzers on existing indices. 
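 *
 * A hedged sketch of an IndicesIndexSettings value exercising a few of the keys above (the analyzer name and all values are illustrative):
 *
 * const settings: IndicesIndexSettings = {
 *   number_of_shards: 1,
 *   refresh_interval: '30s',
 *   analysis: {
 *     analyzer: {
 *       my_lowercase: { type: 'custom', tokenizer: 'standard', filter: ['lowercase'] },
 *     },
 *   },
 * }
 *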
*/ analyze?: IndicesSettingsAnalyze; highlight?: IndicesSettingsHighlight; max_terms_count?: integer; max_regex_length?: integer; routing?: IndicesIndexRouting; gc_deletes?: Duration; default_pipeline?: PipelineName; final_pipeline?: PipelineName; lifecycle?: IndicesIndexSettingsLifecycle; provided_name?: Name; creation_date?: SpecUtilsStringified; creation_date_string?: DateTime; uuid?: Uuid; version?: IndicesIndexVersioning; verified_before_close?: boolean | string; format?: string | integer; max_slices_per_scroll?: integer; translog?: IndicesTranslog; query_string?: IndicesSettingsQueryString; priority?: integer | string; top_metrics_max_size?: integer; analysis?: IndicesIndexSettingsAnalysis; settings?: IndicesIndexSettings; time_series?: IndicesIndexSettingsTimeSeries; queries?: IndicesQueries; /** Configure custom similarity settings to customize how search results are scored. */ similarity?: Record; /** Enable or disable dynamic mapping for an index. */ mapping?: IndicesMappingLimitSettings; 'indexing.slowlog'?: IndicesIndexingSlowlogSettings; /** Configure indexing back pressure limits. */ indexing_pressure?: IndicesIndexingPressure; /** The store module allows you to control how index data is stored and accessed on disk. */ store?: IndicesStorage; } export type IndicesIndexSettings = IndicesIndexSettingsKeys & { [property: string]: any; }; export interface IndicesIndexSettingsAnalysis { analyzer?: Record; char_filter?: Record; filter?: Record; normalizer?: Record; tokenizer?: Record; } export interface IndicesIndexSettingsLifecycle { /** The name of the policy to use to manage the index. For information about how Elasticsearch applies policy changes, see Policy updates. */ name?: Name; /** Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action. * You can explicitly set it to skip rollover. */ indexing_complete?: SpecUtilsStringified; /** If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting * if you create a new index that contains old data and want to use the original creation date to calculate the index * age. Specified as a Unix epoch value in milliseconds. */ origination_date?: long; /** Set to true to parse the origination date from the index name. This origination date is used to calculate the index age * for its phase transitions. The index name must match the pattern ^.*-{date_format}-\\d+, where the date_format is * yyyy.MM.dd and the trailing digits are optional. An index that was rolled over would normally match the full format, * for example logs-2016.10.31-000002. If the index name doesn’t match the pattern, index creation fails. */ parse_origination_date?: boolean; step?: IndicesIndexSettingsLifecycleStep; /** The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. * When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more * information about rolling indices, see Rollover. */ rollover_alias?: string; /** Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are * applicable for an index). */ prefer_ilm?: boolean | string; } export interface IndicesIndexSettingsLifecycleStep { /** Time to wait for the cluster to resolve allocation issues during an ILM shrink action. Must be greater than 1h (1 hour). * See Shard allocation for shrink. 
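 *
 * A hedged sketch tying the lifecycle settings above to an index (the policy and alias names are illustrative; `rollover_alias` matters only when the policy's rollover action targets an alias rather than a data stream):
 *
 * const settings: IndicesIndexSettings = {
 *   lifecycle: { name: 'logs-policy', rollover_alias: 'logs-write' },
 * }
 *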
*/ wait_time_threshold?: Duration; } export interface IndicesIndexSettingsTimeSeries { end_time?: DateTime; start_time?: DateTime; } export interface IndicesIndexState { aliases?: Record; mappings?: MappingTypeMapping; settings?: IndicesIndexSettings; /** Default settings, included when the request's `include_defaults` is `true`. */ defaults?: IndicesIndexSettings; data_stream?: DataStreamName; /** Data stream lifecycle applicable if this is a data stream. */ lifecycle?: IndicesDataStreamLifecycle; } export interface IndicesIndexTemplate { /** Index patterns used to match the names of data streams and indices during creation. */ index_patterns: Names; /** An ordered list of component template names. * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of: Name[]; /** Template to be applied. * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesIndexTemplateSummary; /** Version number used to manage index templates externally. * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber; /** Priority to determine index template precedence when a new data stream or index is created. * The index template with the highest priority is chosen. * If no priority is specified, the template is treated as though it is of priority 0 (lowest priority). * This number is not automatically generated by Elasticsearch. */ priority?: long; /** Optional user metadata about the index template. May have any contents. * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata; allow_auto_create?: boolean; /** If this object is included, the template is used to create data streams and their backing indices. * Supports an empty object. * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesIndexTemplateDataStreamConfiguration; /** Marks this index template as deprecated. * When creating or updating a non-deprecated index template that uses deprecated components, * Elasticsearch will emit a deprecation warning. */ deprecated?: boolean; /** A list of component template names that are allowed to be absent. */ ignore_missing_component_templates?: Names; } export interface IndicesIndexTemplateDataStreamConfiguration { /** If true, the data stream is hidden. */ hidden?: boolean; /** If true, the data stream supports custom routing. */ allow_custom_routing?: boolean; } export interface IndicesIndexTemplateSummary { /** Aliases to add. * If the index template includes a `data_stream` object, these are data stream aliases. * Otherwise, these are index aliases. * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record; /** Mapping for fields in the index. * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping; /** Configuration options for the index. */ settings?: IndicesIndexSettings; lifecycle?: IndicesDataStreamLifecycleWithRollover; } export interface IndicesIndexVersioning { created?: VersionString; created_string?: string; } export interface IndicesIndexingPressure { memory: IndicesIndexingPressureMemory; } export interface IndicesIndexingPressureMemory { /** Number of outstanding bytes that may be consumed by indexing requests. When this limit is reached or exceeded, * the node will reject new coordinating and primary operations. 
When replica operations consume 1.5x this limit, * the node will reject new replica operations. Defaults to 10% of the heap. */ limit?: integer; } export interface IndicesIndexingSlowlogSettings { level?: string; source?: integer; reformat?: boolean; threshold?: IndicesIndexingSlowlogTresholds; } export interface IndicesIndexingSlowlogTresholds { /** The indexing slow log, similar in functionality to the search slow log. The log file name ends with `_index_indexing_slowlog.json`. * Log and the thresholds are configured in the same way as the search slowlog. */ index?: IndicesSlowlogTresholdLevels; } export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged'; export interface IndicesMappingLimitSettings { coerce?: boolean; total_fields?: IndicesMappingLimitSettingsTotalFields; depth?: IndicesMappingLimitSettingsDepth; nested_fields?: IndicesMappingLimitSettingsNestedFields; nested_objects?: IndicesMappingLimitSettingsNestedObjects; field_name_length?: IndicesMappingLimitSettingsFieldNameLength; dimension_fields?: IndicesMappingLimitSettingsDimensionFields; source?: IndicesMappingLimitSettingsSourceFields; ignore_malformed?: boolean | string; } export interface IndicesMappingLimitSettingsDepth { /** The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined * at the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc. */ limit?: long; } export interface IndicesMappingLimitSettingsDimensionFields { /** [preview] This functionality is in technical preview and may be changed or removed in a future release. * Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. */ limit?: long; } export interface IndicesMappingLimitSettingsFieldNameLength { /** Setting for the maximum length of a field name. This setting isn’t really something that addresses mappings explosion but * might still be useful if you want to limit the field length. It usually shouldn’t be necessary to set this setting. The * default is okay unless a user starts to add a huge number of fields with really long names. Default is `Long.MAX_VALUE` (no limit). */ limit?: long; } export interface IndicesMappingLimitSettingsNestedFields { /** The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when * arrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this * setting limits the number of unique nested types per index. */ limit?: long; } export interface IndicesMappingLimitSettingsNestedObjects { /** The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps * to prevent out of memory errors when a document contains too many nested objects. */ limit?: long; } export interface IndicesMappingLimitSettingsSourceFields { mode: IndicesSourceMode; } export interface IndicesMappingLimitSettingsTotalFields { /** The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit. * The limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance * degradations and memory issues, especially in clusters with a high load or few resources. 
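 *
 * A hedged sketch of raising this limit while keeping dynamic-field overflow non-fatal (values are illustrative):
 *
 * const settings: IndicesIndexSettings = {
 *   mapping: { total_fields: { limit: 2000, ignore_dynamic_beyond_limit: true } },
 * }
 *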
*/ limit?: long | string; /** This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set * to false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail * with the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail. * Instead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false. * The fields that were not added to the mapping will be added to the _ignored field. */ ignore_dynamic_beyond_limit?: boolean | string; } export interface IndicesMerge { scheduler?: IndicesMergeScheduler; } export interface IndicesMergeScheduler { max_thread_count?: SpecUtilsStringified; max_merge_count?: SpecUtilsStringified; } export interface IndicesNumericFielddata { format: IndicesNumericFielddataFormat; } export type IndicesNumericFielddataFormat = 'array' | 'disabled'; export interface IndicesQueries { cache?: IndicesCacheQueries; } export interface IndicesRetentionLease { period: Duration; } export interface IndicesSearchIdle { after?: Duration; } export type IndicesSegmentSortMissing = '_last' | '_first'; export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX'; export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC'; export interface IndicesSettingsAnalyze { max_token_count?: SpecUtilsStringified; } export interface IndicesSettingsHighlight { max_analyzed_offset?: integer; } export interface IndicesSettingsQueryString { lenient: SpecUtilsStringified; } export interface IndicesSettingsSearch { idle?: IndicesSearchIdle; slowlog?: IndicesSlowlogSettings; } export type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | IndicesSettingsSimilarityBoolean | IndicesSettingsSimilarityDfi | IndicesSettingsSimilarityDfr | IndicesSettingsSimilarityIb | IndicesSettingsSimilarityLmd | IndicesSettingsSimilarityLmj | IndicesSettingsSimilarityScripted; export interface IndicesSettingsSimilarityBm25 { type: 'BM25'; b?: double; discount_overlaps?: boolean; k1?: double; } export interface IndicesSettingsSimilarityBoolean { type: 'boolean'; } export interface IndicesSettingsSimilarityDfi { type: 'DFI'; independence_measure: DFIIndependenceMeasure; } export interface IndicesSettingsSimilarityDfr { type: 'DFR'; after_effect: DFRAfterEffect; basic_model: DFRBasicModel; normalization: Normalization; } export interface IndicesSettingsSimilarityIb { type: 'IB'; distribution: IBDistribution; lambda: IBLambda; normalization: Normalization; } export interface IndicesSettingsSimilarityLmd { type: 'LMDirichlet'; mu?: double; } export interface IndicesSettingsSimilarityLmj { type: 'LMJelinekMercer'; lambda?: double; } export interface IndicesSettingsSimilarityScripted { type: 'scripted'; script: Script | ScriptSource; weight_script?: Script | ScriptSource; } export interface IndicesSlowlogSettings { level?: string; source?: integer; reformat?: boolean; threshold?: IndicesSlowlogTresholds; } export interface IndicesSlowlogTresholdLevels { warn?: Duration; info?: Duration; debug?: Duration; trace?: Duration; } export interface IndicesSlowlogTresholds { query?: IndicesSlowlogTresholdLevels; fetch?: IndicesSlowlogTresholdLevels; } export interface IndicesSoftDeletes { /** Indicates whether soft deletes are enabled on the index. */ enabled?: boolean; /** The maximum period to retain a shard history retention lease before it is considered expired. 
 * Shard history retention leases ensure that soft deletes are retained during merges on the Lucene * index. If a soft delete is merged away before it can be replicated to a follower, the following * process will fail due to incomplete history on the leader. */ retention_lease?: IndicesRetentionLease; } export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic'; export interface IndicesStorage { type: IndicesStorageType; /** You can restrict the use of the mmapfs and the related hybridfs store type via the setting node.store.allow_mmap. * This is a boolean setting indicating whether or not memory-mapping is allowed. The default is to allow it. This * setting is useful, for example, if you are in an environment where you cannot control the ability to create a lot * of memory maps so you need to disable the ability to use memory-mapping. */ allow_mmap?: boolean; } export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string; export interface IndicesTemplateMapping { aliases: Record; index_patterns: Name[]; mappings: MappingTypeMapping; order: integer; settings: Record; version?: VersionNumber; } export interface IndicesTranslog { /** How often the translog is fsynced to disk and committed, regardless of write operations. * Values less than 100ms are not allowed. */ sync_interval?: Duration; /** Whether or not to `fsync` and commit the translog after every index, delete, update, or bulk request. */ durability?: IndicesTranslogDurability; /** The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not * part of a Lucene commit point). Although these operations are available for reads, they will need * to be replayed if the shard was stopped and had to be recovered. This setting controls the * maximum total size of these operations, to prevent recoveries from taking too long. Once the * maximum size has been reached a flush will happen, generating a new Lucene commit point. */ flush_threshold_size?: ByteSize; retention?: IndicesTranslogRetention; } export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC'; export interface IndicesTranslogRetention { /** This controls the total size of translog files to keep for each shard. Keeping more translog files increases * the chance of performing an operation based sync when recovering a replica. If the translog files are not * sufficient, replica recovery will fall back to a file based sync. This setting is ignored, and should not be * set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch * versions 7.0.0 and later. */ size?: ByteSize; /** This controls the maximum duration for which translog files are kept by each shard. Keeping more * translog files increases the chance of performing an operation based sync when recovering replicas. If * the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting * is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in * indices created in Elasticsearch versions 7.0.0 and later. */ age?: Duration; } export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write'; export interface IndicesAddBlockIndicesBlockStatus { name: IndexName; blocked: boolean; } export interface IndicesAddBlockRequest extends RequestBase { /** A comma-separated list or wildcard expression of index names used to limit the request. 
* By default, you must explicitly name the indices you are adding blocks to. * To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName; /** The block type to add to the index. */ block: IndicesAddBlockIndicesBlockOptions; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration; /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; block?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
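 *
 * A hedged usage sketch (assumes an instantiated `client` from `@elastic/elasticsearch`; the index name is illustrative):
 *
 * const res = await client.indices.addBlock({ index: 'my-index', block: 'write' })
 * // `block` is typed as IndicesAddBlockIndicesBlockOptions, so only
 * // 'metadata' | 'read' | 'read_only' | 'write' will compile.
 * if (res.acknowledged) console.log(res.indices.map(i => i.name))
 *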
*/ querystring?: { [key: string]: any; } & { index?: never; block?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; }; } export interface IndicesAddBlockResponse { acknowledged: boolean; shards_acknowledged: boolean; indices: IndicesAddBlockIndicesBlockStatus[]; } export interface IndicesAnalyzeAnalyzeDetail { analyzer?: IndicesAnalyzeAnalyzerDetail; charfilters?: IndicesAnalyzeCharFilterDetail[]; custom_analyzer: boolean; tokenfilters?: IndicesAnalyzeTokenDetail[]; tokenizer?: IndicesAnalyzeTokenDetail; } export interface IndicesAnalyzeAnalyzeToken { end_offset: long; position: long; positionLength?: long; start_offset: long; token: string; type: string; } export interface IndicesAnalyzeAnalyzerDetail { name: string; tokens: IndicesAnalyzeExplainAnalyzeToken[]; } export interface IndicesAnalyzeCharFilterDetail { filtered_text: string[]; name: string; } export interface IndicesAnalyzeExplainAnalyzeTokenKeys { bytes: string; end_offset: long; keyword?: boolean; position: long; positionLength: long; start_offset: long; termFrequency: long; token: string; type: string; } export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeTokenKeys & { [property: string]: any; }; export interface IndicesAnalyzeRequest extends RequestBase { /** Index used to derive the analyzer. * If specified, the `analyzer` or `field` parameter overrides this value. * If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ index?: IndexName; /** The name of the analyzer that should be applied to the provided `text`. * This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ analyzer?: string; /** Array of token attributes used to filter the output of the `explain` parameter. */ attributes?: string[]; /** Array of character filters used to preprocess characters before the tokenizer. */ char_filter?: AnalysisCharFilter[]; /** If `true`, the response includes token attributes and additional details. */ explain?: boolean; /** Field used to derive the analyzer. * To use this parameter, you must specify an index. * If specified, the `analyzer` parameter overrides this value. */ field?: Field; /** Array of token filters to apply after the tokenizer. */ filter?: AnalysisTokenFilter[]; /** Normalizer to use to convert text into a single token. */ normalizer?: string; /** Text to analyze. * If an array of strings is provided, it is analyzed as a multi-value field. */ text?: IndicesAnalyzeTextToAnalyze; /** Tokenizer to use to convert text into tokens. */ tokenizer?: AnalysisTokenizer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; analyzer?: never; attributes?: never; char_filter?: never; explain?: never; field?: never; filter?: never; normalizer?: never; text?: never; tokenizer?: never; }); /** All values in `querystring` will be added to the request querystring. 
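 *
 * A hedged usage sketch (assumes an instantiated `client` from `@elastic/elasticsearch`):
 *
 * const res = await client.indices.analyze({ analyzer: 'standard', text: 'Quick Brown Foxes!' })
 * console.log(res.tokens?.map(t => t.token)) // ['quick', 'brown', 'foxes']
 * // With `explain: true`, the response carries `detail` (IndicesAnalyzeAnalyzeDetail) instead of `tokens`.
 *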
*/ querystring?: { [key: string]: any; } & { index?: never; analyzer?: never; attributes?: never; char_filter?: never; explain?: never; field?: never; filter?: never; normalizer?: never; text?: never; tokenizer?: never; }; } export interface IndicesAnalyzeResponse { detail?: IndicesAnalyzeAnalyzeDetail; tokens?: IndicesAnalyzeAnalyzeToken[]; } export type IndicesAnalyzeTextToAnalyze = string | string[]; export interface IndicesAnalyzeTokenDetail { name: string; tokens: IndicesAnalyzeExplainAnalyzeToken[]; } export interface IndicesCancelMigrateReindexRequest extends RequestBase { /** The index or data stream name */ index: Indices; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; }; } export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase; export interface IndicesClearCacheRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, clears the fields cache. * Use the `fields` parameter to clear the cache of specific fields only. */ fielddata?: boolean; /** Comma-separated list of field names used to limit the `fielddata` parameter. */ fields?: Fields; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, clears the query cache. */ query?: boolean; /** If `true`, clears the request cache. */ request?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; fielddata?: never; fields?: never; ignore_unavailable?: never; query?: never; request?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; fielddata?: never; fields?: never; ignore_unavailable?: never; query?: never; request?: never; }; } export type IndicesClearCacheResponse = ShardsOperationResponseBase; export interface IndicesCloneRequest extends RequestBase { /** Name of the source index to clone. */ index: IndexName; /** Name of the target index to create. */ target: Name; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. 
* Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** Aliases for the resulting index. */ aliases?: Record; /** Configuration options for the target index. */ settings?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; target?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; target?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; settings?: never; }; } export interface IndicesCloneResponse { acknowledged: boolean; index: IndexName; shards_acknowledged: boolean; } export interface IndicesCloseCloseIndexResult { closed: boolean; shards?: Record; } export interface IndicesCloseCloseShardResult { failures: ShardFailure[]; } export interface IndicesCloseRequest extends RequestBase { /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; }; } export interface IndicesCloseResponse { acknowledged: boolean; indices: Record; shards_acknowledged: boolean; } export interface IndicesCreateRequest extends RequestBase { /** Name of the index you wish to create. 
 * Index names must meet the following criteria: * * * Lowercase only * * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions * * Cannot start with `-`, `_`, or `+` * * Cannot be `.` or `..` * * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster) * * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins */ index: IndexName; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** Aliases for the index. */ aliases?: Record; /** Mapping for fields in the index. If specified, this mapping can include: * - Field names * - Field data types * - Mapping parameters */ mappings?: MappingTypeMapping; /** Configuration options for the index. */ settings?: IndicesIndexSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; mappings?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; mappings?: never; settings?: never; }; } export interface IndicesCreateResponse { index: IndexName; shards_acknowledged: boolean; acknowledged: boolean; } export interface IndicesCreateDataStreamRequest extends RequestBase { /** Name of the data stream, which must meet the following criteria: * Lowercase only; * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; * Cannot start with `-`, `_`, `+`, or `.ds-`; * Cannot be `.` or `..`; * Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */ name: DataStreamName; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
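 *
 * A hedged sketch of a create-index call that satisfies the naming criteria documented above (all names and values are illustrative):
 *
 * const res = await client.indices.create({
 *   index: 'logs-2024.05.01', // lowercase, no forbidden characters, well under 255 bytes
 *   settings: { number_of_shards: 1 },
 *   mappings: { properties: { '@timestamp': { type: 'date' } } },
 * })
 * console.log(res.acknowledged, res.shards_acknowledged)
 *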
*/ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase; export interface IndicesCreateFromCreateFrom { /** Mappings overrides to be applied to the destination index (optional) */ mappings_override?: MappingTypeMapping; /** Settings overrides to be applied to the destination index (optional) */ settings_override?: IndicesIndexSettings; /** Whether index blocks should be removed when creating the destination index (optional) */ remove_index_blocks?: boolean; } export interface IndicesCreateFromRequest extends RequestBase { /** The source index or data stream name */ source: IndexName; /** The destination index or data stream name */ dest: IndexName; create_from?: IndicesCreateFromCreateFrom; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { source?: never; dest?: never; create_from?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { source?: never; dest?: never; create_from?: never; }; } export interface IndicesCreateFromResponse { acknowledged: boolean; index: IndexName; shards_acknowledged: boolean; } export interface IndicesDataStreamsStatsDataStreamsStatsItem { /** Current number of backing indices for the data stream. */ backing_indices: integer; /** Name of the data stream. */ data_stream: Name; /** The data stream’s highest `@timestamp` value, converted to milliseconds since the Unix epoch. * NOTE: This timestamp is provided as a best effort. * The data stream may contain `@timestamp` values higher than this if one or more of the following conditions are met: * The stream contains closed backing indices; * Backing indices with a lower generation contain higher `@timestamp` values. */ maximum_timestamp: EpochTime; /** Total size of all shards for the data stream’s backing indices. * This parameter is only returned if the `human` query parameter is `true`. */ store_size?: ByteSize; /** Total size, in bytes, of all shards for the data stream’s backing indices. */ store_size_bytes: long; } export interface IndicesDataStreamsStatsRequest extends RequestBase { /** Comma-separated list of data streams used to limit the request. * Wildcard expressions (`*`) are supported. * To target all data streams in a cluster, omit this parameter or use `*`. */ name?: IndexName; /** Type of data stream that wildcard patterns can match. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; expand_wildcards?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; expand_wildcards?: never; }; } export interface IndicesDataStreamsStatsResponse { /** Contains information about shards that attempted to execute the request. */ _shards: ShardStatistics; /** Total number of backing indices for the selected data streams. */ backing_indices: integer; /** Total number of selected data streams. */ data_stream_count: integer; /** Contains statistics for the selected data streams. */ data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[]; /** Total size of all shards for the selected data streams. 
* This property is included only if the `human` query parameter is `true` */ total_store_sizes?: ByteSize; /** Total size, in bytes, of all shards for the selected data streams. */ total_store_size_bytes: long; } export interface IndicesDeleteRequest extends RequestBase { /** Comma-separated list of indices to delete. * You cannot specify index aliases. * By default, this parameter does not support wildcards (`*`) or `_all`. * To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ index: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; }; } export type IndicesDeleteResponse = IndicesResponseBase; export interface IndicesDeleteAliasRequest extends RequestBase { /** Comma-separated list of data streams or indices used to limit the request. * Supports wildcards (`*`). */ index: Indices; /** Comma-separated list of aliases to remove. * Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ name: Names; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
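 *
 * A hedged usage sketch (assumes an instantiated `client` from `@elastic/elasticsearch`; names are illustrative):
 *
 * await client.indices.deleteAlias({ index: 'logs-2024.05.01', name: 'logs-read' })
 * // Deleting the index itself goes through IndicesDeleteRequest instead:
 * await client.indices.delete({ index: 'logs-2024.05.01' })
 *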
*/ querystring?: { [key: string]: any; } & { index?: never; name?: never; master_timeout?: never; timeout?: never; }; } export type IndicesDeleteAliasResponse = AcknowledgedResponseBase; export interface IndicesDeleteDataLifecycleRequest extends RequestBase { /** A comma-separated list of data streams for which the data stream lifecycle will be deleted; use `*` to target all data streams */ name: DataStreamNames; /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ expand_wildcards?: ExpandWildcards; /** Specify timeout for connection to master */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; expand_wildcards?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; expand_wildcards?: never; master_timeout?: never; timeout?: never; }; } export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase; export interface IndicesDeleteDataStreamRequest extends RequestBase { /** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ name: DataStreamNames; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; expand_wildcards?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; expand_wildcards?: never; }; } export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase; export interface IndicesDeleteIndexTemplateRequest extends RequestBase { /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Names; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase; export interface IndicesDeleteTemplateRequest extends RequestBase { /** The name of the legacy index template to delete. * Wildcard (`*`) expressions are supported. */ name: Name; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. 
* If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase; export interface IndicesDiskUsageRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ index: Indices; /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, the API performs a flush before analysis. * If `false`, the response may not include uncommitted data. */ flush?: boolean; /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean; /** Analyzing field disk usage is resource-intensive. * To use the API, this parameter must be set to `true`. */ run_expensive_tasks?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flush?: never; ignore_unavailable?: never; run_expensive_tasks?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flush?: never; ignore_unavailable?: never; run_expensive_tasks?: never; }; } export type IndicesDiskUsageResponse = any; export interface IndicesDownsampleRequest extends RequestBase { /** Name of the time series index to downsample. */ index: IndexName; /** Name of the index to create. */ target_index: IndexName; config?: IndicesDownsampleConfig; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; target_index?: never; config?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; target_index?: never; config?: never; }; } export type IndicesDownsampleResponse = any; export interface IndicesExistsRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ index: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. 
* If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, returns settings in flat format. */ flat_settings?: boolean; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, return all default settings in the response. */ include_defaults?: boolean; /** If `true`, the request retrieves information from the local node only. */ local?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; }; } export type IndicesExistsResponse = boolean; export interface IndicesExistsAliasRequest extends RequestBase { /** Comma-separated list of aliases to check. Supports wildcards (`*`). */ name: Names; /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; }; } export type IndicesExistsAliasResponse = boolean; export interface IndicesExistsIndexTemplateRequest extends RequestBase { /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Name; /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean; /** If true, returns settings in flat format. */ flat_settings?: boolean; /** Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; local?: never; flat_settings?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; local?: never; flat_settings?: never; master_timeout?: never; }; } export type IndicesExistsIndexTemplateResponse = boolean; export interface IndicesExistsTemplateRequest extends RequestBase { /** A comma-separated list of index template names used to limit the request. * Wildcard (`*`) expressions are supported. */ name: Names; /** Indicates whether to use a flat format for the response. */ flat_settings?: boolean; /** Indicates whether to get information from the local node only. */ local?: boolean; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never time out, set it to `-1`. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; flat_settings?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; flat_settings?: never; local?: never; master_timeout?: never; }; } export type IndicesExistsTemplateResponse = boolean; export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { index: IndexName; managed_by_lifecycle: boolean; index_creation_date_millis?: EpochTime; time_since_index_creation?: Duration; rollover_date_millis?: EpochTime; time_since_rollover?: Duration; lifecycle?: IndicesDataStreamLifecycleWithRollover; generation_time?: Duration; error?: string; } export interface IndicesExplainDataLifecycleRequest extends RequestBase { /** The name of the index to explain. */ index: Indices; /** Indicates if the API should return the default values the system uses for the index's lifecycle. */ include_defaults?: boolean; /** Specify timeout for connection to master */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; include_defaults?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring.
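*
* A minimal usage sketch for this request (assumes a configured `Client`
* instance named `client` from `@elastic/elasticsearch`; the backing index
* name is hypothetical):
*
* @example
* const res = await client.indices.explainDataLifecycle({ index: '.ds-my-stream-2025.05.01-000001' })
* console.log(res.indices)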
*/ querystring?: { [key: string]: any; } & { index?: never; include_defaults?: never; master_timeout?: never; }; } export interface IndicesExplainDataLifecycleResponse { indices: Record; } export interface IndicesFieldUsageStatsFieldSummary { any: uint; stored_fields: uint; doc_values: uint; points: uint; norms: uint; term_vectors: uint; knn_vectors: uint; inverted_index: IndicesFieldUsageStatsInvertedIndex; } export interface IndicesFieldUsageStatsFieldsUsageBodyKeys { _shards: ShardStatistics; } export type IndicesFieldUsageStatsFieldsUsageBody = IndicesFieldUsageStatsFieldsUsageBodyKeys & { [property: string]: IndicesFieldUsageStatsUsageStatsIndex | ShardStatistics; }; export interface IndicesFieldUsageStatsInvertedIndex { terms: uint; postings: uint; proximity: uint; positions: uint; term_frequencies: uint; offsets: uint; payloads: uint; } export interface IndicesFieldUsageStatsRequest extends RequestBase { /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean; /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; fields?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; fields?: never; }; } export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody; export interface IndicesFieldUsageStatsShardsStats { all_fields: IndicesFieldUsageStatsFieldSummary; fields: Record; } export interface IndicesFieldUsageStatsUsageStatsIndex { shards: IndicesFieldUsageStatsUsageStatsShards[]; } export interface IndicesFieldUsageStatsUsageStatsShards { routing: IndicesStatsShardRouting; stats: IndicesFieldUsageStatsShardsStats; tracking_id: string; tracking_started_at_millis: EpochTime; } export interface IndicesFlushRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases to flush. * Supports wildcards (`*`). * To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
* Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ force?: boolean; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, the flush operation blocks until execution when another flush operation is running. * If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ wait_if_ongoing?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; force?: never; ignore_unavailable?: never; wait_if_ongoing?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; force?: never; ignore_unavailable?: never; wait_if_ongoing?: never; }; } export type IndicesFlushResponse = ShardsOperationResponseBase; export interface IndicesForcemergeRequest extends RequestBase { /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices; /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean; /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards; /** Specify whether the index should be flushed after performing the operation (default: true) */ flush?: boolean; /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean; /** The number of segments the index should be merged into (default: dynamic) */ max_num_segments?: long; /** Specify whether the operation should only expunge deleted documents */ only_expunge_deletes?: boolean; /** Should the request wait until the force merge is completed. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flush?: never; ignore_unavailable?: never; max_num_segments?: never; only_expunge_deletes?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flush?: never; ignore_unavailable?: never; max_num_segments?: never; only_expunge_deletes?: never; wait_for_completion?: never; }; } export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody; export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { /** Contains the task ID returned when `wait_for_completion=false`; * you can use the task ID to get the status of the task at `_tasks/<task_id>`. */ task?: string; } export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings'; export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[]; export interface IndicesGetRequest extends RequestBase { /** Comma-separated list of data streams, indices, and index aliases used to limit the request. * Wildcard expressions (*) are supported.
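*
* A minimal usage sketch (assumes a configured `Client` instance named
* `client` from `@elastic/elasticsearch`; the index name is hypothetical):
*
* @example
* const res = await client.indices.get({ index: 'my-index', features: ['mappings', 'settings'] })
* console.log(res['my-index'])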
*/ index: Indices; /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only * missing or closed indices. This behavior applies even if the request targets other open indices. For example, * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean; /** Type of index that wildcard expressions can match. If the request can target data streams, this argument * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, * such as open,hidden. */ expand_wildcards?: ExpandWildcards; /** If true, returns settings in flat format. */ flat_settings?: boolean; /** If false, requests that target a missing index return an error. */ ignore_unavailable?: boolean; /** If true, return all default settings in the response. */ include_defaults?: boolean; /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Return only information on specified index features */ features?: IndicesGetFeatures; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; master_timeout?: never; features?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; master_timeout?: never; features?: never; }; } export type IndicesGetResponse = Record; export interface IndicesGetAliasIndexAliases { aliases: Record; } export interface IndicesGetAliasRequest extends RequestBase { /** Comma-separated list of aliases to retrieve. * Supports wildcards (`*`). * To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names; /** Comma-separated list of data streams or indices used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. 
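*
* A minimal usage sketch (assumes a configured `Client` named `client`; the
* alias name is hypothetical):
*
* @example
* const res = await client.indices.getAlias({ name: 'my-alias' })
* for (const index of Object.keys(res)) console.log(index, res[index].aliases)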
*/ body?: string | ({ [key: string]: any; } & { name?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; }; } export type IndicesGetAliasResponse = Record; export interface IndicesGetDataLifecycleDataStreamWithLifecycle { name: DataStreamName; lifecycle?: IndicesDataStreamLifecycleWithRollover; } export interface IndicesGetDataLifecycleRequest extends RequestBase { /** Comma-separated list of data streams to limit the request. * Supports wildcards (`*`). * To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames; /** Type of data stream that wildcard patterns can match. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, return all default settings in the response. */ include_defaults?: boolean; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; expand_wildcards?: never; include_defaults?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; expand_wildcards?: never; include_defaults?: never; master_timeout?: never; }; } export interface IndicesGetDataLifecycleResponse { data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[]; } export interface IndicesGetDataLifecycleStatsDataStreamStats { /** The count of the backing indices for the data stream that have encountered an error. */ backing_indices_in_error: integer; /** The count of the backing indices for the data stream. */ backing_indices_in_total: integer; /** The name of the data stream. */ name: DataStreamName; } export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface IndicesGetDataLifecycleStatsResponse { /** The count of data streams currently being managed by the data stream lifecycle. */ data_stream_count: integer; /** Information about the data streams that are managed by the data stream lifecycle. */ data_streams: IndicesGetDataLifecycleStatsDataStreamStats[]; /** The duration of the last data stream lifecycle execution. */ last_run_duration_in_millis?: DurationValue; /** The time that passed between the start of the last two data stream lifecycle executions. * This value should be approximately equal to `data_streams.lifecycle.poll_interval`. */ time_between_starts_in_millis?: DurationValue; } export interface IndicesGetDataStreamRequest extends RequestBase { /** Comma-separated list of data stream names used to limit the request. * Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */ name?: DataStreamNames; /** Type of data stream that wildcard patterns can match. * Supports comma-separated values, such as `open,hidden`.
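*
* A minimal usage sketch (assumes a configured `Client` named `client`; the
* data stream pattern is hypothetical):
*
* @example
* const res = await client.indices.getDataStream({ name: 'logs-*', expand_wildcards: 'open' })
* console.log(res.data_streams.map(ds => ds.name))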
*/ expand_wildcards?: ExpandWildcards; /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Whether the maximum timestamp for each data stream should be calculated and returned. */ verbose?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; expand_wildcards?: never; include_defaults?: never; master_timeout?: never; verbose?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; expand_wildcards?: never; include_defaults?: never; master_timeout?: never; verbose?: never; }; } export interface IndicesGetDataStreamResponse { data_streams: IndicesDataStream[]; } export interface IndicesGetFieldMappingRequest extends RequestBase { /** Comma-separated list or wildcard expression of fields used to limit returned information. * Supports wildcards (`*`). */ fields: Fields; /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, return all default settings in the response. */ include_defaults?: boolean; /** If `true`, the request retrieves information from the local node only. */ local?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { fields?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { fields?: never; index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; }; } export type IndicesGetFieldMappingResponse = Record; export interface IndicesGetFieldMappingTypeFieldMappings { mappings: Record; } export interface IndicesGetIndexTemplateIndexTemplateItem { name: Name; index_template: IndicesIndexTemplate; } export interface IndicesGetIndexTemplateRequest extends RequestBase { /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name?: Name; /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean; /** If true, returns settings in flat format. */ flat_settings?: boolean; /** Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; local?: never; flat_settings?: never; master_timeout?: never; include_defaults?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; local?: never; flat_settings?: never; master_timeout?: never; include_defaults?: never; }; } export interface IndicesGetIndexTemplateResponse { index_templates: IndicesGetIndexTemplateIndexTemplateItem[]; } export interface IndicesGetMappingIndexMappingRecord { item?: MappingTypeMapping; mappings: MappingTypeMapping; } export interface IndicesGetMappingRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, the request retrieves information from the local node only. */ local?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; local?: never; master_timeout?: never; }; } export type IndicesGetMappingResponse = Record; export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { /** The index or data stream name. */ index: Indices; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; }); /** All values in `querystring` will be added to the request querystring. 
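*
* A minimal usage sketch (assumes a configured `Client` named `client`, and
* that the client exposes this endpoint as `indices.getMigrateReindexStatus`;
* the data stream name is hypothetical):
*
* @example
* const res = await client.indices.getMigrateReindexStatus({ index: 'my-data-stream' })
* console.log(res.complete, res.successes, res.pending)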
*/ querystring?: { [key: string]: any; } & { index?: never; }; } export interface IndicesGetMigrateReindexStatusResponse { start_time?: DateTime; start_time_millis: EpochTime; complete: boolean; total_indices_in_data_stream: integer; total_indices_requiring_upgrade: integer; successes: integer; in_progress: IndicesGetMigrateReindexStatusStatusInProgress[]; pending: integer; errors: IndicesGetMigrateReindexStatusStatusError[]; exception?: string; } export interface IndicesGetMigrateReindexStatusStatusError { index: string; message: string; } export interface IndicesGetMigrateReindexStatusStatusInProgress { index: string; total_doc_count: long; reindexed_doc_count: long; } export interface IndicesGetSettingsRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit * the request. Supports wildcards (`*`). To target all data streams and * indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** Comma-separated list or wildcard expression of settings to retrieve. */ name?: Names; /** If `false`, the request returns an error if any wildcard expression, index * alias, or `_all` value targets only missing or closed indices. This * behavior applies even if the request targets other open indices. For * example, a request targeting `foo*,bar*` returns an error if an index * starts with foo but no index starts with `bar`. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, returns settings in flat format. */ flat_settings?: boolean; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, return all default settings in the response. */ include_defaults?: boolean; /** If `true`, the request retrieves information from the local node only. If * `false`, information is retrieved from the master node. */ local?: boolean; /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an * error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; name?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; name?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; include_defaults?: never; local?: never; master_timeout?: never; }; } export type IndicesGetSettingsResponse = Record; export interface IndicesGetTemplateRequest extends RequestBase { /** Comma-separated list of index template names used to limit the request. * Wildcard (`*`) expressions are supported. * To return all index templates, omit this parameter or use a value of `_all` or `*`. */ name?: Names; /** If `true`, returns settings in flat format. */ flat_settings?: boolean; /** If `true`, the request retrieves information from the local node only. */ local?: boolean; /** Period to wait for a connection to the master node. 
* If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; flat_settings?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; flat_settings?: never; local?: never; master_timeout?: never; }; } export type IndicesGetTemplateResponse = Record; export interface IndicesMigrateReindexMigrateReindex { /** Reindex mode. Currently only 'upgrade' is supported. */ mode: IndicesMigrateReindexModeEnum; /** The source index or data stream (only data streams are currently supported). */ source: IndicesMigrateReindexSourceIndex; } export type IndicesMigrateReindexModeEnum = 'upgrade'; export interface IndicesMigrateReindexRequest extends RequestBase { reindex?: IndicesMigrateReindexMigrateReindex; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { reindex?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { reindex?: never; }; } export type IndicesMigrateReindexResponse = AcknowledgedResponseBase; export interface IndicesMigrateReindexSourceIndex { index: IndexName; } export interface IndicesMigrateToDataStreamRequest extends RequestBase { /** Name of the index alias to convert to a data stream. */ name: IndexName; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase; export interface IndicesModifyDataStreamAction { /** Adds an existing index as a backing index for a data stream. * The index is hidden as part of this operation. * WARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior. * This should be considered an expert level API. */ add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction; /** Removes a backing index from a data stream. * The index is unhidden as part of this operation. * A data stream’s write index cannot be removed. */ remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction; } export interface IndicesModifyDataStreamIndexAndDataStreamAction { /** Data stream targeted by the action. */ data_stream: DataStreamName; /** Index for the action. */ index: IndexName; } export interface IndicesModifyDataStreamRequest extends RequestBase { /** Actions to perform. */ actions: IndicesModifyDataStreamAction[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { actions?: never; }); /** All values in `querystring` will be added to the request querystring. 
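*
* A minimal usage sketch (assumes a configured `Client` named `client`; the
* data stream and backing index names are hypothetical):
*
* @example
* await client.indices.modifyDataStream({
*   actions: [{ remove_backing_index: { data_stream: 'logs-my-app', index: '.ds-logs-my-app-000001' } }]
* })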
*/ querystring?: { [key: string]: any; } & { actions?: never; }; } export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase; export interface IndicesOpenRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * By default, you must explicitly name the indices you are using to limit the request. * To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. * You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ index: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; }; } export interface IndicesOpenResponse { acknowledged: boolean; shards_acknowledged: boolean; } export interface IndicesPromoteDataStreamRequest extends RequestBase { /** The name of the data stream. */ name: IndexName; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; }; } export type IndicesPromoteDataStreamResponse = any; export interface IndicesPutAliasRequest extends RequestBase { /** Comma-separated list of data streams or indices to add. * Supports wildcards (`*`). * Wildcard patterns that match both data streams and indices return an error. */ index: Indices; /** Alias to update. * If the alias doesn’t exist, the request creates it.
* Index alias names support date math. */ name: Name; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer; /** Value used to route indexing operations to a specific shard. * If specified, this overwrites the `routing` value for indexing operations. * Data stream aliases don’t support this parameter. */ index_routing?: Routing; /** If `true`, sets the write index or data stream for the alias. * If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. * If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. * Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */ is_write_index?: boolean; /** Value used to route indexing and search operations to a specific shard. * Data stream aliases don’t support this parameter. */ routing?: Routing; /** Value used to route search operations to a specific shard. * If specified, this overwrites the `routing` value for search operations. * Data stream aliases don’t support this parameter. */ search_routing?: Routing; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; name?: never; master_timeout?: never; timeout?: never; filter?: never; index_routing?: never; is_write_index?: never; routing?: never; search_routing?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; name?: never; master_timeout?: never; timeout?: never; filter?: never; index_routing?: never; is_write_index?: never; routing?: never; search_routing?: never; }; } export type IndicesPutAliasResponse = AcknowledgedResponseBase; export interface IndicesPutDataLifecycleRequest extends RequestBase { /** Comma-separated list of data streams used to limit the request. * Supports wildcards (`*`). * To target all data streams use `*` or `_all`. */ name: DataStreamNames; /** Type of data stream that wildcard patterns can match. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an * error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** If defined, every document added to this data stream will be stored at least for this time frame. * Any time after this duration the document could be deleted. * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration; /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling; /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle * that's disabled (enabled: `false`) will have no effect on the data stream. 
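*
* A minimal usage sketch (assumes a configured `Client` named `client`; the
* data stream pattern and retention value are hypothetical):
*
* @example
* await client.indices.putDataLifecycle({ name: 'logs-*', data_retention: '7d' })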
*/ enabled?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; expand_wildcards?: never; master_timeout?: never; timeout?: never; data_retention?: never; downsampling?: never; enabled?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; expand_wildcards?: never; master_timeout?: never; timeout?: never; data_retention?: never; downsampling?: never; enabled?: never; }; } export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase; export interface IndicesPutIndexTemplateIndexTemplateMapping { /** Aliases to add. * If the index template includes a `data_stream` object, these are data stream aliases. * Otherwise, these are index aliases. * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record; /** Mapping for fields in the index. * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping; /** Configuration options for the index. */ settings?: IndicesIndexSettings; lifecycle?: IndicesDataStreamLifecycle; } export interface IndicesPutIndexTemplateRequest extends RequestBase { /** Index or template name */ name: Name; /** If `true`, this request cannot replace or update existing index templates. */ create?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** User-defined reason for creating/updating the index template */ cause?: string; /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ index_patterns?: Indices; /** An ordered list of component template names. * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[]; /** Template to be applied. * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping; /** If this object is included, the template is used to create data streams and their backing indices. * Supports an empty object. * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility; /** Priority to determine index template precedence when a new data stream or index is created. * The index template with the highest priority is chosen. * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). * This number is not automatically generated by Elasticsearch. */ priority?: long; /** Version number used to manage index templates externally. * This number is not automatically generated by Elasticsearch. * External systems can use these version numbers to simplify template management. * To unset a version, replace the template without specifying one. */ version?: VersionNumber; /** Optional user metadata about the index template. * It may have any contents. * It is not automatically generated or used by Elasticsearch. * This user-defined object is stored in the cluster state, so keeping it short is preferable. * To unset the metadata, replace the template without specifying it. */ _meta?: Metadata; /** This setting overrides the value of the `action.auto_create_index` cluster setting.
* If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`. * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean; /** The configuration option `ignore_missing_component_templates` can be used when an index template * references a component template that might not exist. */ ignore_missing_component_templates?: string[]; /** Marks this index template as deprecated. When creating or updating a non-deprecated index template * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; create?: never; master_timeout?: never; cause?: never; index_patterns?: never; composed_of?: never; template?: never; data_stream?: never; priority?: never; version?: never; _meta?: never; allow_auto_create?: never; ignore_missing_component_templates?: never; deprecated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; create?: never; master_timeout?: never; cause?: never; index_patterns?: never; composed_of?: never; template?: never; data_stream?: never; priority?: never; version?: never; _meta?: never; allow_auto_create?: never; ignore_missing_component_templates?: never; deprecated?: never; }; } export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase; export interface IndicesPutMappingRequest extends RequestBase { /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ index: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** If `true`, the mappings are applied only to the current write index for the target. */ write_index_only?: boolean; /** Controls whether dynamic date detection is enabled. */ date_detection?: boolean; /** Controls whether new fields are added dynamically. */ dynamic?: MappingDynamicMapping; /** If date detection is enabled, then new string fields are checked * against 'dynamic_date_formats' and if the value matches then * a new date field is added instead of string. */ dynamic_date_formats?: string[]; /** Specify dynamic templates for the mapping. */ dynamic_templates?: Partial>[]; /** Control whether field names are enabled for the index.
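*
* A minimal usage sketch (assumes a configured `Client` named `client`; the
* index and field names are hypothetical):
*
* @example
* await client.indices.putMapping({
*   index: 'my-index',
*   properties: { user_id: { type: 'keyword' } }
* })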
*/ _field_names?: MappingFieldNamesField; /** A mapping type can have custom metadata associated with it. These are * not used at all by Elasticsearch, but can be used to store * application-specific metadata. */ _meta?: Metadata; /** Automatically map strings into numeric data types for all fields. */ numeric_detection?: boolean; /** Mapping for a field. For new fields, this mapping can include: * * - Field name * - Field data type * - Mapping parameters */ properties?: Record; /** Enable making a routing value required on indexed documents. */ _routing?: MappingRoutingField; /** Control whether the _source field is enabled on the index. */ _source?: MappingSourceField; /** Mapping of runtime fields for the index. */ runtime?: MappingRuntimeFields; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; write_index_only?: never; date_detection?: never; dynamic?: never; dynamic_date_formats?: never; dynamic_templates?: never; _field_names?: never; _meta?: never; numeric_detection?: never; properties?: never; _routing?: never; _source?: never; runtime?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; master_timeout?: never; timeout?: never; write_index_only?: never; date_detection?: never; dynamic?: never; dynamic_date_formats?: never; dynamic_templates?: never; _field_names?: never; _meta?: never; numeric_detection?: never; properties?: never; _routing?: never; _source?: never; runtime?: never; }; } export type IndicesPutMappingResponse = IndicesResponseBase; export interface IndicesPutSettingsRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit * the request. Supports wildcards (`*`). To target all data streams and * indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index * alias, or `_all` value targets only missing or closed indices. This * behavior applies even if the request targets other open indices. For * example, a request targeting `foo*,bar*` returns an error if an index * starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. If the request can target * data streams, this argument determines whether wildcard expressions match * hidden data streams. Supports comma-separated values, such as * `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, returns settings in flat format. */ flat_settings?: boolean; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an * error. */ master_timeout?: Duration; /** If `true`, existing index settings remain unchanged. */ preserve_existing?: boolean; /** Whether to close and reopen the index to apply non-dynamic settings. * If set to `true`, the indices to which the settings are being applied * will be closed temporarily and then reopened in order to apply the changes. */ reopen?: boolean; /** Period to wait for a response.
If no response is received before the * timeout expires, the request fails and returns an error. */ timeout?: Duration; settings?: IndicesIndexSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; master_timeout?: never; preserve_existing?: never; reopen?: never; timeout?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; flat_settings?: never; ignore_unavailable?: never; master_timeout?: never; preserve_existing?: never; reopen?: never; timeout?: never; settings?: never; }; } export type IndicesPutSettingsResponse = AcknowledgedResponseBase; export interface IndicesPutTemplateRequest extends RequestBase { /** The name of the template */ name: Name; /** If true, this request cannot replace or update existing index templates. */ create?: boolean; /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** User defined reason for creating/updating the index template */ cause?: string; /** Aliases for the index. */ aliases?: Record; /** Array of wildcard expressions used to match the names * of indices during creation. */ index_patterns?: string | string[]; /** Mapping for fields in the index. */ mappings?: MappingTypeMapping; /** Order in which Elasticsearch applies this template if index * matches multiple templates. * * Templates with lower 'order' values are merged first. Templates with higher * 'order' values are merged later, overriding templates with lower values. */ order?: integer; /** Configuration options for the index. */ settings?: IndicesIndexSettings; /** Version number used to manage index templates externally. This number * is not automatically generated by Elasticsearch. * To unset a version, replace the template without specifying one. */ version?: VersionNumber; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; create?: never; master_timeout?: never; cause?: never; aliases?: never; index_patterns?: never; mappings?: never; order?: never; settings?: never; version?: never; }); /** All values in `querystring` will be added to the request querystring. 
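*
* A minimal usage sketch for this legacy API (assumes a configured `Client`
* named `client`; the template name and pattern are hypothetical):
*
* @example
* await client.indices.putTemplate({
*   name: 'template_1',
*   index_patterns: ['te*'],
*   order: 0,
*   settings: { number_of_shards: 1 }
* })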
*/ querystring?: { [key: string]: any; } & { name?: never; create?: never; master_timeout?: never; cause?: never; aliases?: never; index_patterns?: never; mappings?: never; order?: never; settings?: never; version?: never; }; } export type IndicesPutTemplateResponse = AcknowledgedResponseBase; export interface IndicesRecoveryFileDetails { length: long; name: string; recovered: long; } export interface IndicesRecoveryRecoveryBytes { percent: Percentage; recovered?: ByteSize; recovered_in_bytes: ByteSize; recovered_from_snapshot?: ByteSize; recovered_from_snapshot_in_bytes?: ByteSize; reused?: ByteSize; reused_in_bytes: ByteSize; total?: ByteSize; total_in_bytes: ByteSize; } export interface IndicesRecoveryRecoveryFiles { details?: IndicesRecoveryFileDetails[]; percent: Percentage; recovered: long; reused: long; total: long; } export interface IndicesRecoveryRecoveryIndexStatus { bytes?: IndicesRecoveryRecoveryBytes; files: IndicesRecoveryRecoveryFiles; size: IndicesRecoveryRecoveryBytes; source_throttle_time?: Duration; source_throttle_time_in_millis: DurationValue; target_throttle_time?: Duration; target_throttle_time_in_millis: DurationValue; total_time?: Duration; total_time_in_millis: DurationValue; } export interface IndicesRecoveryRecoveryOrigin { hostname?: string; host?: Host; transport_address?: TransportAddress; id?: Id; ip?: Ip; name?: Name; bootstrap_new_history_uuid?: boolean; repository?: Name; snapshot?: Name; version?: VersionString; restoreUUID?: Uuid; index?: IndexName; } export interface IndicesRecoveryRecoveryStartStatus { check_index_time?: Duration; check_index_time_in_millis: DurationValue; total_time?: Duration; total_time_in_millis: DurationValue; } export interface IndicesRecoveryRecoveryStatus { shards: IndicesRecoveryShardRecovery[]; } export interface IndicesRecoveryRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean; /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; active_only?: never; detailed?: never; }); /** All values in `querystring` will be added to the request querystring. 
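*
* A minimal usage sketch (assumes a configured `Client` named `client`; the
* index name is hypothetical):
*
* @example
* const res = await client.indices.recovery({ index: 'my-index', active_only: true })
* console.log(res['my-index']?.shards.length)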
*/ querystring?: { [key: string]: any; } & { index?: never; active_only?: never; detailed?: never; }; } export type IndicesRecoveryResponse = Record; export interface IndicesRecoveryShardRecovery { id: long; index: IndicesRecoveryRecoveryIndexStatus; primary: boolean; source: IndicesRecoveryRecoveryOrigin; stage: string; start?: IndicesRecoveryRecoveryStartStatus; start_time?: DateTime; start_time_in_millis: EpochTime; stop_time?: DateTime; stop_time_in_millis?: EpochTime; target: IndicesRecoveryRecoveryOrigin; total_time?: Duration; total_time_in_millis: DurationValue; translog: IndicesRecoveryTranslogStatus; type: string; verify_index: IndicesRecoveryVerifyIndex; } export interface IndicesRecoveryTranslogStatus { percent: Percentage; recovered: long; total: long; total_on_start: long; total_time?: Duration; total_time_in_millis: DurationValue; } export interface IndicesRecoveryVerifyIndex { check_index_time?: Duration; check_index_time_in_millis: DurationValue; total_time?: Duration; total_time_in_millis: DurationValue; } export interface IndicesRefreshRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; }; } export type IndicesRefreshResponse = ShardsOperationResponseBase; export interface IndicesReloadSearchAnalyzersReloadDetails { index: string; reloaded_analyzers: string[]; reloaded_node_ids: string[]; } export interface IndicesReloadSearchAnalyzersReloadResult { reload_details: IndicesReloadSearchAnalyzersReloadDetails[]; _shards: ShardStatistics; } export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { /** A comma-separated list of index names to reload analyzers for */ index: Indices; /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean; /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards; /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean; /** Changed resource to reload analyzers from if applicable */ resource?: string; /** All values in `body` will be added to the request body. 
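*
* A minimal usage sketch (assumes a configured `Client` named `client` and a
* hypothetical index whose analyzers use updateable resources):
*
* @example
* const res = await client.indices.reloadSearchAnalyzers({ index: 'my-index' })
* console.log(res.reload_details)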
*/ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; resource?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; resource?: never; }; } export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult; export interface IndicesResolveClusterRequest extends RequestBase { /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. * Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. * Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. * If no index expression is specified, information about all remote clusters configured on the local cluster * is returned without doing any index matching. */ name?: Names; /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing * or closed indices. This behavior applies even if the request targets other open indices. For example, a request * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index * options to the `_resolve/cluster` API endpoint that takes no index expression. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index * options to the `_resolve/cluster` API endpoint that takes no index expression. */ expand_wildcards?: ExpandWildcards; /** If true, concrete, expanded, or aliased indices are ignored when frozen. * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_throttled?: boolean; /** If false, the request returns an error if it targets a missing or closed index. * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_unavailable?: boolean; /** The maximum time to wait for remote clusters to respond. * If a remote cluster does not respond within this timeout period, the API response * will show the cluster as not connected and include an error message that the * request timed out. * * The default timeout is unset and the query can take * as long as the networking layer is configured to wait for remote clusters that are * not responding (typically 30 seconds). */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring.
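*
* A usage sketch for `IndicesResolveClusterRequest` (editorial illustration, not part
* of the generated spec; `cluster_one` is a hypothetical remote cluster configured on
* the local cluster):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // Check which local and remote resources `logs-*` would match.
*     const info = await client.indices.resolveCluster({ name: 'logs-*,cluster_one:logs-*' })
*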
*/ querystring?: { [key: string]: any; } & { name?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; timeout?: never; }; } export interface IndicesResolveClusterResolveClusterInfo { /** Whether the remote cluster is connected to the local (querying) cluster. */ connected: boolean; /** The `skip_unavailable` setting for a remote cluster. */ skip_unavailable: boolean; /** Whether the index expression provided in the request matches any indices, aliases or data streams * on the cluster. */ matching_indices?: boolean; /** Provides error messages that are likely to occur if you do a search with this index expression * on the specified cluster (for example, lack of security privileges to query an index). */ error?: string; /** Provides version information about the cluster. */ version?: ElasticsearchVersionMinInfo; } export type IndicesResolveClusterResponse = Record; export interface IndicesResolveIndexRequest extends RequestBase { /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. * Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. */ name: Names; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; expand_wildcards?: never; ignore_unavailable?: never; allow_no_indices?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; expand_wildcards?: never; ignore_unavailable?: never; allow_no_indices?: never; }; } export interface IndicesResolveIndexResolveIndexAliasItem { name: Name; indices: Indices; } export interface IndicesResolveIndexResolveIndexDataStreamsItem { name: DataStreamName; timestamp_field: Field; backing_indices: Indices; } export interface IndicesResolveIndexResolveIndexItem { name: Name; aliases?: string[]; attributes: string[]; data_stream?: DataStreamName; } export interface IndicesResolveIndexResponse { indices: IndicesResolveIndexResolveIndexItem[]; aliases: IndicesResolveIndexResolveIndexAliasItem[]; data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[]; } export interface IndicesRolloverRequest extends RequestBase { /** Name of the data stream or index alias to roll over. */ alias: IndexAlias; /** Name of the index to create. * Supports date math. * Data streams do not support this parameter. */ new_index?: IndexName; /** If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. */ dry_run?: boolean; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error.
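*
* A usage sketch for `IndicesResolveIndexRequest` (editorial illustration, not part of
* the generated spec):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // List the concrete indices, aliases, and data streams behind `logs-*`.
*     const resolved = await client.indices.resolveIndex({ name: 'logs-*', expand_wildcards: 'open' })
*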
*/ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** If set to `true`, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. * Only allowed on data streams. */ lazy?: boolean; /** Aliases for the target index. * Data streams do not support this parameter. */ aliases?: Record; /** Conditions for the rollover. * If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. * If this parameter is not specified, Elasticsearch performs the rollover unconditionally. * If conditions are specified, at least one of them must be a `max_*` condition. * The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ conditions?: IndicesRolloverRolloverConditions; /** Mapping for fields in the index. * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping; /** Configuration options for the index. * Data streams do not support this parameter. */ settings?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { alias?: never; new_index?: never; dry_run?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; lazy?: never; aliases?: never; conditions?: never; mappings?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { alias?: never; new_index?: never; dry_run?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; lazy?: never; aliases?: never; conditions?: never; mappings?: never; settings?: never; }; } export interface IndicesRolloverResponse { acknowledged: boolean; conditions: Record; dry_run: boolean; new_index: string; old_index: string; rolled_over: boolean; shards_acknowledged: boolean; } export interface IndicesRolloverRolloverConditions { min_age?: Duration; max_age?: Duration; max_age_millis?: DurationValue; min_docs?: long; max_docs?: long; max_size?: ByteSize; max_size_bytes?: long; min_size?: ByteSize; min_size_bytes?: long; max_primary_shard_size?: ByteSize; max_primary_shard_size_bytes?: long; min_primary_shard_size?: ByteSize; min_primary_shard_size_bytes?: long; max_primary_shard_docs?: long; min_primary_shard_docs?: long; } export interface IndicesSegmentsIndexSegment { shards: Record; } export interface IndicesSegmentsRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit the request. * Supports wildcards (`*`). * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
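*
* A usage sketch for `IndicesRolloverRequest` (editorial illustration, not part of the
* generated spec; `logs-alias` is a hypothetical write alias):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // Roll over only if at least one `max_*` condition is met.
*     const rollover = await client.indices.rollover({
*       alias: 'logs-alias',
*       conditions: { max_age: '7d', max_docs: 100000 },
*     })
*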
* Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; }; } export interface IndicesSegmentsResponse { indices: Record; _shards: ShardStatistics; } export interface IndicesSegmentsSegment { attributes: Record; committed: boolean; compound: boolean; deleted_docs: long; generation: integer; search: boolean; size_in_bytes: double; num_docs: long; version: VersionString; } export interface IndicesSegmentsShardSegmentRouting { node: string; primary: boolean; state: string; } export interface IndicesSegmentsShardsSegment { num_committed_segments: integer; routing: IndicesSegmentsShardSegmentRouting; num_search_segments: integer; segments: Record; } export interface IndicesShardStoresIndicesShardStores { shards: Record; } export interface IndicesShardStoresRequest extends RequestBase { /** List of data streams, indices, and aliases used to limit the request. */ index?: Indices; /** If false, the request returns an error if any wildcard expression, index alias, or _all * value targets only missing or closed indices. This behavior applies even if the request * targets other open indices. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. If the request can target data streams, * this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards; /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean; /** List of shard health statuses used to limit the request. */ status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; status?: never; }); /** All values in `querystring` will be added to the request querystring. 
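*
* Usage sketches for `IndicesSegmentsRequest` and `IndicesShardStoresRequest`
* (editorial illustrations, not part of the generated spec):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // Low-level Lucene segment information for one index.
*     const segments = await client.indices.segments({ index: 'my-index' })
*     // Store information for shards in red or yellow health.
*     const stores = await client.indices.shardStores({ index: 'my-index', status: ['red', 'yellow'] })
*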
*/ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_unavailable?: never; status?: never; }; } export interface IndicesShardStoresResponse { indices: Record; } export interface IndicesShardStoresShardStoreKeys { allocation: IndicesShardStoresShardStoreAllocation; allocation_id?: Id; store_exception?: IndicesShardStoresShardStoreException; } export type IndicesShardStoresShardStore = IndicesShardStoresShardStoreKeys & { [property: string]: IndicesShardStoresShardStoreNode | IndicesShardStoresShardStoreAllocation | Id | IndicesShardStoresShardStoreException; }; export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused'; export interface IndicesShardStoresShardStoreException { reason: string; type: string; } export interface IndicesShardStoresShardStoreNode { attributes: Record; ephemeral_id?: string; external_id?: string; name: Name; roles: string[]; transport_address: TransportAddress; } export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all'; export interface IndicesShardStoresShardStoreWrapper { stores: IndicesShardStoresShardStore[]; } export interface IndicesShrinkRequest extends RequestBase { /** Name of the source index to shrink. */ index: IndexName; /** Name of the target index to create. */ target: IndexName; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** The key is the alias name. * Index alias names support date math. */ aliases?: Record; /** Configuration options for the target index. */ settings?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; target?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; target?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; settings?: never; }; } export interface IndicesShrinkResponse { acknowledged: boolean; shards_acknowledged: boolean; index: IndexName; } export interface IndicesSimulateIndexTemplateRequest extends RequestBase { /** Name of the index to simulate. */ name: Name; /** Whether the index template optionally defined in the body should be dry-run added only if it is new, or may also replace an existing one. */ create?: boolean; /** User-defined reason for dry-run creating the new template for simulation purposes. */ cause?: string; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean; /** All values in `body` will be added to the request body.
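*
* A usage sketch for `IndicesShrinkRequest` (editorial illustration, not part of the
* generated spec; the source index must be made read-only and its shards co-located
* before shrinking):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     const shrunk = await client.indices.shrink({
*       index: 'my-index',
*       target: 'my-shrunken-index',
*       settings: { 'index.number_of_shards': 1 },
*     })
*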
*/ body?: string | ({ [key: string]: any; } & { name?: never; create?: never; cause?: never; master_timeout?: never; include_defaults?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; create?: never; cause?: never; master_timeout?: never; include_defaults?: never; }; } export interface IndicesSimulateIndexTemplateResponse { overlapping?: IndicesSimulateTemplateOverlapping[]; template: IndicesSimulateTemplateTemplate; } export interface IndicesSimulateTemplateOverlapping { name: Name; index_patterns: string[]; } export interface IndicesSimulateTemplateRequest extends RequestBase { /** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit * this parameter and specify the template configuration in the request body. */ name?: Name; /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. */ create?: boolean; /** User-defined reason for dry-run creating the new template for simulation purposes. */ cause?: string; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean; /** This setting overrides the value of the `action.auto_create_index` cluster setting. * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`. * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean; /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ index_patterns?: Indices; /** An ordered list of component template names. * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[]; /** Template to be applied. * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping; /** If this object is included, the template is used to create data streams and their backing indices. * Supports an empty object. * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility; /** Priority to determine index template precedence when a new data stream or index is created. * The index template with the highest priority is chosen. * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). * This number is not automatically generated by Elasticsearch. */ priority?: long; /** Version number used to manage index templates externally. * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber; /** Optional user metadata about the index template. * May have any contents. * This map is not automatically generated by Elasticsearch.
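*
* A usage sketch for `IndicesSimulateIndexTemplateRequest` (editorial illustration,
* not part of the generated spec):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // Preview the settings, mappings, and aliases that would apply to
*     // an index named `my-index-000001`, without creating it.
*     const simulated = await client.indices.simulateIndexTemplate({ name: 'my-index-000001' })
*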
*/ _meta?: Metadata; /** The configuration option `ignore_missing_component_templates` can be used when an index template * references a component template that might not exist. */ ignore_missing_component_templates?: string[]; /** Marks this index template as deprecated. When creating or updating a non-deprecated index template * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; create?: never; cause?: never; master_timeout?: never; include_defaults?: never; allow_auto_create?: never; index_patterns?: never; composed_of?: never; template?: never; data_stream?: never; priority?: never; version?: never; _meta?: never; ignore_missing_component_templates?: never; deprecated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; create?: never; cause?: never; master_timeout?: never; include_defaults?: never; allow_auto_create?: never; index_patterns?: never; composed_of?: never; template?: never; data_stream?: never; priority?: never; version?: never; _meta?: never; ignore_missing_component_templates?: never; deprecated?: never; }; } export interface IndicesSimulateTemplateResponse { overlapping?: IndicesSimulateTemplateOverlapping[]; template: IndicesSimulateTemplateTemplate; } export interface IndicesSimulateTemplateTemplate { aliases: Record; mappings: MappingTypeMapping; settings: IndicesIndexSettings; } export interface IndicesSplitRequest extends RequestBase { /** Name of the source index to split. */ index: IndexName; /** Name of the target index to create. */ target: IndexName; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The number of shard copies that must be active before proceeding with the operation. * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards; /** Aliases for the resulting index. */ aliases?: Record; /** Configuration options for the target index. */ settings?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; target?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; target?: never; master_timeout?: never; timeout?: never; wait_for_active_shards?: never; aliases?: never; settings?: never; }; } export interface IndicesSplitResponse { acknowledged: boolean; shards_acknowledged: boolean; index: IndexName; } export type IndicesStatsIndexMetadataState = 'open' | 'close'; export interface IndicesStatsIndexStats { /** Contains statistics about completions across all shards assigned to the node. */ completion?: CompletionStats; /** Contains statistics about documents across all primary shards assigned to the node. */ docs?: DocStats; /** Contains statistics about the field data cache across all shards assigned to the node.
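*
* A usage sketch for `IndicesSplitRequest` (editorial illustration, not part of the
* generated spec; the target shard count must be a multiple of the source index's
* shard count):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     const split = await client.indices.split({
*       index: 'my-index',
*       target: 'my-split-index',
*       settings: { 'index.number_of_shards': 4 },
*     })
*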
*/ fielddata?: FielddataStats; /** Contains statistics about flush operations for the node. */ flush?: FlushStats; /** Contains statistics about get operations for the node. */ get?: GetStats; /** Contains statistics about indexing operations for the node. */ indexing?: IndexingStats; /** Contains statistics about indices operations for the node. */ indices?: IndicesStatsIndicesStats; /** Contains statistics about merge operations for the node. */ merges?: MergesStats; /** Contains statistics about the query cache across all shards assigned to the node. */ query_cache?: QueryCacheStats; /** Contains statistics about recovery operations for the node. */ recovery?: RecoveryStats; /** Contains statistics about refresh operations for the node. */ refresh?: RefreshStats; /** Contains statistics about the request cache across all shards assigned to the node. */ request_cache?: RequestCacheStats; /** Contains statistics about search operations for the node. */ search?: SearchStats; /** Contains statistics about segments across all shards assigned to the node. */ segments?: SegmentsStats; /** Contains statistics about the size of shards assigned to the node. */ store?: StoreStats; /** Contains statistics about transaction log operations for the node. */ translog?: TranslogStats; /** Contains statistics about index warming operations for the node. */ warmer?: WarmerStats; bulk?: BulkStats; shard_stats?: IndicesStatsShardsTotalStats; } export interface IndicesStatsIndicesStats { primaries?: IndicesStatsIndexStats; shards?: Record; total?: IndicesStatsIndexStats; uuid?: Uuid; health?: HealthStatus; status?: IndicesStatsIndexMetadataState; } export interface IndicesStatsMappingStats { total_count: long; total_estimated_overhead?: ByteSize; total_estimated_overhead_in_bytes: long; } export interface IndicesStatsRequest extends RequestBase { /** Limit the information returned to the specific metrics. */ metric?: Metrics; /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices. */ index?: Indices; /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields; /** Type of index that wildcard patterns can match. If the request can target data streams, this argument * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, * such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ fielddata_fields?: Fields; /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields; /** If true, statistics are not collected from closed indices. */ forbid_closed_indices?: boolean; /** Comma-separated list of search groups to include in the search statistics. */ groups?: string | string[]; /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean; /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean; /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ level?: Level; /** All values in `body` will be added to the request body.
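*
* A usage sketch for `IndicesStatsRequest` (editorial illustration, not part of the
* generated spec):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // Only document and store statistics, aggregated per shard.
*     const stats = await client.indices.stats({
*       index: 'my-index',
*       metric: ['docs', 'store'],
*       level: 'shards',
*     })
*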
*/ body?: string | ({ [key: string]: any; } & { metric?: never; index?: never; completion_fields?: never; expand_wildcards?: never; fielddata_fields?: never; fields?: never; forbid_closed_indices?: never; groups?: never; include_segment_file_sizes?: never; include_unloaded_segments?: never; level?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { metric?: never; index?: never; completion_fields?: never; expand_wildcards?: never; fielddata_fields?: never; fields?: never; forbid_closed_indices?: never; groups?: never; include_segment_file_sizes?: never; include_unloaded_segments?: never; level?: never; }; } export interface IndicesStatsResponse { indices?: Record; _shards: ShardStatistics; _all: IndicesStatsIndicesStats; } export interface IndicesStatsShardCommit { generation: integer; id: Id; num_docs: long; user_data: Record; } export interface IndicesStatsShardFileSizeInfo { description: string; size_in_bytes: long; min_size_in_bytes?: long; max_size_in_bytes?: long; average_size_in_bytes?: long; count?: long; } export interface IndicesStatsShardLease { id: Id; retaining_seq_no: SequenceNumber; timestamp: long; source: string; } export interface IndicesStatsShardPath { data_path: string; is_custom_data_path: boolean; state_path: string; } export interface IndicesStatsShardQueryCache { cache_count: long; cache_size: long; evictions: long; hit_count: long; memory_size_in_bytes: long; miss_count: long; total_count: long; } export interface IndicesStatsShardRetentionLeases { primary_term: long; version: VersionNumber; leases: IndicesStatsShardLease[]; } export interface IndicesStatsShardRouting { node: string; primary: boolean; relocating_node?: string | null; state: IndicesStatsShardRoutingState; } export type IndicesStatsShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING'; export interface IndicesStatsShardSequenceNumber { global_checkpoint: long; local_checkpoint: long; max_seq_no: SequenceNumber; } export interface IndicesStatsShardStats { commit?: IndicesStatsShardCommit; completion?: CompletionStats; docs?: DocStats; fielddata?: FielddataStats; flush?: FlushStats; get?: GetStats; indexing?: IndexingStats; mappings?: IndicesStatsMappingStats; merges?: MergesStats; shard_path?: IndicesStatsShardPath; query_cache?: IndicesStatsShardQueryCache; recovery?: RecoveryStats; refresh?: RefreshStats; request_cache?: RequestCacheStats; retention_leases?: IndicesStatsShardRetentionLeases; routing?: IndicesStatsShardRouting; search?: SearchStats; segments?: SegmentsStats; seq_no?: IndicesStatsShardSequenceNumber; store?: StoreStats; translog?: TranslogStats; warmer?: WarmerStats; bulk?: BulkStats; shards?: Record; shard_stats?: IndicesStatsShardsTotalStats; indices?: IndicesStatsIndicesStats; } export interface IndicesStatsShardsTotalStats { total_count: long; } export interface IndicesUpdateAliasesAction { /** Adds a data stream or index to an alias. * If the alias doesn’t exist, the `add` action creates it. */ add?: IndicesUpdateAliasesAddAction; /** Removes a data stream or index from an alias. */ remove?: IndicesUpdateAliasesRemoveAction; /** Deletes an index. * You cannot use this action on aliases or data streams. */ remove_index?: IndicesUpdateAliasesRemoveIndexAction; } export interface IndicesUpdateAliasesAddAction { /** Alias for the action. * Index alias names support date math. */ alias?: IndexAlias; /** Aliases for the action. * Index alias names support date math. 
*/ aliases?: IndexAlias | IndexAlias[]; /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer; /** Data stream or index for the action. * Supports wildcards (`*`). */ index?: IndexName; /** Data streams or indices for the action. * Supports wildcards (`*`). */ indices?: Indices; /** Value used to route indexing operations to a specific shard. * If specified, this overwrites the `routing` value for indexing operations. * Data stream aliases don’t support this parameter. */ index_routing?: Routing; /** If `true`, the alias is hidden. */ is_hidden?: boolean; /** If `true`, sets the write index or data stream for the alias. */ is_write_index?: boolean; /** Value used to route indexing and search operations to a specific shard. * Data stream aliases don’t support this parameter. */ routing?: Routing; /** Value used to route search operations to a specific shard. * If specified, this overwrites the `routing` value for search operations. * Data stream aliases don’t support this parameter. */ search_routing?: Routing; /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean; } export interface IndicesUpdateAliasesRemoveAction { /** Alias for the action. * Index alias names support date math. */ alias?: IndexAlias; /** Aliases for the action. * Index alias names support date math. */ aliases?: IndexAlias | IndexAlias[]; /** Data stream or index for the action. * Supports wildcards (`*`). */ index?: IndexName; /** Data streams or indices for the action. * Supports wildcards (`*`). */ indices?: Indices; /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean; } export interface IndicesUpdateAliasesRemoveIndexAction { /** Data stream or index for the action. * Supports wildcards (`*`). */ index?: IndexName; /** Data streams or indices for the action. * Supports wildcards (`*`). */ indices?: Indices; /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean; } export interface IndicesUpdateAliasesRequest extends RequestBase { /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** Actions to perform. */ actions?: IndicesUpdateAliasesAction[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; actions?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; actions?: never; }; } export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase; export interface IndicesValidateQueryIndicesValidationExplanation { error?: string; explanation?: string; index: IndexName; valid: boolean; } export interface IndicesValidateQueryRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases to search. * Supports wildcards (`*`). * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index?: Indices; /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. 
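*
* A usage sketch for `IndicesUpdateAliasesRequest` (editorial illustration, not part
* of the generated spec; it atomically swaps an alias between two hypothetical
* indices):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.indices.updateAliases({
*       actions: [
*         { remove: { index: 'logs-old', alias: 'logs' } },
*         { add: { index: 'logs-new', alias: 'logs', is_write_index: true } },
*       ],
*     })
*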
*/ allow_no_indices?: boolean; /** If `true`, the validation is executed on all shards instead of one random shard per index. */ all_shards?: boolean; /** Analyzer to use for the query string. * This parameter can only be used when the `q` query string parameter is specified. */ analyzer?: string; /** If `true`, wildcard and prefix queries are analyzed. */ analyze_wildcard?: boolean; /** The default operator for query string query: `AND` or `OR`. */ default_operator?: QueryDslOperator; /** Field to use as default where no field prefix is given in the query string. * This parameter can only be used when the `q` query string parameter is specified. */ df?: string; /** Type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** If `true`, the response returns detailed information if an error has occurred. */ explain?: boolean; /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean; /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. */ lenient?: boolean; /** If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. */ rewrite?: boolean; /** Query in the Lucene query string syntax. */ q?: string; /** Query in the Lucene query string syntax. */ query?: QueryDslQueryContainer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; allow_no_indices?: never; all_shards?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; expand_wildcards?: never; explain?: never; ignore_unavailable?: never; lenient?: never; rewrite?: never; q?: never; query?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; allow_no_indices?: never; all_shards?: never; analyzer?: never; analyze_wildcard?: never; default_operator?: never; df?: never; expand_wildcards?: never; explain?: never; ignore_unavailable?: never; lenient?: never; rewrite?: never; q?: never; query?: never; }; } export interface IndicesValidateQueryResponse { explanations?: IndicesValidateQueryIndicesValidationExplanation[]; _shards?: ShardStatistics; valid: boolean; error?: string; } export interface InferenceAdaptiveAllocations { /** Turn on `adaptive_allocations`. */ enabled?: boolean; /** The maximum number of allocations to scale to. * If set, it must be greater than or equal to `min_number_of_allocations`. */ max_number_of_allocations?: integer; /** The minimum number of allocations to scale to. * If set, it must be greater than or equal to 0. * If not defined, the deployment scales to 0. */ min_number_of_allocations?: integer; } export interface InferenceAlibabaCloudServiceSettings { /** A valid API key for the AlibabaCloud AI Search API. */ api_key: string; /** The name of the host address used for the inference task. * You can find the host address in the API keys section of the documentation. */ host: string; /** This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search. * By default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`. 
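*
* A usage sketch for `IndicesValidateQueryRequest` (editorial illustration, not part
* of the generated spec):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     // Validate a query without running it; `explain` adds error detail.
*     const validation = await client.indices.validateQuery({
*       index: 'my-index',
*       explain: true,
*       query: { match: { message: 'this is a test' } },
*     })
*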
*/ rate_limit?: InferenceRateLimitSetting; /** The name of the model service to use for the inference task. * The following service IDs are available for the `completion` task: * * * `ops-qwen-turbo` * * `qwen-turbo` * * `qwen-plus` * * `qwen-max` * * `qwen-max-longcontext` * * The following service ID is available for the `rerank` task: * * * `ops-bge-reranker-larger` * * The following service ID is available for the `sparse_embedding` task: * * * `ops-text-sparse-embedding-001` * * The following service IDs are available for the `text_embedding` task: * * * `ops-text-embedding-001` * * `ops-text-embedding-zh-001` * * `ops-text-embedding-en-001` * * `ops-text-embedding-002` */ service_id: string; /** The name of the workspace used for the inference task. */ workspace: string; } export type InferenceAlibabaCloudServiceType = 'alibabacloud-ai-search'; export interface InferenceAlibabaCloudTaskSettings { /** For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model. * Valid values are: * * * `ingest` for storing document embeddings in a vector database. * * `search` for storing embeddings of search queries run against a vector database to find relevant documents. */ input_type?: string; /** For a `sparse_embedding` task, it affects whether the token name will be returned in the response. * It defaults to `false`, which means only the token ID will be returned in the response. */ return_token?: boolean; } export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'sparse_embedding' | 'text_embedding'; export interface InferenceAmazonBedrockServiceSettings { /** A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. */ access_key: string; /** The base model ID or an ARN to a custom model based on a foundational model. * The base model IDs can be found in the Amazon Bedrock documentation. * Note that the model ID must be available for the provider chosen and your IAM user must have access to the model. */ model: string; /** The model provider for your deployment. * Note that some providers may support only certain task types. * Supported providers include: * * * `amazontitan` - available for `text_embedding` and `completion` task types * * `anthropic` - available for `completion` task type only * * `ai21labs` - available for `completion` task type only * * `cohere` - available for `text_embedding` and `completion` task types * * `meta` - available for `completion` task type only * * `mistral` - available for `completion` task type only */ provider?: string; /** The region that your model or ARN is deployed in. * The list of available regions per model can be found in the Amazon Bedrock documentation. */ region: string; /** This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock. * By default, the `amazonbedrock` service sets the number of requests allowed per minute to 240. */ rate_limit?: InferenceRateLimitSetting; /** A valid AWS secret key that is paired with the `access_key`. * For information about creating and managing access and secret keys, refer to the AWS documentation. */ secret_key: string; } export type InferenceAmazonBedrockServiceType = 'amazonbedrock'; export interface InferenceAmazonBedrockTaskSettings { /** For a `completion` task, it sets the maximum number of output tokens to be generated. */ max_new_tokens?: integer; /** For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results.
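*
* A usage sketch creating an AlibabaCloud AI Search endpoint (editorial illustration,
* not part of the generated spec; the credentials and host are hypothetical
* placeholders, and the exact `inference.put` request shape may vary by client
* version):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.inference.put({
*       task_type: 'text_embedding',
*       inference_id: 'my-alibabacloud-endpoint',
*       inference_config: {
*         service: 'alibabacloud-ai-search',
*         service_settings: {
*           api_key: 'your-api-key', // hypothetical placeholder
*           host: 'your-host.platform-cn-shanghai.opensearch.aliyuncs.com', // hypothetical placeholder
*           service_id: 'ops-text-embedding-001',
*           workspace: 'default',
*         },
*       },
*     })
*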
* At temperature 0.0 the model is most deterministic, at temperature 1.0 most random. * It should not be used if `top_p` or `top_k` is specified. */ temperature?: float; /** For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability. * It is only available for anthropic, cohere, and mistral providers. * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ top_k?: float; /** For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens. * Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence. * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ top_p?: float; } export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding'; export interface InferenceAnthropicServiceSettings { /** A valid API key for the Anthropic API. */ api_key: string; /** The name of the model to use for the inference task. * Refer to the Anthropic documentation for the list of supported models. */ model_id: string; /** This setting helps to minimize the number of rate limit errors returned from Anthropic. * By default, the `anthropic` service sets the number of requests allowed per minute to 50. */ rate_limit?: InferenceRateLimitSetting; } export type InferenceAnthropicServiceType = 'anthropic'; export interface InferenceAnthropicTaskSettings { /** For a `completion` task, it is the maximum number of tokens to generate before stopping. */ max_tokens: integer; /** For a `completion` task, it is the amount of randomness injected into the response. * For more details about the supported range, refer to Anthropic documentation. */ temperature?: float; /** For a `completion` task, it specifies to only sample from the top K options for each subsequent token. * It is recommended for advanced use cases only. * You usually only need to use `temperature`. */ top_k?: integer; /** For a `completion` task, it specifies to use Anthropic's nucleus sampling. * In nucleus sampling, Anthropic computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches the specified probability. * You should either alter `temperature` or `top_p`, but not both. * It is recommended for advanced use cases only. * You usually only need to use `temperature`. */ top_p?: float; } export type InferenceAnthropicTaskType = 'completion'; export interface InferenceAzureAiStudioServiceSettings { /** A valid API key of your Azure AI Studio model deployment. * This key can be found on the overview page for your deployment in the management section of your Azure AI Studio account. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** The type of endpoint that is available for deployment through Azure AI Studio: `token` or `realtime`. * The `token` endpoint type is for "pay as you go" endpoints that are billed per token. * The `realtime` endpoint type is for "real-time" endpoints that are billed per hour of usage. 
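*
* A usage sketch creating an Anthropic `completion` endpoint (editorial illustration,
* not part of the generated spec; the API key and model ID are hypothetical
* placeholders):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.inference.put({
*       task_type: 'completion',
*       inference_id: 'my-anthropic-endpoint',
*       inference_config: {
*         service: 'anthropic',
*         service_settings: { api_key: 'your-api-key', model_id: 'your-anthropic-model' },
*         task_settings: { max_tokens: 1024 }, // `max_tokens` is required for this task
*       },
*     })
*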
*/ endpoint_type: string; /** The target URL of your Azure AI Studio model deployment. * This can be found on the overview page for your deployment in the management section of your Azure AI Studio account. */ target: string; /** The model provider for your deployment. * Note that some providers may support only certain task types. * Supported providers include: * * * `cohere` - available for `text_embedding` and `completion` task types * * `databricks` - available for `completion` task type only * * `meta` - available for `completion` task type only * * `microsoft_phi` - available for `completion` task type only * * `mistral` - available for `completion` task type only * * `openai` - available for `text_embedding` and `completion` task types */ provider: string; /** This setting helps to minimize the number of rate limit errors returned from Azure AI Studio. * By default, the `azureaistudio` service sets the number of requests allowed per minute to 240. */ rate_limit?: InferenceRateLimitSetting; } export type InferenceAzureAiStudioServiceType = 'azureaistudio'; export interface InferenceAzureAiStudioTaskSettings { /** For a `completion` task, instruct the inference process to perform sampling. * It has no effect unless `temperature` or `top_p` is specified. */ do_sample?: float; /** For a `completion` task, provide a hint for the maximum number of output tokens to be generated. */ max_new_tokens?: integer; /** For a `completion` task, control the apparent creativity of generated completions with a sampling temperature. * It must be a number in the range of 0.0 to 2.0. * It should not be used if `top_p` is specified. */ temperature?: float; /** For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability. * It is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0. * It should not be used if `temperature` is specified. */ top_p?: float; /** For a `text_embedding` task, specify the user issuing the request. * This information can be used for abuse detection. */ user?: string; } export type InferenceAzureAiStudioTaskType = 'completion' | 'text_embedding'; export interface InferenceAzureOpenAIServiceSettings { /** A valid API key for your Azure OpenAI account. * You must specify either `api_key` or `entra_id`. * If you do not provide either or you provide both, you will receive an error when you try to create your model. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key?: string; /** The Azure API version ID to use. * It is recommended to use the latest supported non-preview version. */ api_version: string; /** The deployment name of your deployed models. * Your Azure OpenAI deployments can be found through the Azure OpenAI Studio portal that is linked to your subscription. */ deployment_id: string; /** A valid Microsoft Entra token. * You must specify either `api_key` or `entra_id`. * If you do not provide either or you provide both, you will receive an error when you try to create your model. */ entra_id?: string; /** This setting helps to minimize the number of rate limit errors returned from Azure.
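*
* A usage sketch creating an Azure AI Studio endpoint (editorial illustration, not
* part of the generated spec; the target URL and key are hypothetical placeholders):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.inference.put({
*       task_type: 'completion',
*       inference_id: 'my-azure-ai-studio-endpoint',
*       inference_config: {
*         service: 'azureaistudio',
*         service_settings: {
*           api_key: 'your-api-key', // hypothetical placeholder
*           target: 'https://your-deployment.example.inference.ai.azure.com', // hypothetical placeholder
*           provider: 'openai',
*           endpoint_type: 'token',
*         },
*       },
*     })
*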
* The `azureopenai` service sets a default number of requests allowed per minute depending on the task type. * For `text_embedding`, it is set to `1440`. * For `completion`, it is set to `120`. */ rate_limit?: InferenceRateLimitSetting; /** The name of your Azure OpenAI resource. * You can find this from the list of resources in the Azure Portal for your subscription. */ resource_name: string; } export type InferenceAzureOpenAIServiceType = 'azureopenai'; export interface InferenceAzureOpenAITaskSettings { /** For a `completion` or `text_embedding` task, specify the user issuing the request. * This information can be used for abuse detection. */ user?: string; } export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding'; export type InferenceCohereEmbeddingType = 'byte' | 'float' | 'int8'; export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search'; export interface InferenceCohereServiceSettings { /** A valid API key for your Cohere account. * You can find or create your Cohere API keys on the Cohere API key settings page. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** For a `text_embedding` task, the types of embeddings you want to get back. * Use `byte` for signed int8 embeddings (this is a synonym of `int8`). * Use `float` for the default float embeddings. * Use `int8` for signed int8 embeddings. */ embedding_type?: InferenceCohereEmbeddingType; /** For a `completion`, `rerank`, or `text_embedding` task, the name of the model to use for the inference task. * * * For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command). * * For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1). * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). * * The default value for a text embedding task is `embed-english-v2.0`. */ model_id?: string; /** This setting helps to minimize the number of rate limit errors returned from Cohere. * By default, the `cohere` service sets the number of requests allowed per minute to 10000. */ rate_limit?: InferenceRateLimitSetting; /** The similarity measure. * If the `embedding_type` is `float`, the default value is `dot_product`. * If the `embedding_type` is `int8` or `byte`, the default value is `cosine`. */ similarity?: InferenceCohereSimilarityType; } export type InferenceCohereServiceType = 'cohere'; export type InferenceCohereSimilarityType = 'cosine' | 'dot_product' | 'l2_norm'; export interface InferenceCohereTaskSettings { /** For a `text_embedding` task, the type of input passed to the model. * Valid values are: * * * `classification`: Use it for embeddings passed through a text classifier. * * `clustering`: Use it for the embeddings run through a clustering algorithm. * * `ingest`: Use it for storing document embeddings in a vector database. * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. * * IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. 
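*
* A usage sketch creating an Azure OpenAI `text_embedding` endpoint (editorial
* illustration, not part of the generated spec; the resource, deployment, and API
* version values are hypothetical placeholders):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.inference.put({
*       task_type: 'text_embedding',
*       inference_id: 'my-azure-openai-endpoint',
*       inference_config: {
*         service: 'azureopenai',
*         service_settings: {
*           api_key: 'your-api-key', // hypothetical placeholder
*           resource_name: 'your-resource', // hypothetical placeholder
*           deployment_id: 'your-deployment', // hypothetical placeholder
*           api_version: '2024-02-01', // hypothetical placeholder
*         },
*       },
*     })
*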
*/ input_type?: InferenceCohereInputType; /** For a `rerank` task, return document text within the results. */ return_documents?: boolean; /** For a `rerank` task, the number of most relevant documents to return. * It defaults to the number of documents. * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ top_n?: integer; /** For a `text_embedding` task, the method to handle inputs longer than the maximum token length. * Valid values are: * * * `END`: When the input exceeds the maximum input token length, the end of the input is discarded. * * `NONE`: When the input exceeds the maximum input token length, an error is returned. * * `START`: When the input exceeds the maximum input token length, the start of the input is discarded. */ truncate?: InferenceCohereTruncateType; } export type InferenceCohereTaskType = 'completion' | 'rerank' | 'text_embedding'; export type InferenceCohereTruncateType = 'END' | 'NONE' | 'START'; export interface InferenceCompletionInferenceResult { completion: InferenceCompletionResult[]; } export interface InferenceCompletionResult { result: string; } export interface InferenceCompletionTool { /** The type of tool. */ type: string; /** The function definition. */ function: InferenceCompletionToolFunction; } export interface InferenceCompletionToolChoice { /** The type of the tool. */ type: string; /** The tool choice function. */ function: InferenceCompletionToolChoiceFunction; } export interface InferenceCompletionToolChoiceFunction { /** The name of the function to call. */ name: string; } export interface InferenceCompletionToolFunction { /** A description of what the function does. * This is used by the model to choose when and how to call the function. */ description?: string; /** The name of the function. */ name: string; /** The parameters the function accepts. This should be formatted as a JSON object. */ parameters?: any; /** Whether to enable schema adherence when generating the function call. */ strict?: boolean; } export type InferenceCompletionToolType = string | InferenceCompletionToolChoice; export interface InferenceContentObject { /** The text content. */ text: string; /** The type of content. */ type: string; } export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { pipelines: string[]; } export type InferenceDenseByteVector = byte[]; export type InferenceDenseVector = float[]; export interface InferenceElasticsearchServiceSettings { /** Adaptive allocations configuration details. * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. * If `enabled` is true, do not set the number of allocations manually. */ adaptive_allocations?: InferenceAdaptiveAllocations; /** The deployment identifier for a trained model deployment. * When `deployment_id` is used the `model_id` is optional. */ deployment_id?: string; /** The name of the model to use for the inference task. * It can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client.
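*
* A usage sketch creating a Cohere `rerank` endpoint (editorial illustration, not
* part of the generated spec; the key and model ID are hypothetical placeholders):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.inference.put({
*       task_type: 'rerank',
*       inference_id: 'my-cohere-rerank-endpoint',
*       inference_config: {
*         service: 'cohere',
*         service_settings: { api_key: 'your-api-key', model_id: 'your-rerank-model' },
*         task_settings: { top_n: 10, return_documents: true },
*       },
*     })
*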
*/ model_id: string; /** The total number of allocations that are assigned to the model across machine learning nodes. * Increasing this value generally increases the throughput. * If adaptive allocations are enabled, do not set this value because it's automatically set. */ num_allocations?: integer; /** The number of threads used by each model allocation during inference. * Increasing this value generally increases the speed per inference request. * The inference process is a compute-bound process; `threads_per_allocation` must not exceed the number of available allocated processors per node. * The value must be a power of 2. * The maximum value is 32. */ num_threads: integer; } export type InferenceElasticsearchServiceType = 'elasticsearch'; export interface InferenceElasticsearchTaskSettings { /** For a `rerank` task, return the document instead of only the index. */ return_documents?: boolean; } export type InferenceElasticsearchTaskType = 'rerank' | 'sparse_embedding' | 'text_embedding'; export interface InferenceElserServiceSettings { /** Adaptive allocations configuration details. * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. * If `enabled` is true, do not set the number of allocations manually. */ adaptive_allocations?: InferenceAdaptiveAllocations; /** The total number of allocations this model is assigned across machine learning nodes. * Increasing this value generally increases the throughput. * If adaptive allocations are enabled, do not set this value because it's automatically set. */ num_allocations: integer; /** The number of threads used by each model allocation during inference. * Increasing this value generally increases the speed per inference request. * The inference process is a compute-bound process; `threads_per_allocation` must not exceed the number of available allocated processors per node. * The value must be a power of 2. * The maximum value is 32. * * > info * > If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1. */ num_threads: integer; } export type InferenceElserServiceType = 'elser'; export type InferenceElserTaskType = 'sparse_embedding'; export type InferenceGoogleAiServiceType = 'googleaistudio'; export interface InferenceGoogleAiStudioServiceSettings { /** A valid API key of your Google Gemini account. */ api_key: string; /** The name of the model to use for the inference task. * Refer to the Google documentation for the list of supported models. */ model_id: string; /** This setting helps to minimize the number of rate limit errors returned from Google AI Studio. * By default, the `googleaistudio` service sets the number of requests allowed per minute to 360. */ rate_limit?: InferenceRateLimitSetting; } export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding'; export interface InferenceGoogleVertexAIServiceSettings { /** The name of the location to use for the inference task. * Refer to the Google documentation for the list of supported locations. */ location: string; /** The name of the model to use for the inference task.
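*
* A usage sketch creating an endpoint on the `elasticsearch` service with adaptive
* allocations (editorial illustration, not part of the generated spec;
* `.multilingual-e5-small` is the built-in E5 model mentioned above):
*
*     import { Client } from '@elastic/elasticsearch'
*     const client = new Client({ node: 'http://localhost:9200' })
*     await client.inference.put({
*       task_type: 'text_embedding',
*       inference_id: 'my-e5-endpoint',
*       inference_config: {
*         service: 'elasticsearch',
*         service_settings: {
*           model_id: '.multilingual-e5-small',
*           num_threads: 1,
*           adaptive_allocations: { enabled: true, min_number_of_allocations: 1, max_number_of_allocations: 4 },
*         },
*       },
*     })
*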
* Refer to the Google documentation for the list of supported models. */ model_id: string; /** The name of the project to use for the inference task. */ project_id: string; /** This setting helps to minimize the number of rate limit errors returned from Google Vertex AI. * By default, the `googlevertexai` service sets the number of requests allowed per minute to 30,000. */ rate_limit?: InferenceRateLimitSetting; /** A valid service account in JSON format for the Google Vertex AI API. */ service_account_json: string; } export type InferenceGoogleVertexAIServiceType = 'googlevertexai'; export interface InferenceGoogleVertexAITaskSettings { /** For a `text_embedding` task, truncate inputs longer than the maximum token length automatically. */ auto_truncate?: boolean; /** For a `rerank` task, the number of the top N documents that should be returned. */ top_n?: integer; } export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding'; export interface InferenceHuggingFaceServiceSettings { /** A valid access token for your HuggingFace account. * You can create or find your access tokens on the HuggingFace settings page. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** This setting helps to minimize the number of rate limit errors returned from Hugging Face. * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000. */ rate_limit?: InferenceRateLimitSetting; /** The URL endpoint to use for the requests. */ url: string; } export type InferenceHuggingFaceServiceType = 'hugging_face'; export type InferenceHuggingFaceTaskType = 'text_embedding'; export interface InferenceInferenceChunkingSettings { /** The maximum size of a chunk in words. * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */ max_chunk_size?: integer; /** The number of overlapping words for chunks. * It is applicable only to a `word` chunking strategy. * This value cannot be higher than half the `max_chunk_size` value. */ overlap?: integer; /** The number of overlapping sentences for chunks. * It is applicable only for a `sentence` chunking strategy. * It can be either `1` or `0`. */ sentence_overlap?: integer; /** The chunking strategy: `sentence` or `word`.
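* * For example, a hypothetical `chunking_settings` object that uses the `sentence` strategy (illustrative values): * ``` * { * "strategy": "sentence", * "max_chunk_size": 250, * "sentence_overlap": 1 * } * ```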
*/ strategy?: string; } export interface InferenceInferenceEndpoint { /** Chunking configuration object */ chunking_settings?: InferenceInferenceChunkingSettings; /** The service type */ service: string; /** Settings specific to the service */ service_settings: InferenceServiceSettings; /** Task settings specific to the service and task type */ task_settings?: InferenceTaskSettings; } export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskType; } export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeAlibabaCloudAI; } export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeAmazonBedrock; } export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeAnthropic; } export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeAzureAIStudio; } export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeAzureOpenAI; } export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeCohere; } export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeELSER; } export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeElasticsearch; } export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeGoogleAIStudio; } export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeGoogleVertexAI; } export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeHuggingFace; } export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeJinaAi; } export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeMistral; } export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeOpenAI; } export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint { /** The inference Id */ 
inference_id: string; /** The task type */ task_type: InferenceTaskTypeVoyageAI; } export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string; /** The task type */ task_type: InferenceTaskTypeWatsonx; } export interface InferenceInferenceResult { text_embedding_bytes?: InferenceTextEmbeddingByteResult[]; text_embedding_bits?: InferenceTextEmbeddingByteResult[]; text_embedding?: InferenceTextEmbeddingResult[]; sparse_embedding?: InferenceSparseEmbeddingResult[]; completion?: InferenceCompletionResult[]; rerank?: InferenceRankedDocument[]; } export interface InferenceJinaAIServiceSettings { /** A valid API key of your JinaAI account. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** The name of the model to use for the inference task. * For a `rerank` task, it is required. * For a `text_embedding` task, it is optional. */ model_id?: string; /** This setting helps to minimize the number of rate limit errors returned from JinaAI. * By default, the `jinaai` service sets the number of requests allowed per minute to 2000 for all task types. */ rate_limit?: InferenceRateLimitSetting; /** For a `text_embedding` task, the similarity measure. One of `cosine`, `dot_product`, or `l2_norm`. * The default value varies with the embedding type. * For example, a `float` embedding type uses a `dot_product` similarity measure by default. */ similarity?: InferenceJinaAISimilarityType; } export type InferenceJinaAIServiceType = 'jinaai'; export type InferenceJinaAISimilarityType = 'cosine' | 'dot_product' | 'l2_norm'; export interface InferenceJinaAITaskSettings { /** For a `rerank` task, return the doc text within the results. */ return_documents?: boolean; /** For a `text_embedding` task, the task passed to the model. * Valid values are: * * * `classification`: Use it for embeddings passed through a text classifier. * * `clustering`: Use it for the embeddings run through a clustering algorithm. * * `ingest`: Use it for storing document embeddings in a vector database. * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. */ task?: InferenceJinaAITextEmbeddingTask; /** For a `rerank` task, the number of most relevant documents to return. * It defaults to the number of documents. * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ top_n?: integer; } export type InferenceJinaAITaskType = 'rerank' | 'text_embedding'; export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search'; export interface InferenceMessage { /** The content of the message. * * String example: * ``` * { * "content": "Some string" * } * ``` * * Object example: * ``` * { * "content": [ * { * "text": "Some text", * "type": "text" * } * ] * } * ``` */ content?: InferenceMessageContent; /** The role of the message author. Valid values are `user`, `assistant`, `system`, and `tool`. */ role: string; /** Only for `tool` role messages. The tool call that this message is responding to.
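* * Example (the ID value is illustrative): * ``` * { * "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd" * } * ```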
*/ tool_call_id?: Id; /** Only for `assistant` role messages. The tool calls generated by the model. If it's specified, the `content` field is optional. * Example: * ``` * { * "tool_calls": [ * { * "id": "call_KcAjWtAww20AihPHphUh46Gd", * "type": "function", * "function": { * "name": "get_current_weather", * "arguments": "{\"location\":\"Boston, MA\"}" * } * } * ] * } * ``` */ tool_calls?: InferenceToolCall[]; } export type InferenceMessageContent = string | InferenceContentObject[]; export interface InferenceMistralServiceSettings { /** A valid API key of your Mistral account. * You can find your Mistral API keys, or create a new one, on the API Keys page. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** The maximum number of tokens per input before chunking occurs. */ max_input_tokens?: integer; /** The name of the model to use for the inference task. * Refer to the Mistral models documentation for the list of available text embedding models. */ model: string; /** This setting helps to minimize the number of rate limit errors returned from the Mistral API. * By default, the `mistral` service sets the number of requests allowed per minute to 240. */ rate_limit?: InferenceRateLimitSetting; } export type InferenceMistralServiceType = 'mistral'; export type InferenceMistralTaskType = 'text_embedding'; export interface InferenceOpenAIServiceSettings { /** A valid API key of your OpenAI account. * You can find your OpenAI API keys in your OpenAI account under the API keys section. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** The number of dimensions the resulting output embeddings should have. * It is supported only in `text-embedding-3` and later models. * If it is not set, the OpenAI defined default for the model is used. */ dimensions?: integer; /** The name of the model to use for the inference task. * Refer to the OpenAI documentation for the list of available text embedding models. */ model_id: string; /** The unique identifier for your organization. * You can find the Organization ID in your OpenAI account under *Settings > Organizations*. */ organization_id?: string; /** This setting helps to minimize the number of rate limit errors returned from OpenAI. * The `openai` service sets a default number of requests allowed per minute depending on the task type. * For `text_embedding`, it is set to `3000`. * For `completion`, it is set to `500`. */ rate_limit?: InferenceRateLimitSetting; /** The URL endpoint to use for the requests. * It can be changed for testing purposes. */ url?: string; } export type InferenceOpenAIServiceType = 'openai'; export interface InferenceOpenAITaskSettings { /** For a `completion` or `text_embedding` task, specify the user issuing the request. * This information can be used for abuse detection.
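* * Example (a hypothetical opaque end-user identifier): * ``` * { * "user": "user-1234" * } * ```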
*/ user?: string; } export type InferenceOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding'; export interface InferenceRankedDocument { index: integer; relevance_score: float; text?: string; } export interface InferenceRateLimitSetting { /** The number of requests allowed per minute. * By default, the number of requests allowed per minute is set by each service as follows: * * * `alibabacloud-ai-search` service: `1000` * * `anthropic` service: `50` * * `azureaistudio` service: `240` * * `azureopenai` service and task type `text_embedding`: `1440` * * `azureopenai` service and task type `completion`: `120` * * `cohere` service: `10000` * * `elastic` service and task type `chat_completion`: `240` * * `googleaistudio` service: `360` * * `googlevertexai` service: `30000` * * `hugging_face` service: `3000` * * `jinaai` service: `2000` * * `mistral` service: `240` * * `openai` service and task type `text_embedding`: `3000` * * `openai` service and task type `completion`: `500` * * `voyageai` service: `2000` * * `watsonxai` service: `120` */ requests_per_minute?: integer; } export interface InferenceRequestChatCompletion { /** A list of objects representing the conversation. * Requests should generally only add new messages from the user (role `user`). * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ messages: InferenceMessage[]; /** The ID of the model to use. */ model?: string; /** The upper bound limit for the number of tokens that can be generated for a completion request. */ max_completion_tokens?: long; /** A sequence of strings to control when the model should stop generating additional tokens. */ stop?: string[]; /** The sampling temperature to use. */ temperature?: float; /** Controls which tool is called by the model. * String representation: One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools. * Example (object representation): * ``` * { * "tool_choice": { * "type": "function", * "function": { * "name": "get_current_weather" * } * } * } * ``` */ tool_choice?: InferenceCompletionToolType; /** A list of tools that the model can call. * Example: * ``` * { * "tools": [ * { * "type": "function", * "function": { * "name": "get_price_of_item", * "description": "Get the current price of an item", * "parameters": { * "type": "object", * "properties": { * "item": { * "id": "12345" * }, * "unit": { * "type": "currency" * } * } * } * } * } * ] * } * ``` */ tools?: InferenceCompletionTool[]; /** Nucleus sampling, an alternative to sampling with temperature.
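* * A minimal chat completion request body sketch (the model ID and sampling values are illustrative): * ``` * { * "model": "gpt-4o", * "messages": [ * { * "role": "user", * "content": "Say hello" * } * ], * "temperature": 0.7, * "top_p": 0.9 * } * ```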
*/ top_p?: float; } export interface InferenceRerankedInferenceResult { rerank: InferenceRankedDocument[]; } export type InferenceServiceSettings = any; export interface InferenceSparseEmbeddingInferenceResult { sparse_embedding: InferenceSparseEmbeddingResult[]; } export interface InferenceSparseEmbeddingResult { embedding: InferenceSparseVector; } export type InferenceSparseVector = Record<string, float>; export type InferenceTaskSettings = any; export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion'; export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding'; export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion'; export type InferenceTaskTypeAnthropic = 'completion'; export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion'; export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion'; export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion'; export type InferenceTaskTypeELSER = 'sparse_embedding'; export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank'; export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion'; export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank'; export type InferenceTaskTypeHuggingFace = 'text_embedding'; export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank'; export type InferenceTaskTypeMistral = 'text_embedding'; export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion'; export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank'; export type InferenceTaskTypeWatsonx = 'text_embedding'; export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector; } export interface InferenceTextEmbeddingInferenceResult { text_embedding_bytes?: InferenceTextEmbeddingByteResult[]; text_embedding_bits?: InferenceTextEmbeddingByteResult[]; text_embedding?: InferenceTextEmbeddingResult[]; } export interface InferenceTextEmbeddingResult { embedding: InferenceDenseVector; } export interface InferenceToolCall { /** The identifier of the tool call. */ id: Id; /** The function that the model called. */ function: InferenceToolCallFunction; /** The type of the tool call. */ type: string; } export interface InferenceToolCallFunction { /** The arguments to call the function with in JSON format. */ arguments: string; /** The name of the function to call. */ name: string; } export interface InferenceVoyageAIServiceSettings { /** The number of dimensions for resulting output embeddings. * This setting maps to `output_dimension` in the VoyageAI documentation. * Only for the `text_embedding` task type. */ dimensions?: integer; /** The name of the model to use for the inference task. * Refer to the VoyageAI documentation for the list of available text embedding and rerank models. */ model_id: string; /** This setting helps to minimize the number of rate limit errors returned from VoyageAI. * The `voyageai` service sets a default number of requests allowed per minute depending on the task type. * For both `text_embedding` and `rerank`, it is set to `2000`. */ rate_limit?: InferenceRateLimitSetting; /** The data type for the embeddings to be returned. * This setting maps to `output_dtype` in the VoyageAI documentation. * Permitted values: `float`, `int8`, `bit`. * `int8` is a synonym of `byte` in the VoyageAI documentation.
* `bit` is a synonym of `binary` in the VoyageAI documentation. * Only for the `text_embedding` task type. */ embedding_type?: float; } export type InferenceVoyageAIServiceType = 'voyageai'; export interface InferenceVoyageAITaskSettings { /** Type of the input text. * Permitted values: `ingest` (maps to `document` in the VoyageAI documentation), `search` (maps to `query` in the VoyageAI documentation). * Only for the `text_embedding` task type. */ input_type?: string; /** Whether to return the source documents in the response. * Only for the `rerank` task type. */ return_documents?: boolean; /** The number of most relevant documents to return. * If not specified, the reranking results of all documents will be returned. * Only for the `rerank` task type. */ top_k?: integer; /** Whether to truncate the input texts to fit within the context length. */ truncation?: boolean; } export type InferenceVoyageAITaskType = 'text_embedding' | 'rerank'; export interface InferenceWatsonxServiceSettings { /** A valid API key of your Watsonx account. * You can find your Watsonx API keys, or create a new one, on the API keys page. * * IMPORTANT: You need to provide the API key only once, during the inference model creation. * The get inference endpoint API does not retrieve your API key. * After creating the inference model, you cannot change the associated API key. * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string; /** A version parameter that takes a version date in the format of `YYYY-MM-DD`. * For the active version data parameters, refer to the Watsonx documentation. */ api_version: string; /** The name of the model to use for the inference task. * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. */ model_id: string; /** The identifier of the IBM Cloud project to use for the inference task. */ project_id: string; /** This setting helps to minimize the number of rate limit errors returned from Watsonx. * By default, the `watsonxai` service sets the number of requests allowed per minute to 120. */ rate_limit?: InferenceRateLimitSetting; /** The URL of the inference endpoint that you created on Watsonx. */ url: string; } export type InferenceWatsonxServiceType = 'watsonxai'; export type InferenceWatsonxTaskType = 'text_embedding'; export interface InferenceChatCompletionUnifiedRequest extends RequestBase { /** The inference Id */ inference_id: Id; /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration; chat_completion_request?: InferenceRequestChatCompletion; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { inference_id?: never; timeout?: never; chat_completion_request?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { inference_id?: never; timeout?: never; chat_completion_request?: never; }; } export type InferenceChatCompletionUnifiedResponse = StreamResult; export interface InferenceCompletionRequest extends RequestBase { /** The inference Id */ inference_id: Id; /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration; /** Inference input. * Either a string or an array of strings.
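* * Example (a single string, which is all the `completion` task type currently supports): * ``` * { * "input": "What is Elastic?" * } * ```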
*/ input: string | string[]; /** Optional task settings */ task_settings?: InferenceTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { inference_id?: never; timeout?: never; input?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { inference_id?: never; timeout?: never; input?: never; task_settings?: never; }; } export type InferenceCompletionResponse = InferenceCompletionInferenceResult; export interface InferenceDeleteRequest extends RequestBase { /** The task type */ task_type?: InferenceTaskType; /** The inference identifier. */ inference_id: Id; /** When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. */ dry_run?: boolean; /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */ force?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; inference_id?: never; dry_run?: never; force?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; inference_id?: never; dry_run?: never; force?: never; }; } export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult; export interface InferenceGetRequest extends RequestBase { /** The task type */ task_type?: InferenceTaskType; /** The inference Id */ inference_id?: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; inference_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; inference_id?: never; }; } export interface InferenceGetResponse { endpoints: InferenceInferenceEndpointInfo[]; } export interface InferenceInferenceRequest extends RequestBase { /** The type of inference task that the model performs. */ task_type?: InferenceTaskType; /** The unique identifier for the inference endpoint. */ inference_id: Id; /** The amount of time to wait for the inference request to complete. */ timeout?: Duration; /** The query input, which is required only for the `rerank` task. * It is not required for other tasks. */ query?: string; /** The text on which you want to perform the inference task. * It can be a single string or an array. * * > info * > Inference endpoints for the `completion` task type currently only support a single string as input. */ input: string | string[]; /** Task settings for the individual inference request. * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; inference_id?: never; timeout?: never; query?: never; input?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. 
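* * For example, to pass a standard query parameter that is not modeled above (illustrative): * ``` * { * "querystring": { "pretty": true } * } * ```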
*/ querystring?: { [key: string]: any; } & { task_type?: never; inference_id?: never; timeout?: never; query?: never; input?: never; task_settings?: never; }; } export type InferenceInferenceResponse = InferenceInferenceResult; export interface InferencePutRequest extends RequestBase { /** The task type. Refer to the integration list in the API description for the available task types. */ task_type?: InferenceTaskType; /** The inference Id */ inference_id: Id; inference_config?: InferenceInferenceEndpoint; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; inference_id?: never; inference_config?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; inference_id?: never; inference_config?: never; }; } export type InferencePutResponse = InferenceInferenceEndpointInfo; export interface InferencePutAlibabacloudRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceAlibabaCloudTaskType; /** The unique identifier of the inference endpoint. */ alibabacloud_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ service: InferenceAlibabaCloudServiceType; /** Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. */ service_settings: InferenceAlibabaCloudServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceAlibabaCloudTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; alibabacloud_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; alibabacloud_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI; export interface InferencePutAmazonbedrockRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceAmazonBedrockTaskType; /** The unique identifier of the inference endpoint. */ amazonbedrock_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */ service: InferenceAmazonBedrockServiceType; /** Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. */ service_settings: InferenceAmazonBedrockServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceAmazonBedrockTaskSettings; /** All values in `body` will be added to the request body. 
*/ body?: string | ({ [key: string]: any; } & { task_type?: never; amazonbedrock_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; amazonbedrock_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock; export interface InferencePutAnthropicRequest extends RequestBase { /** The task type. * The only valid task type for the model to perform is `completion`. */ task_type: InferenceAnthropicTaskType; /** The unique identifier of the inference endpoint. */ anthropic_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `anthropic`. */ service: InferenceAnthropicServiceType; /** Settings used to install the inference model. These settings are specific to the `anthropic` service. */ service_settings: InferenceAnthropicServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceAnthropicTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; anthropic_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; anthropic_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic; export interface InferencePutAzureaistudioRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceAzureAiStudioTaskType; /** The unique identifier of the inference endpoint. */ azureaistudio_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `azureaistudio`. */ service: InferenceAzureAiStudioServiceType; /** Settings used to install the inference model. These settings are specific to the `azureaistudio` service. */ service_settings: InferenceAzureAiStudioServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceAzureAiStudioTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; azureaistudio_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring.
*/ querystring?: { [key: string]: any; } & { task_type?: never; azureaistudio_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio; export interface InferencePutAzureopenaiRequest extends RequestBase { /** The type of the inference task that the model will perform. * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ task_type: InferenceAzureOpenAITaskType; /** The unique identifier of the inference endpoint. */ azureopenai_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `azureopenai`. */ service: InferenceAzureOpenAIServiceType; /** Settings used to install the inference model. These settings are specific to the `azureopenai` service. */ service_settings: InferenceAzureOpenAIServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceAzureOpenAITaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; azureopenai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; azureopenai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI; export interface InferencePutCohereRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceCohereTaskType; /** The unique identifier of the inference endpoint. */ cohere_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `cohere`. */ service: InferenceCohereServiceType; /** Settings used to install the inference model. * These settings are specific to the `cohere` service. */ service_settings: InferenceCohereServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceCohereTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; cohere_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; cohere_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere; export interface InferencePutElasticsearchRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceElasticsearchTaskType; /** The unique identifier of the inference endpoint. * It must not match the `model_id`.
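* * Example (a hypothetical endpoint ID, distinct from any model ID): * ``` * "my-e5-endpoint" * ```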
*/ elasticsearch_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `elasticsearch`. */ service: InferenceElasticsearchServiceType; /** Settings used to install the inference model. These settings are specific to the `elasticsearch` service. */ service_settings: InferenceElasticsearchServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceElasticsearchTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; elasticsearch_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; elasticsearch_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch; export interface InferencePutElserRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceElserTaskType; /** The unique identifier of the inference endpoint. */ elser_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `elser`. */ service: InferenceElserServiceType; /** Settings used to install the inference model. These settings are specific to the `elser` service. */ service_settings: InferenceElserServiceSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; elser_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; elser_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }; } export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER; export interface InferencePutGoogleaistudioRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceGoogleAiStudioTaskType; /** The unique identifier of the inference endpoint. */ googleaistudio_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `googleaistudio`. */ service: InferenceGoogleAiServiceType; /** Settings used to install the inference model. These settings are specific to the `googleaistudio` service. */ service_settings: InferenceGoogleAiStudioServiceSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; googleaistudio_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { task_type?: never; googleaistudio_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }; } export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio; export interface InferencePutGooglevertexaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceGoogleVertexAITaskType; /** The unique identifier of the inference endpoint. */ googlevertexai_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `googlevertexai`. */ service: InferenceGoogleVertexAIServiceType; /** Settings used to install the inference model. These settings are specific to the `googlevertexai` service. */ service_settings: InferenceGoogleVertexAIServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceGoogleVertexAITaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; googlevertexai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; googlevertexai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI; export interface InferencePutHuggingFaceRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceHuggingFaceTaskType; /** The unique identifier of the inference endpoint. */ huggingface_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `hugging_face`. */ service: InferenceHuggingFaceServiceType; /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */ service_settings: InferenceHuggingFaceServiceSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; huggingface_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; huggingface_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }; } export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace; export interface InferencePutJinaaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceJinaAITaskType; /** The unique identifier of the inference endpoint. */ jinaai_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `jinaai`. */ service: InferenceJinaAIServiceType; /** Settings used to install the inference model. 
These settings are specific to the `jinaai` service. */ service_settings: InferenceJinaAIServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceJinaAITaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; jinaai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; jinaai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi; export interface InferencePutMistralRequest extends RequestBase { /** The task type. * The only valid task type for the model to perform is `text_embedding`. */ task_type: InferenceMistralTaskType; /** The unique identifier of the inference endpoint. */ mistral_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `mistral`. */ service: InferenceMistralServiceType; /** Settings used to install the inference model. These settings are specific to the `mistral` service. */ service_settings: InferenceMistralServiceSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; mistral_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; mistral_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; }; } export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral; export interface InferencePutOpenaiRequest extends RequestBase { /** The type of the inference task that the model will perform. * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ task_type: InferenceOpenAITaskType; /** The unique identifier of the inference endpoint. */ openai_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `openai`. */ service: InferenceOpenAIServiceType; /** Settings used to install the inference model. These settings are specific to the `openai` service. */ service_settings: InferenceOpenAIServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceOpenAITaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; openai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { task_type?: never; openai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI; export interface InferencePutVoyageaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceVoyageAITaskType; /** The unique identifier of the inference endpoint. */ voyageai_inference_id: Id; /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings; /** The type of service supported for the specified task type. In this case, `voyageai`. */ service: InferenceVoyageAIServiceType; /** Settings used to install the inference model. These settings are specific to the `voyageai` service. */ service_settings: InferenceVoyageAIServiceSettings; /** Settings to configure the inference task. * These settings are specific to the task type you specified. */ task_settings?: InferenceVoyageAITaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; voyageai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; voyageai_inference_id?: never; chunking_settings?: never; service?: never; service_settings?: never; task_settings?: never; }; } export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI; export interface InferencePutWatsonxRequest extends RequestBase { /** The task type. * The only valid task type for the model to perform is `text_embedding`. */ task_type: InferenceWatsonxTaskType; /** The unique identifier of the inference endpoint. */ watsonx_inference_id: Id; /** The type of service supported for the specified task type. In this case, `watsonxai`. */ service: InferenceWatsonxServiceType; /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ service_settings: InferenceWatsonxServiceSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_type?: never; watsonx_inference_id?: never; service?: never; service_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_type?: never; watsonx_inference_id?: never; service?: never; service_settings?: never; }; } export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx; export interface InferenceRerankRequest extends RequestBase { /** The unique identifier for the inference endpoint. */ inference_id: Id; /** The amount of time to wait for the inference request to complete. */ timeout?: Duration; /** Query input. */ query: string; /** The text on which you want to perform the inference task. * It can be a single string or an array. * * > info * > Inference endpoints for the `completion` task type currently only support a single string as input. */ input: string | string[]; /** Task settings for the individual inference request. * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings; /** All values in `body` will be added to the request body. 
*/ body?: string | ({ [key: string]: any; } & { inference_id?: never; timeout?: never; query?: never; input?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { inference_id?: never; timeout?: never; query?: never; input?: never; task_settings?: never; }; } export type InferenceRerankResponse = InferenceRerankedInferenceResult; export interface InferenceSparseEmbeddingRequest extends RequestBase { /** The inference Id */ inference_id: Id; /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration; /** Inference input. * Either a string or an array of strings. */ input: string | string[]; /** Optional task settings */ task_settings?: InferenceTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { inference_id?: never; timeout?: never; input?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { inference_id?: never; timeout?: never; input?: never; task_settings?: never; }; } export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult; export interface InferenceStreamCompletionRequest extends RequestBase { /** The unique identifier for the inference endpoint. */ inference_id: Id; /** The text on which you want to perform the inference task. * It can be a single string or an array. * * NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ input: string | string[]; /** Optional task settings */ task_settings?: InferenceTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { inference_id?: never; input?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { inference_id?: never; input?: never; task_settings?: never; }; } export type InferenceStreamCompletionResponse = StreamResult; export interface InferenceTextEmbeddingRequest extends RequestBase { /** The inference Id */ inference_id: Id; /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration; /** Inference input. * Either a string or an array of strings. */ input: string | string[]; /** Optional task settings */ task_settings?: InferenceTaskSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { inference_id?: never; timeout?: never; input?: never; task_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { inference_id?: never; timeout?: never; input?: never; task_settings?: never; }; } export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult; export interface InferenceUpdateRequest extends RequestBase { /** The unique identifier of the inference endpoint. */ inference_id: Id; /** The type of inference task that the model performs. */ task_type?: InferenceTaskType; inference_config?: InferenceInferenceEndpoint; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { inference_id?: never; task_type?: never; inference_config?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { inference_id?: never; task_type?: never; inference_config?: never; }; } export type InferenceUpdateResponse = InferenceInferenceEndpointInfo; export interface IngestAppendProcessor extends IngestProcessorBase { /** The field to be appended to. * Supports template snippets. */ field: Field; /** The value to be appended. Supports template snippets. */ value: any | any[]; /** If `false`, the processor does not append values already present in the field. */ allow_duplicates?: boolean; } export interface IngestAttachmentProcessor extends IngestProcessorBase { /** The field to get the base64 encoded field from. */ field: Field; /** If `true` and field does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The number of chars being used for extraction to prevent huge fields. * Use `-1` for no limit. */ indexed_chars?: long; /** Field name from which you can overwrite the number of chars being used for extraction. */ indexed_chars_field?: Field; /** Array of properties to select to be stored. * Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`. */ properties?: string[]; /** The field that will hold the attachment information. */ target_field?: Field; /** If `true`, the binary field will be removed from the document. */ remove_binary?: boolean; /** Field containing the name of the resource to decode. * If specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection. */ resource_name?: string; } export interface IngestBytesProcessor extends IngestProcessorBase { /** The field to convert. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the converted value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestCircleProcessor extends IngestProcessorBase { /** The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`). */ error_distance: double; /** The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`. */ shape_type: IngestShapeType; /** The field to assign the polygon shape to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestCommunityIDProcessor extends IngestProcessorBase { /** Field containing the source IP address. */ source_ip?: Field; /** Field containing the source port. */ source_port?: Field; /** Field containing the destination IP address. */ destination_ip?: Field; /** Field containing the destination port. */ destination_port?: Field; /** Field containing the IANA number. */ iana_number?: Field; /** Field containing the ICMP type. */ icmp_type?: Field; /** Field containing the ICMP code. */ icmp_code?: Field; /** Field containing the transport protocol name or number. Used only when the * iana_number field is not present.
The following protocol names are currently * supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, udp */ transport?: Field; /** Output field for the community ID. */ target_field?: Field; /** Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The * seed can prevent hash collisions between network domains, such as a staging * and production network that use the same addressing scheme. */ seed?: integer; /** If `true` and any required fields are missing, the processor quietly exits * without modifying the document. */ ignore_missing?: boolean; } export interface IngestConvertProcessor extends IngestProcessorBase { /** The field whose value is to be converted. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the converted value to. * By default, the `field` is updated in-place. */ target_field?: Field; /** The type to convert the existing value to. */ type: IngestConvertType; } export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto'; export interface IngestCsvProcessor extends IngestProcessorBase { /** Value used to fill empty fields. * Empty fields are skipped if this is not provided. * An empty field is one with no value (2 consecutive separators) or empty quotes (`""`). */ empty_value?: any; /** The field to extract data from. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** Quote used in CSV; it has to be a single-character string. */ quote?: string; /** Separator used in CSV; it has to be a single-character string. */ separator?: string; /** The array of fields to assign extracted values to. */ target_fields: Fields; /** Trim whitespace in unquoted fields. */ trim?: boolean; } export interface IngestDatabaseConfiguration { /** The provider-assigned name of the IP geolocation database to download. */ name: Name; maxmind?: IngestMaxmind; ipinfo?: IngestIpinfo; } export interface IngestDatabaseConfigurationFull { web?: IngestWeb; local?: IngestLocal; /** The provider-assigned name of the IP geolocation database to download. */ name: Name; maxmind?: IngestMaxmind; ipinfo?: IngestIpinfo; } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { /** An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ date_formats?: string[]; /** How to round the date when formatting the date into the index name. Valid values are: * `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). * Supports template snippets. */ date_rounding: string; /** The field to get the date or timestamp from. */ field: Field; /** The format to be used when printing the parsed date into the index name. * A valid java time pattern is expected here. * Supports template snippets. */ index_name_format?: string; /** A prefix of the index name to be prepended before the printed date. * Supports template snippets. */ index_name_prefix?: string; /** The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.
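* * Example (hypothetical): * ``` * "locale": "en" * ```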
*/ locale?: string; /** The timezone to use when parsing the date and when date math index name expressions resolve into concrete index names. */ timezone?: string; } export interface IngestDateProcessor extends IngestProcessorBase { /** The field to get the date from. */ field: Field; /** An array of the expected date formats. * Can be a Java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ formats: string[]; /** The locale to use when parsing the date, relevant when parsing month names or week days. * Supports template snippets. */ locale?: string; /** The field that will hold the parsed date. */ target_field?: Field; /** The timezone to use when parsing the date. * Supports template snippets. */ timezone?: string; /** The format to use when writing the date to target_field. Must be a valid * Java time pattern. */ output_format?: string; } export interface IngestDissectProcessor extends IngestProcessorBase { /** The character(s) that separate the appended fields. */ append_separator?: string; /** The field to dissect. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The pattern to apply to the field. */ pattern: string; } export interface IngestDocument { /** Unique identifier for the document. * This ID must be unique within the `_index`. */ _id?: Id; /** Name of the index containing the document. */ _index?: IndexName; /** JSON body for the document. */ _source: any; } export interface IngestDocumentSimulationKeys { /** Unique identifier for the document. This ID must be unique within the `_index`. */ _id: Id; /** Name of the index containing the document. */ _index: IndexName; _ingest: IngestIngest; /** Value used to send the document to a specific primary shard. */ _routing?: string; /** JSON body for the document. */ _source: Record; _version?: SpecUtilsStringified; _version_type?: VersionType; } export type IngestDocumentSimulation = IngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType; }; export interface IngestDotExpanderProcessor extends IngestProcessorBase { /** The field to expand into an object field. * If set to `*`, all top-level fields will be expanded. */ field: Field; /** Controls the behavior when there is already an existing nested object that conflicts with the expanded field. * When `false`, the processor will merge conflicts by combining the old and the new values into an array. * When `true`, the value from the expanded field will overwrite the existing value. */ override?: boolean; /** The field that contains the field to expand. * Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields. */ path?: string; } export interface IngestDropProcessor extends IngestProcessorBase { } export interface IngestEnrichProcessor extends IngestProcessorBase { /** The field in the input document that matches the policy's match_field used to retrieve the enrichment data. * Supports template snippets. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The maximum number of matched documents to include under the configured target field.
* The `target_field` will be turned into a JSON array if `max_matches` is higher than 1, otherwise `target_field` will become a JSON object. * In order to avoid documents getting too large, the maximum allowed value is 128. */ max_matches?: integer; /** If `true`, the processor updates fields that already have a non-null value. * When set to `false`, such fields will not be touched. */ override?: boolean; /** The name of the enrich policy to use. */ policy_name: string; /** A spatial relation operator used to match the geoshape of incoming documents to documents in the enrich index. * This option is only used for `geo_match` enrich policy types. */ shape_relation?: GeoShapeRelation; /** Field added to incoming documents to contain enrich data. This field contains both the `match_field` and `enrich_fields` specified in the enrich policy. * Supports template snippets. */ target_field: Field; } export interface IngestFailProcessor extends IngestProcessorBase { /** The error message thrown by the processor. * Supports template snippets. */ message: string; } export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3'; export interface IngestFingerprintProcessor extends IngestProcessorBase { /** Array of fields to include in the fingerprint. For objects, the processor * hashes both the field key and value. For other fields, the processor hashes * only the field value. */ fields: Fields; /** Output field for the fingerprint. */ target_field?: Field; /** Salt value for the hash function. */ salt?: string; /** The hash method used to compute the fingerprint. Must be one of MD5, SHA-1, * SHA-256, SHA-512, or MurmurHash3. */ method?: IngestFingerprintDigest; /** If true, the processor ignores any missing fields. If all fields are * missing, the processor silently exits without modifying the document. */ ignore_missing?: boolean; } export interface IngestForeachProcessor extends IngestProcessorBase { /** Field containing array or object values. */ field: Field; /** If `true`, the processor silently exits without changing the document if the `field` is `null` or missing. */ ignore_missing?: boolean; /** Ingest processor to run on each element. */ processor: IngestProcessorContainer; } export interface IngestGeoGridProcessor extends IngestProcessorBase { /** The field to interpret as a geo-tile. * The field format is determined by the `tile_type`. */ field: string; /** Three tile formats are understood: geohash, geotile and geohex. */ tile_type: IngestGeoGridTileType; /** The field to assign the polygon shape to. * By default, the `field` is updated in-place. */ target_field?: Field; /** If specified and a parent tile exists, save that tile address to this field. */ parent_field?: Field; /** If specified and child tiles exist, save those tile addresses to this field as an array of strings. */ children_field?: Field; /** If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. */ non_children_field?: Field; /** If specified, save the tile precision (zoom) as an integer to this field. */ precision_field?: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** Which format to save the generated polygon in.
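*
* A minimal sketch (the field names are assumptions for illustration, not part of the spec):
*
* @example
* const geoGrid: IngestGeoGridProcessor = {
*   field: 'geocell',
*   tile_type: 'geohash',
*   target_format: 'geojson', // save the resulting polygon as GeoJSON
* };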
*/ target_format?: IngestGeoGridTargetFormat; } export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'; export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'; export interface IngestGeoIpProcessor extends IngestProcessorBase { /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ database_file?: string; /** The field to get the IP address from for the geographical lookup. */ field: Field; /** If `true`, only the first found geoip data will be returned, even if the field contains an array. */ first_only?: boolean; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** Controls what properties are added to the `target_field` based on the geoip lookup. */ properties?: string[]; /** The field that will hold the geographical information looked up from the MaxMind database. */ target_field?: Field; /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. * Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ download_database_on_pipeline_creation?: boolean; } export interface IngestGrokProcessor extends IngestProcessorBase { /** Must be `disabled` or `v1`. If `v1`, the processor uses patterns with Elastic * Common Schema (ECS) field names. */ ecs_compatibility?: string; /** The field to use for grok expression parsing. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. * Patterns matching existing names will override the pre-existing definition. */ pattern_definitions?: Record; /** An ordered list of grok expressions to match and extract named captures with. * Returns on the first expression in the list that matches. */ patterns: GrokPattern[]; /** When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched. */ trace_match?: boolean; } export interface IngestGsubProcessor extends IngestProcessorBase { /** The field to apply the replacement to. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The pattern to be replaced. */ pattern: string; /** The string to replace the matching patterns with. */ replacement: string; /** The field to assign the converted value to. * By default, the `field` is updated in-place. */ target_field?: Field; } export interface IngestHtmlStripProcessor extends IngestProcessorBase { /** The string-valued field to remove HTML tags from. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the converted value to. * By default, the `field` is updated in-place. */ target_field?: Field; } export interface IngestInferenceConfig { /** Regression configuration for inference. */ regression?: IngestInferenceConfigRegression; /** Classification configuration for inference.
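*
* For illustration (the property values are assumptions, not from the spec), a classification configuration returning the top three classes might be written as:
*
* @example
* const inferenceConfig: IngestInferenceConfig = {
*   classification: { num_top_classes: 3, results_field: 'predicted_class' },
* };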
*/ classification?: IngestInferenceConfigClassification; } export interface IngestInferenceConfigClassification { /** Specifies the number of top class predictions to return. */ num_top_classes?: integer; /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer; /** The field that is added to incoming documents to contain the inference prediction. */ results_field?: Field; /** Specifies the field to which the top classes are written. */ top_classes_results_field?: Field; /** Specifies the type of the predicted field to write. * Valid values are: `string`, `number`, `boolean`. */ prediction_field_type?: string; } export interface IngestInferenceConfigRegression { /** The field that is added to incoming documents to contain the inference prediction. */ results_field?: Field; /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer; } export interface IngestInferenceProcessor extends IngestProcessorBase { /** The ID or alias for the trained model, or the ID of the deployment. */ model_id: Id; /** Field added to incoming documents to contain results objects. */ target_field?: Field; /** Maps the document field names to the known field names of the model. * This mapping takes precedence over any default mappings provided in the model configuration. */ field_map?: Record; /** Contains the inference type and its options. */ inference_config?: IngestInferenceConfig; /** Input fields for inference and output (destination) fields for the inference results. * This option is incompatible with the target_field and field_map options. */ input_output?: IngestInputConfig | IngestInputConfig[]; /** If `true` and any of the input fields defined in `input_output` are missing, * the missing fields are quietly ignored; otherwise a missing field causes a failure. * Only applies when using input_output configurations to explicitly list the input fields. */ ignore_missing?: boolean; } export interface IngestIngest { _redact?: IngestRedact; timestamp: DateTime; pipeline?: Name; } export interface IngestInputConfig { input_field: string; output_field: string; } export interface IngestIpLocationProcessor extends IngestProcessorBase { /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ database_file?: string; /** The field to get the IP address from for the geographical lookup. */ field: Field; /** If `true`, only the first found IP location data will be returned, even if the field contains an array. */ first_only?: boolean; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** Controls what properties are added to the `target_field` based on the IP location lookup. */ properties?: string[]; /** The field that will hold the geographical information looked up from the MaxMind database. */ target_field?: Field; /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. * Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ download_database_on_pipeline_creation?: boolean; } export interface IngestIpinfo { } export interface IngestJoinProcessor extends IngestProcessorBase { /** Field containing array values to join.
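*
* An illustrative `join` configuration (the values are assumptions, not from the spec):
*
* @example
* const join: IngestJoinProcessor = { field: 'user.roles', separator: ', ' };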
*/ field: Field; /** The separator character. */ separator: string; /** The field to assign the joined value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestJsonProcessor extends IngestProcessorBase { /** Flag that forces the parsed JSON to be added at the top level of the document. * `target_field` must not be set when this option is chosen. */ add_to_root?: boolean; /** When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden. * When set to `merge`, conflicting fields will be merged. * Only applicable if `add_to_root` is set to `true`. */ add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy; /** When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys. * Instead, the last encountered value for any duplicate key wins. */ allow_duplicate_keys?: boolean; /** The field to be parsed. */ field: Field; /** The field that the converted structured object will be written into. * Any existing content in this field will be overwritten. */ target_field?: Field; } export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge'; export interface IngestKeyValueProcessor extends IngestProcessorBase { /** List of keys to exclude from the document. */ exclude_keys?: string[]; /** The field to be parsed. * Supports template snippets. */ field: Field; /** Regex pattern to use for splitting key-value pairs. */ field_split: string; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** List of keys to filter and insert into the document. * Defaults to including all keys. */ include_keys?: string[]; /** Prefix to be added to extracted keys. */ prefix?: string; /** If `true`, strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values. */ strip_brackets?: boolean; /** The field to insert the extracted keys into. * Defaults to the root of the document. * Supports template snippets. */ target_field?: Field; /** String of characters to trim from extracted keys. */ trim_key?: string; /** String of characters to trim from extracted values. */ trim_value?: string; /** Regex pattern to use for splitting the key from the value within a key-value pair. */ value_split: string; } export interface IngestLocal { type: string; } export interface IngestLowercaseProcessor extends IngestProcessorBase { /** The field to make lowercase. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the converted value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestMaxmind { account_id: Id; } export interface IngestNetworkDirectionProcessor extends IngestProcessorBase { /** Field containing the source IP address. */ source_ip?: Field; /** Field containing the destination IP address. */ destination_ip?: Field; /** Output field for the network direction. */ target_field?: Field; /** List of internal networks. Supports IPv4 and IPv6 addresses and ranges in * CIDR notation. Also supports the named ranges listed below. These may be * constructed with template snippets. Must specify only one of * internal_networks or internal_networks_field. */ internal_networks?: string[]; /** A field on the given document to read the internal_networks configuration * from.
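*
* A sketch of a field-driven configuration (the field names are assumptions, not from the spec):
*
* @example
* const networkDirection: IngestNetworkDirectionProcessor = {
*   source_ip: 'source.ip',
*   destination_ip: 'destination.ip',
*   internal_networks_field: 'labels.internal_networks', // read the networks from each document
* };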
*/ internal_networks_field?: Field; /** If true and any required fields are missing, the processor quietly exits * without modifying the document. */ ignore_missing?: boolean; } export interface IngestPipeline { /** Description of the ingest pipeline. */ description?: string; /** Processors to run immediately after a processor failure. */ on_failure?: IngestProcessorContainer[]; /** Processors used to perform transformations on documents before indexing. * Processors run sequentially in the order specified. */ processors?: IngestProcessorContainer[]; /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber; /** Marks this ingest pipeline as deprecated. * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean; /** Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata; } export interface IngestPipelineConfig { /** Description of the ingest pipeline. */ description?: string; /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber; /** Processors used to perform transformations on documents before indexing. * Processors run sequentially in the order specified. */ processors: IngestProcessorContainer[]; } export interface IngestPipelineProcessor extends IngestProcessorBase { /** The name of the pipeline to execute. * Supports template snippets. */ name: Name; /** Whether to ignore missing pipelines instead of failing. */ ignore_missing_pipeline?: boolean; } export interface IngestPipelineProcessorResult { doc?: IngestDocumentSimulation; tag?: string; processor_type?: string; status?: IngestPipelineSimulationStatusOptions; description?: string; ignored_error?: ErrorCause; error?: ErrorCause; } export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped'; export interface IngestProcessorBase { /** Description of the processor. * Useful for describing the purpose of the processor or its configuration. */ description?: string; /** Conditionally execute the processor. */ if?: Script | ScriptSource; /** Ignore failures for the processor. */ ignore_failure?: boolean; /** Handle failures for the processor. */ on_failure?: IngestProcessorContainer[]; /** Identifier for the processor. * Useful for debugging and metrics. */ tag?: string; } export interface IngestProcessorContainer { /** Appends one or more values to an existing array if the field already exists and it is an array. * Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. * Creates an array containing the provided values if the field doesn’t exist. * Accepts a single value or an array of values. */ append?: IngestAppendProcessor; /** The attachment processor lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, and PDF) by using the Apache text extraction library Tika. */ attachment?: IngestAttachmentProcessor; /** Converts a human readable byte value (for example `1kb`) to its value in bytes (for example `1024`). * If the field is an array of strings, all members of the array will be converted. * Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. * An error will occur if the field is not a supported format or resultant value exceeds 2^63. 
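*
* An illustrative usage (the field names are assumptions, not part of the spec):
*
* @example
* const processor: IngestProcessorContainer = {
*   bytes: { field: 'file.size_readable', target_field: 'file.size' }, // e.g. '1kb' becomes 1024
* };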
*/ bytes?: IngestBytesProcessor; /** Converts circle definitions of shapes to regular polygons which approximate them. */ circle?: IngestCircleProcessor; /** Computes the Community ID for network flow data as defined in the * Community ID Specification. You can use a community ID to correlate network * events related to a single flow. */ community_id?: IngestCommunityIDProcessor; /** Converts a field in the currently ingested document to a different type, such as converting a string to an integer. * If the field value is an array, all members will be converted. */ convert?: IngestConvertProcessor; /** Extracts fields from a CSV line out of a single text field within a document. * Any empty field in the CSV will be skipped. */ csv?: IngestCsvProcessor; /** Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. */ date?: IngestDateProcessor; /** The purpose of this processor is to point documents to the right time-based index based on a date or timestamp field in a document by using the date math index name support. */ date_index_name?: IngestDateIndexNameProcessor; /** Extracts structured fields out of a single text field by matching the text field against a delimiter-based pattern. */ dissect?: IngestDissectProcessor; /** Expands a field with dots into an object field. * This processor allows fields with dots in the name to be accessible by other processors in the pipeline. * Otherwise these fields can’t be accessed by any processor. */ dot_expander?: IngestDotExpanderProcessor; /** Drops the document without raising any errors. * This is useful to prevent the document from getting indexed based on some condition. */ drop?: IngestDropProcessor; /** The `enrich` processor can enrich documents with data from another index. */ enrich?: IngestEnrichProcessor; /** Raises an exception. * This is useful when you expect a pipeline to fail and want to relay a specific message to the requester. */ fail?: IngestFailProcessor; /** Computes a hash of the document’s content. You can use this hash for * content fingerprinting. */ fingerprint?: IngestFingerprintProcessor; /** Runs an ingest processor on each element of an array or object. */ foreach?: IngestForeachProcessor; /** Currently an undocumented alias for the GeoIP processor. */ ip_location?: IngestIpLocationProcessor; /** Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. * This is useful if there is a need to interact with the tile shapes as spatially indexable fields. */ geo_grid?: IngestGeoGridProcessor; /** The `geoip` processor adds information about the geographical location of an IPv4 or IPv6 address. */ geoip?: IngestGeoIpProcessor; /** Extracts structured fields out of a single text field within a document. * You choose which field to extract matched fields from, as well as the grok pattern you expect will match. * A grok pattern is like a regular expression that supports aliased expressions that can be reused. */ grok?: IngestGrokProcessor; /** Converts a string field by applying a regular expression and a replacement. * If the field is an array of strings, all members of the array will be converted. * If any non-string values are encountered, the processor will throw an exception. */ gsub?: IngestGsubProcessor; /** Removes HTML tags from the field. * If the field is an array of strings, HTML tags will be removed from all members of the array.
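*
* For example (an illustrative sketch; the field names are assumptions):
*
* @example
* const processor: IngestProcessorContainer = {
*   html_strip: { field: 'comment', target_field: 'comment_plain' },
* };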
*/ html_strip?: IngestHtmlStripProcessor; /** Uses a pre-trained data frame analytics model or a model deployed for natural language processing tasks to infer against the data that is being ingested in the pipeline. */ inference?: IngestInferenceProcessor; /** Joins each element of an array into a single string using a separator character between each element. * Throws an error when the field is not an array. */ join?: IngestJoinProcessor; /** Converts a JSON string into a structured JSON object. */ json?: IngestJsonProcessor; /** This processor helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety. */ kv?: IngestKeyValueProcessor; /** Converts a string to its lowercase equivalent. * If the field is an array of strings, all members of the array will be converted. */ lowercase?: IngestLowercaseProcessor; /** Calculates the network direction given a source IP address, destination IP * address, and a list of internal networks. */ network_direction?: IngestNetworkDirectionProcessor; /** Executes another pipeline. */ pipeline?: IngestPipelineProcessor; /** The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. * The processor can be used to obscure Personal Identifying Information (PII) by configuring it to detect known patterns such as email or IP addresses. * Text that matches a Grok pattern is replaced with a configurable string, such as `<EMAIL>` where an email address is matched, or all matches can simply be replaced with the text `<REDACTED>` if preferred. */ redact?: IngestRedactProcessor; /** Extracts the registered domain (also known as the effective top-level * domain or eTLD), sub-domain, and top-level domain from a fully qualified * domain name (FQDN). Uses the registered domains defined in the Mozilla * Public Suffix List. */ registered_domain?: IngestRegisteredDomainProcessor; /** Removes existing fields. * If one field doesn’t exist, an exception will be thrown. */ remove?: IngestRemoveProcessor; /** Renames an existing field. * If the field doesn’t exist or the new name is already used, an exception will be thrown. */ rename?: IngestRenameProcessor; /** Routes a document to another target index or data stream. * When setting the `destination` option, the target is explicitly specified and the dataset and namespace options can’t be set. * When the `destination` option is not set, this processor is in a data stream mode. Note that in this mode, the reroute processor can only be used on data streams that follow the data stream naming scheme. */ reroute?: IngestRerouteProcessor; /** Runs an inline or stored script on incoming documents. * The script runs in the `ingest` context. */ script?: IngestScriptProcessor; /** Adds a field with the specified value. * If the field already exists, its value will be replaced with the provided one. */ set?: IngestSetProcessor; /** Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. */ set_security_user?: IngestSetSecurityUserProcessor; /** Sorts the elements of an array ascending or descending. * Homogeneous arrays of numbers will be sorted numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. * Throws an error when the field is not an array. */ sort?: IngestSortProcessor; /** Splits a field into an array using a separator character.
* Only works on string fields. */ split?: IngestSplitProcessor; /** Terminates the current ingest pipeline, causing no further processors to be run. * This will normally be executed conditionally, using the `if` option. */ terminate?: IngestTerminateProcessor; /** Trims whitespace from a field. * If the field is an array of strings, all members of the array will be trimmed. * This only works on leading and trailing whitespace. */ trim?: IngestTrimProcessor; /** Converts a string to its uppercase equivalent. * If the field is an array of strings, all members of the array will be converted. */ uppercase?: IngestUppercaseProcessor; /** URL-decodes a string. * If the field is an array of strings, all members of the array will be decoded. */ urldecode?: IngestUrlDecodeProcessor; /** Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. * This URI object includes properties for the URI’s domain, path, fragment, port, query, scheme, user info, username, and password. */ uri_parts?: IngestUriPartsProcessor; /** The `user_agent` processor extracts details from the user agent string a browser sends with its web requests. * This processor adds this information by default under the `user_agent` field. */ user_agent?: IngestUserAgentProcessor; } export interface IngestRedact { /** Indicates whether the document has been redacted. */ _is_redacted: boolean; } export interface IngestRedactProcessor extends IngestProcessorBase { /** The field to be redacted. */ field: Field; /** A list of grok expressions to match and redact named captures with. */ patterns: GrokPattern[]; pattern_definitions?: Record; /** Start a redacted section with this token. */ prefix?: string; /** End a redacted section with this token. */ suffix?: string; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** If `true` and the current license does not support running redact processors, the processor quietly exits without modifying the document. */ skip_if_unlicensed?: boolean; /** If `true`, the ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted. */ trace_redact?: boolean; } export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { /** Field containing the source FQDN. */ field: Field; /** Object field containing extracted domain components. If an empty string, * the processor adds components to the document’s root. */ target_field?: Field; /** If true and any required fields are missing, the processor quietly exits * without modifying the document. */ ignore_missing?: boolean; } export interface IngestRemoveProcessor extends IngestProcessorBase { /** Fields to be removed. Supports template snippets. */ field: Fields; /** Fields to be kept. When set, all fields other than those specified are removed. */ keep?: Fields; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; } export interface IngestRenameProcessor extends IngestProcessorBase { /** The field to be renamed. * Supports template snippets. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The new name of the field. * Supports template snippets. */ target_field: Field; } export interface IngestRerouteProcessor extends IngestProcessorBase { /** A static value for the target.
Can’t be set when the dataset or namespace option is set. */ destination?: string; /** Field references or a static value for the dataset part of the data stream name. * In addition to the criteria for index names, cannot contain `-` and must be no longer than 100 characters. * Example values are nginx.access and nginx.error. * * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). * When resolving field references, the processor replaces invalid characters with `_`. Uses the `<dataset>` part * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. * * Default: `{{data_stream.dataset}}`. */ dataset?: string | string[]; /** Field references or a static value for the namespace part of the data stream name. See the criteria for * index names for allowed characters. Must be no longer than 100 characters. * * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). * When resolving field references, the processor replaces invalid characters with `_`. Uses the `<namespace>` part * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. * * Default: `{{data_stream.namespace}}`. */ namespace?: string | string[]; } export interface IngestScriptProcessor extends IngestProcessorBase { /** ID of a stored script. * If no `source` is specified, this parameter is required. */ id?: Id; /** Script language. */ lang?: ScriptLanguage; /** Object containing parameters for the script. */ params?: Record; /** Inline script. * If no `id` is specified, this parameter is required. */ source?: ScriptSource; } export interface IngestSetProcessor extends IngestProcessorBase { /** The origin field to be copied to `field`; cannot be set at the same time as `value`. * Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. */ copy_from?: Field; /** The field to insert, upsert, or update. * Supports template snippets. */ field: Field; /** If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document. */ ignore_empty_value?: boolean; /** The media type for encoding `value`. * Applies only when value is a template snippet. * Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. */ media_type?: string; /** If `true`, the processor updates fields that already have a non-null value. * When set to `false`, such fields will not be touched. */ override?: boolean; /** The value to be set for the field. * Supports template snippets. * May specify only one of `value` or `copy_from`. */ value?: any; } export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { /** The field to store the user information into. */ field: Field; /** Controls what user-related properties are added to the field. */ properties?: string[]; } export type IngestShapeType = 'geo_shape' | 'shape'; export interface IngestSimulateDocumentResult { doc?: IngestDocumentSimulation; error?: ErrorCause; processor_results?: IngestPipelineProcessorResult[]; } export interface IngestSortProcessor extends IngestProcessorBase { /** The field to be sorted. */ field: Field; /** The sort order to use. * Accepts `"asc"` or `"desc"`. */ order?: SortOrder; /** The field to assign the sorted value to. * By default, the field is updated in-place.
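*
* An illustrative `sort` configuration (the values are assumptions, not from the spec):
*
* @example
* const sort: IngestSortProcessor = { field: 'tags', order: 'asc' };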
*/ target_field?: Field; } export interface IngestSplitProcessor extends IngestProcessorBase { /** The field to split. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** Preserves empty trailing fields, if any. */ preserve_trailing?: boolean; /** A regex which matches the separator, for example, `,` or `\s+`. */ separator: string; /** The field to assign the split value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestTerminateProcessor extends IngestProcessorBase { } export interface IngestTrimProcessor extends IngestProcessorBase { /** The string-valued field to trim whitespace from. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the trimmed value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestUppercaseProcessor extends IngestProcessorBase { /** The field to make uppercase. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the converted value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestUriPartsProcessor extends IngestProcessorBase { /** Field containing the URI string. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** If `true`, the processor copies the unparsed URI to `<target_field>.original`. */ keep_original?: boolean; /** If `true`, the processor removes the `field` after parsing the URI string. * If parsing fails, the processor does not remove the `field`. */ remove_if_successful?: boolean; /** Output field for the URI object. */ target_field?: Field; } export interface IngestUrlDecodeProcessor extends IngestProcessorBase { /** The field to decode. */ field: Field; /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The field to assign the converted value to. * By default, the field is updated in-place. */ target_field?: Field; } export interface IngestUserAgentProcessor extends IngestProcessorBase { /** The field containing the user agent string. */ field: Field; /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean; /** The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with. */ regex_file?: string; /** The field that will be filled with the user agent details. */ target_field?: Field; /** Controls what properties are added to `target_field`. */ properties?: IngestUserAgentProperty[]; /** Extracts device type from the user agent string on a best-effort basis.
* @beta */ extract_device_type?: boolean; } export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'; export interface IngestWeb { } export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { /** A comma-separated list of geoip database configurations to delete */ id: Ids; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; }; } export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase; export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { /** A comma-separated list of IP location database configurations. */ id: Ids; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. * A value of `-1` indicates that the request should never time out. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; }; } export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase; export interface IngestDeletePipelineRequest extends RequestBase { /** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. * To delete all ingest pipelines in a cluster, use a value of `*`. */ id: Id; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; }; } export type IngestDeletePipelineResponse = AcknowledgedResponseBase; export interface IngestGeoIpStatsGeoIpDownloadStatistics { /** Total number of successful database downloads. */ successful_downloads: integer; /** Total number of failed database downloads. */ failed_downloads: integer; /** Total milliseconds spent downloading databases. */ total_download_time: DurationValue; /** Current number of databases available for use. */ databases_count: integer; /** Total number of database updates skipped. 
*/ skipped_updates: integer; /** Total number of databases not updated after 30 days */ expired_databases: integer; } export interface IngestGeoIpStatsGeoIpNodeDatabaseName { /** Name of the database. */ name: Name; } export interface IngestGeoIpStatsGeoIpNodeDatabases { /** Downloaded databases for the node. */ databases: IngestGeoIpStatsGeoIpNodeDatabaseName[]; /** Downloaded database files, including related license files. Elasticsearch stores these files in the node’s temporary directory: $ES_TMPDIR/geoip-databases/. */ files_in_temp: string[]; } export interface IngestGeoIpStatsRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface IngestGeoIpStatsResponse { /** Download statistics for all GeoIP2 databases. */ stats: IngestGeoIpStatsGeoIpDownloadStatistics; /** Downloaded GeoIP2 databases for each node. */ nodes: Record; } export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { id: Id; version: long; modified_date_millis: EpochTime; database: IngestDatabaseConfiguration; } export interface IngestGetGeoipDatabaseRequest extends RequestBase { /** A comma-separated list of database configuration IDs to retrieve. * Wildcard (`*`) expressions are supported. * To get all database configurations, omit this parameter or use `*`. */ id?: Ids; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface IngestGetGeoipDatabaseResponse { databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]; } export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { id: Id; version: VersionNumber; modified_date_millis?: EpochTime; modified_date?: EpochTime; database: IngestDatabaseConfigurationFull; } export interface IngestGetIpLocationDatabaseRequest extends RequestBase { /** Comma-separated list of database configuration IDs to retrieve. * Wildcard (`*`) expressions are supported. * To get all database configurations, omit this parameter or use `*`. */ id?: Ids; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; }; } export interface IngestGetIpLocationDatabaseResponse { databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[]; } export interface IngestGetPipelineRequest extends RequestBase { /** Comma-separated list of pipeline IDs to retrieve. * Wildcard (`*`) expressions are supported. * To get all ingest pipelines, omit this parameter or use `*`. */ id?: Id; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. 
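*
* As an illustration (the pipeline ID is an assumption):
*
* @example
* const request: IngestGetPipelineRequest = { id: 'my-pipeline', summary: true };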
*/ master_timeout?: Duration; /** Return pipelines without their definitions (default: false) */ summary?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; summary?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; summary?: never; }; } export type IngestGetPipelineResponse = Record; export interface IngestProcessorGrokRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface IngestProcessorGrokResponse { patterns: Record; } export interface IngestPutGeoipDatabaseRequest extends RequestBase { /** ID of the database configuration to create or update. */ id: Id; /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The provider-assigned name of the IP geolocation database to download. */ name: Name; /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. * At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ maxmind: IngestMaxmind; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; name?: never; maxmind?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; name?: never; maxmind?: never; }; } export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase; export interface IngestPutIpLocationDatabaseRequest extends RequestBase { /** The database configuration identifier. */ id: Id; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration; /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. * If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. * A value of `-1` indicates that the request should never time out. */ timeout?: Duration; configuration?: IngestDatabaseConfiguration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; configuration?: never; }); /** All values in `querystring` will be added to the request querystring. 
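*
* A minimal sketch of a request (the ID and MaxMind account value are assumptions):
*
* @example
* const request: IngestPutIpLocationDatabaseRequest = {
*   id: 'my-ip-database',
*   configuration: { name: 'GeoIP2-City', maxmind: { account_id: '1234567' } },
* };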
*/ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; configuration?: never; }; } export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase; export interface IngestPutPipelineRequest extends RequestBase { /** ID of the ingest pipeline to create or update. */ id: Id; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** Required version for optimistic concurrency control for pipeline updates */ if_version?: VersionNumber; /** Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata; /** Description of the ingest pipeline. */ description?: string; /** Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. */ on_failure?: IngestProcessorContainer[]; /** Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. */ processors?: IngestProcessorContainer[]; /** Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. */ version?: VersionNumber; /** Marks this ingest pipeline as deprecated. * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; if_version?: never; _meta?: never; description?: never; on_failure?: never; processors?: never; version?: never; deprecated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; master_timeout?: never; timeout?: never; if_version?: never; _meta?: never; description?: never; on_failure?: never; processors?: never; version?: never; deprecated?: never; }; } export type IngestPutPipelineResponse = AcknowledgedResponseBase; export interface IngestSimulateRequest extends RequestBase { /** The pipeline to test. * If you don't specify a `pipeline` in the request body, this parameter is required. */ id?: Id; /** If `true`, the response includes output data for each processor in the executed pipeline. */ verbose?: boolean; /** Sample documents to test in the pipeline. */ docs: IngestDocument[]; /** The pipeline to test. * If you don't specify the `pipeline` request path parameter, this parameter is required. * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline?: IngestPipeline; /** All values in `body` will be added to the request body. 
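*
* An illustrative simulation request (the sample document and processor are assumptions):
*
* @example
* const request: IngestSimulateRequest = {
*   docs: [{ _source: { message: 'Hello World' } }],
*   pipeline: { processors: [{ lowercase: { field: 'message' } }] },
* };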
*/ body?: string | ({ [key: string]: any; } & { id?: never; verbose?: never; docs?: never; pipeline?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; verbose?: never; docs?: never; pipeline?: never; }; } export interface IngestSimulateResponse { docs: IngestSimulateDocumentResult[]; } export interface LicenseLicense { expiry_date_in_millis: EpochTime; issue_date_in_millis: EpochTime; start_date_in_millis?: EpochTime; issued_to: string; issuer: string; max_nodes?: long | null; max_resource_units?: long; signature: string; type: LicenseLicenseType; uid: string; } export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired'; export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise'; export interface LicenseDeleteRequest extends RequestBase { /** The period to wait for a connection to the master node. */ master_timeout?: Duration; /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export type LicenseDeleteResponse = AcknowledgedResponseBase; export interface LicenseGetLicenseInformation { expiry_date?: DateTime; expiry_date_in_millis?: EpochTime; issue_date: DateTime; issue_date_in_millis: EpochTime; issued_to: string; issuer: string; max_nodes: long | null; max_resource_units?: integer | null; status: LicenseLicenseStatus; type: LicenseLicenseType; uid: Uuid; start_date_in_millis: EpochTime; } export interface LicenseGetRequest extends RequestBase { /** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. * This parameter is deprecated and will always be set to true in 8.x. */ accept_enterprise?: boolean; /** Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. */ local?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { accept_enterprise?: never; local?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { accept_enterprise?: never; local?: never; }; } export interface LicenseGetResponse { license: LicenseGetLicenseInformation; } export interface LicenseGetBasicStatusRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface LicenseGetBasicStatusResponse { eligible_to_start_basic: boolean; } export interface LicenseGetTrialStatusRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; }; } export interface LicenseGetTrialStatusResponse { eligible_to_start_trial: boolean; } export interface LicensePostAcknowledgement { license: string[]; message: string; } export interface LicensePostRequest extends RequestBase { /** Specifies whether you acknowledge the license changes. */ acknowledge?: boolean; /** The period to wait for a connection to the master node. */ master_timeout?: Duration; /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; license?: LicenseLicense; /** A sequence of one or more JSON documents containing the license information. */ licenses?: LicenseLicense[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { acknowledge?: never; master_timeout?: never; timeout?: never; license?: never; licenses?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { acknowledge?: never; master_timeout?: never; timeout?: never; license?: never; licenses?: never; }; } export interface LicensePostResponse { acknowledge?: LicensePostAcknowledgement; acknowledged: boolean; license_status: LicenseLicenseStatus; } export interface LicensePostStartBasicRequest extends RequestBase { /** Whether the user has acknowledged the acknowledge messages (default: false). */ acknowledge?: boolean; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { acknowledge?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { acknowledge?: never; master_timeout?: never; timeout?: never; }; } export interface LicensePostStartBasicResponse { acknowledged: boolean; basic_was_started: boolean; error_message?: string; type?: LicenseLicenseType; acknowledge?: Record; } export interface LicensePostStartTrialRequest extends RequestBase { /** Whether the user has acknowledged the acknowledge messages (default: false). */ acknowledge?: boolean; type_query_string?: string; /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { acknowledge?: never; type_query_string?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { acknowledge?: never; type_query_string?: never; master_timeout?: never; }; } export interface LicensePostStartTrialResponse { acknowledged: boolean; error_message?: string; trial_was_started: boolean; type?: LicenseLicenseType; } export interface LogstashPipeline { /** A description of the pipeline. * This description is not used by Elasticsearch or Logstash. */ description: string; /** The date the pipeline was last updated. * It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. */ last_modified: DateTime; /** The configuration for the pipeline. */ pipeline: string; /** Optional metadata about the pipeline, which can have any contents.
* This metadata is not generated or used by Elasticsearch or Logstash. */ pipeline_metadata: LogstashPipelineMetadata; /** Settings for the pipeline. * It supports only flat keys in dot notation. */ pipeline_settings: LogstashPipelineSettings; /** The user who last updated the pipeline. */ username: string; } export interface LogstashPipelineMetadata { type: string; version: string; } export interface LogstashPipelineSettings { /** The number of workers that will, in parallel, execute the filter and output stages of the pipeline. */ 'pipeline.workers': integer; /** The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. */ 'pipeline.batch.size': integer; /** When creating pipeline event batches, how long in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. */ 'pipeline.batch.delay': integer; /** The internal queuing model to use for event buffering. */ 'queue.type': string; /** The total capacity of the queue (`queue.type: persisted`) in number of bytes. */ 'queue.max_bytes': string; /** The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). */ 'queue.checkpoint.writes': integer; } export interface LogstashDeletePipelineRequest extends RequestBase { /** An identifier for the pipeline. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type LogstashDeletePipelineResponse = boolean; export interface LogstashGetPipelineRequest extends RequestBase { /** A comma-separated list of pipeline identifiers. */ id?: Ids; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type LogstashGetPipelineResponse = Record; export interface LogstashPutPipelineRequest extends RequestBase { /** An identifier for the pipeline. */ id: Id; pipeline?: LogstashPipeline; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; pipeline?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; pipeline?: never; }; } export type LogstashPutPipelineResponse = boolean; export interface MigrationDeprecationsDeprecation { /** Optional details about the deprecation warning. */ details?: string; /** The level property describes the significance of the issue. */ level: MigrationDeprecationsDeprecationLevel; /** Descriptive information about the deprecation warning. */ message: string; /** A link to the breaking change documentation, where you can find more information about this change. */ url: string; resolve_during_rolling_upgrade: boolean; _meta?: Record; } export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical'; export interface MigrationDeprecationsRequest extends RequestBase { /** A comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. */ index?: IndexName; /** All values in `body` will be added to the request body.
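*/

// Illustrative sketch (not part of the generated types): storing a Logstash
// pipeline. Every LogstashPipeline field below is required by the interface
// above; the id, config string, and settings values are placeholders.
async function savePipeline(es: import('@elastic/elasticsearch').Client): Promise<void> {
  const pipeline: LogstashPipeline = {
    description: 'Reads from stdin, writes to stdout',
    last_modified: '2024-01-01T00:00:00.000Z',
    pipeline: 'input { stdin {} } output { stdout {} }',
    pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
    pipeline_settings: {
      'pipeline.workers': 1,
      'pipeline.batch.size': 125,
      'pipeline.batch.delay': 50,
      'queue.type': 'memory',
      'queue.max_bytes': '1gb',
      'queue.checkpoint.writes': 1024,
    },
    username: 'elastic',
  };
  await es.logstash.putPipeline({ id: 'demo-pipeline', pipeline });
}

/**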
*/ body?: string | ({ [key: string]: any; } & { index?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; }; } export interface MigrationDeprecationsResponse { /** Cluster-level deprecation warnings. */ cluster_settings: MigrationDeprecationsDeprecation[]; /** Index warnings are sectioned off per index and can be filtered using an index-pattern in the query. * This section includes warnings for the backing indices of data streams specified in the request path. */ index_settings: Record; data_streams: Record; /** Node-level deprecation warnings. * Since only a subset of your nodes might incorporate these settings, it is important to read the details section for more information about which nodes are affected. */ node_settings: MigrationDeprecationsDeprecation[]; /** Machine learning-related deprecation warnings. */ ml_settings: MigrationDeprecationsDeprecation[]; /** Template warnings are sectioned off per template and include deprecations for both component templates and * index templates. */ templates: Record; /** ILM policy warnings are sectioned off per policy. */ ilm_policies: Record; } export interface MigrationGetFeatureUpgradeStatusMigrationFeature { feature_name: string; minimum_index_version: VersionString; migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus; indices: MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo[]; } export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo { index: IndexName; version: VersionString; failure_cause?: ErrorCause; } export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR'; export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface MigrationGetFeatureUpgradeStatusResponse { features: MigrationGetFeatureUpgradeStatusMigrationFeature[]; migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus; } export interface MigrationPostFeatureUpgradeMigrationFeature { feature_name: string; } export interface MigrationPostFeatureUpgradeRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface MigrationPostFeatureUpgradeResponse { accepted: boolean; features: MigrationPostFeatureUpgradeMigrationFeature[]; } export interface MlAdaptiveAllocationsSettings { /** If true, adaptive_allocations is enabled */ enabled: boolean; /** Specifies the minimum number of allocations to scale to. * If set, it must be greater than or equal to 0. * If not defined, the deployment scales to 0. */ min_number_of_allocations?: integer; /** Specifies the maximum number of allocations to scale to. * If set, it must be greater than or equal to min_number_of_allocations. */ max_number_of_allocations?: integer; } export interface MlAnalysisConfig { /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a * whole number of buckets in one day. 
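*/

// Illustrative sketch (not part of the generated types): surfacing critical
// deprecation warnings before an upgrade. The index pattern is a placeholder.
async function criticalDeprecations(es: import('@elastic/elasticsearch').Client): Promise<void> {
  const resp: MigrationDeprecationsResponse = await es.migration.deprecations({ index: 'logs-*' });
  for (const warning of resp.cluster_settings) {
    if (warning.level === 'critical') {
      console.warn(warning.message, warning.url);
    }
  }
}

/**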
If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */ bucket_span?: Duration; /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */ categorization_analyzer?: MlCategorizationAnalyzer; /** If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field; /** If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */ categorization_filters?: string[]; /** Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */ detectors: MlDetector[]; /** A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers?: Field[]; /** The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */ latency?: Duration; /** Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration; /** This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. 
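*/

// Illustrative MlAnalysisConfig sketch covering the fields above: a 15-minute
// bucket span, one detector, and an influencer. The MlDetector shape
// (`function`, `field_name`, `by_field_name`) is declared elsewhere in this
// file; the field names are placeholders.
const exampleAnalysisConfig: MlAnalysisConfig = {
  bucket_span: '15m',
  detectors: [{ function: 'mean', field_name: 'responsetime', by_field_name: 'airline' }],
  influencers: ['airline'],
  model_prune_window: '30d', // a multiple of bucket_span, as required above
};

/**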
If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */ multivariate_by_fields?: boolean; /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization; /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. */ summary_count_field_name?: Field; } export interface MlAnalysisConfigRead { /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */ bucket_span: Duration; /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. * This property cannot be used at the same time as `categorization_filters`. * The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */ categorization_analyzer?: MlCategorizationAnalyzer; /** If this property is specified, the values of the specified field will be categorized. * The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field; /** If `categorization_field_name` is specified, you can also define optional filters. * This property expects an array of regular expressions. * The expressions are used to filter out matching sequences from the categorization field values. */ categorization_filters?: string[]; /** An array of detector configuration objects. * Detector configuration objects specify which data fields a job analyzes. * They also specify which analytical functions are used. * You can specify multiple detectors for a job. */ detectors: MlDetectorRead[]; /** A comma separated list of influencer field names. * Typically these can be the by, over, or partition fields that are used in the detector configuration. * You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. * When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers: Field[]; /** Advanced configuration option. * Affects the pruning of models that have not been updated for the given time duration. * The value must be set to a multiple of the `bucket_span`. * If set too low, important information may be removed from the model. * Typically, set to `30d` or longer. * If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. 
* For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration; /** The size of the window in which to expect data that is out of time order. * Defaults to no latency. * If you specify a non-zero value, it must be greater than or equal to one second. */ latency?: Duration; /** This functionality is reserved for internal use. * It is not supported for use in customer environments and is not subject to the support SLA of official GA features. * If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */ multivariate_by_fields?: boolean; /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization; /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. * This property value is the name of the field that contains the count of raw data points that have been summarized. * The same `summary_count_field_name` applies to all detectors in the job. */ summary_count_field_name?: Field; } export interface MlAnalysisLimits { /** The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */ categorization_examples_limit?: long; /** The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a byte size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */ model_memory_limit?: ByteSize; } export interface MlAnalysisMemoryLimit { /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ model_memory_limit: string; } export interface MlAnomaly { /** The actual value for the bucket. */ actual?: double[]; /** Information about the factors impacting the initial anomaly score. */ anomaly_score_explanation?: MlAnomalyExplanation; /** The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */ bucket_span: DurationValue; /** The field used to split the data. 
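*/

// Illustrative MlAnalysisLimits sketch: a string with explicit units is the
// recommended way to set model_memory_limit, as the docs above note.
const exampleLimits: MlAnalysisLimits = {
  categorization_examples_limit: 4,
  model_memory_limit: '64mb',
};

/**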
In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: string; /** The value of `by_field_name`. */ by_field_value?: string; /** For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability and scores are not applicable to causes. */ causes?: MlAnomalyCause[]; /** A unique identifier for the detector. */ detector_index: integer; /** Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */ field_name?: string; /** The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */ function?: string; /** The description of the function in which the anomaly occurs, as specified in the detector configuration. */ function_description?: string; /** If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */ geo_results?: MlGeoResults; /** If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */ influencers?: MlInfluence[]; /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */ initial_record_score: double; /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean; /** Identifier for the anomaly detection job. */ job_id: string; /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ over_field_name?: string; /** The value of `over_field_name`. */ over_field_value?: string; /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: string; /** The value of `partition_field_name`. */ partition_field_value?: string; /** The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. */ probability: double; /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */ record_score: double; /** Internal. This is always set to `record`. 
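*/

// Illustrative sketch (not part of the generated types): fetching high-scoring
// anomaly records for a job and reading the MlAnomaly fields documented here.
// The job id and score threshold are placeholders.
async function highScoreRecords(es: import('@elastic/elasticsearch').Client): Promise<void> {
  const resp = await es.ml.getRecords({ job_id: 'my-job', record_score: 75 });
  for (const record of resp.records) {
    console.log(record.timestamp, record.record_score, record.partition_field_value);
  }
}

/**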
*/ result_type: string; /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime; /** The typical value for the bucket, according to analytical modeling. */ typical?: double[]; } export interface MlAnomalyCause { actual?: double[]; by_field_name?: Name; by_field_value?: string; correlated_by_field_value?: string; field_name?: Field; function?: string; function_description?: string; geo_results?: MlGeoResults; influencers?: MlInfluence[]; over_field_name?: Name; over_field_value?: string; partition_field_name?: string; partition_field_value?: string; probability: double; typical?: double[]; } export interface MlAnomalyExplanation { /** Impact from the duration and magnitude of the detected anomaly relative to the historical average. */ anomaly_characteristics_impact?: integer; /** Length of the detected anomaly in the number of buckets. */ anomaly_length?: integer; /** Type of the detected anomaly: `spike` or `dip`. */ anomaly_type?: string; /** Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */ high_variance_penalty?: boolean; /** If the bucket contains fewer samples than expected, the score is reduced. */ incomplete_bucket_penalty?: boolean; /** Lower bound of the 95% confidence interval. */ lower_confidence_bound?: double; /** Impact of the deviation between actual and typical values in the past 12 buckets. */ multi_bucket_impact?: integer; /** Impact of the deviation between actual and typical values in the current bucket. */ single_bucket_impact?: integer; /** Typical (expected) value for this bucket. */ typical_value?: double; /** Upper bound of the 95% confidence interval. */ upper_confidence_bound?: double; } export interface MlApiKeyAuthorization { /** The identifier for the API key. */ id: string; /** The name of the API key. */ name: string; } export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time'; export interface MlBucketInfluencer { /** A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as * newer data is analyzed. */ anomaly_score: double; /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue; /** The field name of the influencer. */ influencer_field_name: Field; /** The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the * time the bucket was processed. */ initial_anomaly_score: double; /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean; /** Identifier for the anomaly detection job. */ job_id: Id; /** The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision * of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of * this. */ probability: double; /** Internal. */ raw_anomaly_score: double; /** Internal. This value is always set to `bucket_influencer`. */ result_type: string; /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime; /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime; } export interface MlBucketSummary { /** The maximum anomaly score, between 0-100, for any of the bucket influencers. 
This is an overall, rate-limited * score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as * new data is analyzed. */ anomaly_score: double; bucket_influencers: MlBucketInfluencer[]; /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue; /** The number of input data records processed in this bucket. */ event_count: long; /** The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the * time the bucket was processed. */ initial_anomaly_score: double; /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean; /** Identifier for the anomaly detection job. */ job_id: Id; /** The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. */ processing_time_ms: DurationValue; /** Internal. This value is always set to `bucket`. */ result_type: string; /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the * timestamp of the bucket are included in the results for the bucket. */ timestamp: EpochTime; /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the * timestamp of the bucket are included in the results for the bucket. */ timestamp_string?: DateTime; } export interface MlCalendarEvent { /** A string that uniquely identifies a calendar. */ calendar_id?: Id; event_id?: Id; /** A description of the scheduled event. */ description: string; /** The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ end_time: DateTime; /** The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ start_time: DateTime; /** When true, the model will not create results for this calendar period. */ skip_result?: boolean; /** When true, the model will not be updated for this calendar period. */ skip_model_update?: boolean; /** Shift time by this many seconds. For example, adjust time for daylight saving changes. */ force_time_shift?: integer; } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition; export interface MlCategorizationAnalyzerDefinition { /** One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */ char_filter?: AnalysisCharFilter[]; /** One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */ filter?: AnalysisTokenFilter[]; /** The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English.
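*/

// Illustrative sketch (not part of the generated types): registering a
// maintenance window as a scheduled calendar event so the model neither
// produces results nor updates itself during that period. The ids and dates
// are placeholders, and the calendar is assumed to exist already.
async function addMaintenanceWindow(es: import('@elastic/elasticsearch').Client): Promise<void> {
  const event: MlCalendarEvent = {
    description: 'Planned maintenance',
    start_time: '2024-06-01T00:00:00Z',
    end_time: '2024-06-01T04:00:00Z',
    skip_result: true,
    skip_model_update: true,
  };
  await es.ml.postCalendarEvents({ calendar_id: 'ops-calendar', events: [event] });
}

/**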
If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`. */ tokenizer?: AnalysisTokenizer; } export type MlCategorizationStatus = 'ok' | 'warn'; export interface MlCategory { /** A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */ category_id: ulong; /** A list of examples of actual values that matched the category. */ examples: string[]; /** [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */ grok_pattern?: GrokPattern; /** Identifier for the anomaly detection job. */ job_id: Id; /** The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. */ max_matching_length: ulong; /** If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */ partition_field_name?: string; /** If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */ partition_field_value?: string; /** A regular expression that is used to search for values that match the category. */ regex: string; /** A space separated list of the common tokens that are matched in values of the category. */ terms: string; /** The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job _flush or _close */ num_matches?: long; /** A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. This is only guaranteed to have the latest accurate list of categories after a job _flush or _close */ preferred_to_categories?: Id[]; p?: string; result_type: string; mlcategory: string; } export interface MlChunkingConfig { /** If the mode is `auto`, the chunk size is dynamically calculated; * this is the recommended value when the datafeed does not use aggregations. * If the mode is `manual`, chunking is applied according to the specified `time_span`; * use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */ mode: MlChunkingMode; /** The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. */ time_span?: Duration; } export type MlChunkingMode = 'auto' | 'manual' | 'off'; export interface MlClassificationInferenceOptions { /** Specifies the number of top class predictions to return. Defaults to 0. 
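*/

// Illustrative MlChunkingConfig sketch: manual chunking with a fixed time
// span, the combination the docs above recommend when a datafeed uses
// aggregations.
const exampleChunking: MlChunkingConfig = {
  mode: 'manual',
  time_span: '3h',
};

/**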
*/ num_top_classes?: integer; /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer; /** Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */ prediction_field_type?: string; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** Specifies the field to which the top classes are written. Defaults to top_classes. */ top_classes_results_field?: string; } export interface MlCommonTokenizationConfig { /** Should the tokenizer lower case the text */ do_lower_case?: boolean; /** Maximum input sequence length for the model */ max_sequence_length?: integer; /** Tokenization spanning options. Special value of -1 indicates no spanning takes place */ span?: integer; /** Should tokenization input be automatically truncated before sending to the model for inference */ truncate?: MlTokenizationTruncate; /** Is tokenization completed with special tokens */ with_special_tokens?: boolean; } export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'; export type MlCustomSettings = any; export interface MlDataCounts { bucket_count: long; earliest_record_timestamp?: long; empty_bucket_count: long; input_bytes: long; input_field_count: long; input_record_count: long; invalid_date_count: long; job_id: Id; last_data_time?: long; latest_empty_bucket_timestamp?: long; latest_record_timestamp?: long; latest_sparse_bucket_timestamp?: long; latest_bucket_timestamp?: long; log_time?: long; missing_field_count: long; out_of_order_timestamp_count: long; processed_field_count: long; processed_record_count: long; sparse_bucket_count: long; } export interface MlDataDescription { /** Only JSON format is supported at this time. */ format?: string; /** The name of the field that contains the timestamp. */ time_field?: Field; /** The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */ time_format?: string; field_delimiter?: string; } export interface MlDatafeed { aggregations?: Record; /** @alias aggregations */ aggs?: Record; /** The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. 
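*/

// Illustrative MlDataDescription sketch: epoch-millisecond timestamps read
// from a `timestamp` field (a placeholder name).
const exampleDataDescription: MlDataDescription = {
  time_field: 'timestamp',
  time_format: 'epoch_ms',
};

/**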
*/ authorization?: MlDatafeedAuthorization; chunking_config?: MlChunkingConfig; datafeed_id: Id; frequency?: Duration; indices: string[]; indexes?: string[]; job_id: Id; max_empty_searches?: integer; query: QueryDslQueryContainer; query_delay?: Duration; script_fields?: Record; scroll_size?: integer; delayed_data_check_config: MlDelayedDataCheckConfig; runtime_mappings?: MappingRuntimeFields; indices_options?: IndicesOptions; } export interface MlDatafeedAuthorization { /** If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization; /** If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */ roles?: string[]; /** If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */ service_account?: string; } export interface MlDatafeedConfig { /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record; /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. * @alias aggregations */ aggs?: Record; /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated and is an advanced configuration option. */ chunking_config?: MlChunkingConfig; /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */ datafeed_id?: Id; /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig; /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration; /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices; /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. * @alias indices */ indexes?: Indices; /** Specifies index expansion options that are used during search. 
*/ indices_options?: IndicesOptions; job_id?: Id; /** If a real-time datafeed has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated job after this many real-time searches that return no documents. In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */ max_empty_searches?: integer; /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer; /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ query_delay?: Duration; /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields; /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record; /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer; } export interface MlDatafeedRunningState { /** Indicates if the datafeed is "real-time"; meaning that the datafeed has no configured `end` time. */ real_time_configured: boolean; /** Indicates whether the datafeed has finished running on the available past data. * For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */ real_time_running: boolean; /** Provides the latest time interval the datafeed has searched. */ search_interval?: MlRunningStateSearchInterval; } export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'; export interface MlDatafeedStats { /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string; /** A numerical character string that uniquely identifies the datafeed. * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. * It must start and end with alphanumeric characters. */ datafeed_id: Id; /** For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact; /** The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */ state: MlDatafeedState; /** An object that provides statistical information about timing aspect of this datafeed. */ timing_stats?: MlDatafeedTimingStats; /** An object containing the running state for this datafeed. * It is only provided if the datafeed is started. */ running_state?: MlDatafeedRunningState; } export interface MlDatafeedTimingStats { /** The number of buckets processed. 
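*/

// Illustrative MlDatafeedConfig sketch tying the fields above together; the
// datafeed id, job id, and index pattern are placeholders.
const exampleDatafeed: MlDatafeedConfig = {
  datafeed_id: 'datafeed-my-job',
  job_id: 'my-job',
  indices: ['my-logs-*'],
  query: { match_all: {} },
  query_delay: '90s',
  scroll_size: 1000,
};

/**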
*/ bucket_count: long; /** The exponential average search time per hour, in milliseconds. */ exponential_average_search_time_per_hour_ms: DurationValue; exponential_average_calculation_context?: MlExponentialAverageCalculationContext; /** Identifier for the anomaly detection job. */ job_id: Id; /** The number of searches run by the datafeed. */ search_count: long; /** The total time the datafeed spent searching, in milliseconds. */ total_search_time_ms: DurationValue; /** The average search time per bucket, in milliseconds. */ average_search_time_per_bucket_ms?: DurationValue; } export interface MlDataframeAnalysis { /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */ alpha?: double; /** Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as continuous target variable. * For classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. * For regression analysis, the data type of the field must be numeric. */ dependent_variable: string; /** Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */ downsample_factor?: double; /** Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable. */ early_stopping_enabled?: boolean; /** Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */ eta?: double; /** Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */ eta_growth_rate_per_tree?: double; /** Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double; /** Advanced configuration option. 
A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index. Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */ feature_processors?: MlDataframeAnalysisFeatureProcessor[]; /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ gamma?: double; /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ lambda?: double; /** Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer; /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer; /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. * @alias max_trees */ maximum_number_trees?: integer; /** Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */ num_top_feature_importance_values?: integer; /** Defines the name of the prediction field in the results. Defaults to `_prediction`. */ prediction_field_name?: Field; /** Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */ randomize_seed?: double; /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. 
* This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer; /** Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double; /** Defines what percentage of the eligible documents will be used for training. Documents that are ignored by the analysis (for example those that contain arrays with more than one value) won’t be included in the calculation for used percentage. */ training_percent?: Percentage; } export interface MlDataframeAnalysisAnalyzedFields { /** An array of strings that defines the fields that will be included in the analysis. */ includes?: string[]; /** An array of strings that defines the fields that will be excluded from the analysis. You do not need to add fields with unsupported data types to excludes; these fields are excluded from the analysis automatically. */ excludes?: string[]; } export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { class_assignment_objective?: string; /** Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */ num_top_classes?: integer; } export interface MlDataframeAnalysisContainer { /** The configuration information necessary to perform classification. NOTE: Advanced parameters are for fine-tuning classification analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ classification?: MlDataframeAnalysisClassification; /** The configuration information necessary to perform outlier detection. */ outlier_detection?: MlDataframeAnalysisOutlierDetection; /** The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ regression?: MlDataframeAnalysisRegression; } export interface MlDataframeAnalysisFeatureProcessor { /** The configuration information necessary to perform frequency encoding. */ frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding; /** The configuration information necessary to perform multi encoding. It allows multiple processors to be chained together. This way the output of a processor can then be passed to another as an input. */ multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding; /** The configuration information necessary to perform n-gram encoding.
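*/

// Illustrative MlDataframeAnalysisContainer sketch for a classification job.
// `num_top_classes: -1` reports probabilities for every category, which the
// AUC ROC note above requires; the dependent variable name is a placeholder.
const exampleAnalysis: MlDataframeAnalysisContainer = {
  classification: {
    dependent_variable: 'churned',
    num_top_classes: -1,
    training_percent: 80,
  },
};

/**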
Features created by this encoder have the following name format: `<feature_prefix>.<ngram><string position>`. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */ n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding; /** The configuration information necessary to perform one hot encoding. */ one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding; /** The configuration information necessary to perform target mean encoding. */ target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding; } export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { /** The resulting feature name. */ feature_name: Name; field: Field; /** The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */ frequency_map: Record; } export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { /** The ordered array of custom processors to execute. It must contain more than one processor. */ processors: integer[]; } export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { /** The feature name prefix. Defaults to `ngram_<start>_<length>`. */ feature_prefix?: string; /** The name of the text field to encode. */ field: Field; /** Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */ length?: integer; /** Specifies which n-grams to gather. It’s an array of integer values where the minimum value is 1 and the maximum value is 5. */ n_grams: integer[]; /** Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */ start?: integer; custom?: boolean; } export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { /** The name of the field to encode. */ field: Field; /** The one hot map mapping the field value with the column name. */ hot_map: string; } export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { /** The default value if field value is not found in the target_map. */ default_value: integer; /** The resulting feature name. */ feature_name: Name; /** The name of the field to encode. */ field: Field; /** The field value to target mean transition map. */ target_map: Record; } export interface MlDataframeAnalysisOutlierDetection { /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean; /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */ feature_influence_threshold?: double; /** The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is `ensemble`, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string; /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer; /** The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers.
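*/

// Illustrative MlDataframeAnalysisFeatureProcessor sketch: one-hot encode a
// categorical field. Note that `hot_map` is typed as a string above; the JSON
// mapping shown is a placeholder.
const exampleProcessor: MlDataframeAnalysisFeatureProcessor = {
  one_hot_encoding: {
    field: 'plan',
    hot_map: '{"basic":"plan_basic","premium":"plan_premium"}',
  },
};

/**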
*/ outlier_fraction?: double; /** If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. */ standardization_enabled?: boolean; } export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { /** The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss). */ loss_function?: string; /** A positive number that is used as a parameter to the `loss_function`. */ loss_function_parameter?: double; } export interface MlDataframeAnalytics { /** An object containing information about the analysis job. */ analysis_stats?: MlDataframeAnalyticsStatsContainer; /** For running jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string; /** An object that provides counts for the quantity of documents skipped, used in training, or available for testing. */ data_counts: MlDataframeAnalyticsStatsDataCounts; /** The unique identifier of the data frame analytics job. */ id: Id; /** An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */ memory_usage: MlDataframeAnalyticsStatsMemoryUsage; /** Contains properties for the node that runs the job. This information is available only for running jobs. * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes; /** The progress report of the data frame analytics job by phase. */ progress: MlDataframeAnalyticsStatsProgress[]; /** The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */ state: MlDataframeState; } export interface MlDataframeAnalyticsAuthorization { /** If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization; /** If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */ roles?: string[]; /** If a service account was used for the most recent update to the job, the account name is listed in the response. */ service_account?: string; } export interface MlDataframeAnalyticsDestination { /** Defines the destination index to store the results of the data frame analytics job. */ index: IndexName; /** Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. */ results_field?: Field; } export interface MlDataframeAnalyticsFieldSelection { /** Whether the field is selected to be included in the analysis. */ is_included: boolean; /** Whether the field is required. */ is_required: boolean; /** The feature type of this field for the analysis. May be categorical or numerical. */ feature_type?: string; /** The mapping types of the field. */ mapping_types: string[]; /** The field name. */ name: Field; /** The reason a field is not selected to be included in the analysis. */ reason?: string; } export interface MlDataframeAnalyticsMemoryEstimation { /** Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk as using disk allows limiting the main memory needed to perform data frame analytics.
*/ expected_memory_with_disk: string; /** Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */ expected_memory_without_disk: string; } export interface MlDataframeAnalyticsSource { /** Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns. NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */ index: Indices; /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */ query?: QueryDslQueryContainer; /** Definitions of runtime fields that will become part of the mapping of the destination index. */ runtime_mappings?: MappingRuntimeFields; /** Specify `includes` and/or `excludes` patterns to select which fields will be present in the destination. Fields that are excluded cannot be included in the analysis. */ _source?: MlDataframeAnalysisAnalyzedFields | string[]; } export interface MlDataframeAnalyticsStatsContainer { /** An object containing information about the classification analysis job. */ classification_stats?: MlDataframeAnalyticsStatsHyperparameters; /** An object containing information about the outlier detection job. */ outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection; /** An object containing information about the regression analysis. */ regression_stats?: MlDataframeAnalyticsStatsHyperparameters; } export interface MlDataframeAnalyticsStatsDataCounts { /** The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */ skipped_docs_count: integer; /** The number of documents that are not used for training the model and can be used for testing. */ test_docs_count: integer; /** The number of documents that are used for training the model. */ training_docs_count: integer; } export interface MlDataframeAnalyticsStatsHyperparameters { /** An object containing the parameters of the classification analysis job. */ hyperparameters: MlHyperparameters; /** The number of iterations on the analysis. */ iteration: integer; /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime; /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats; /** An object containing information about validation loss. */ validation_loss: MlValidationLoss; } export interface MlDataframeAnalyticsStatsMemoryUsage { /** This value is present when the status is hard_limit and it is a new estimate of how much memory the job needs. */ memory_reestimate_bytes?: long; /** The number of bytes used at the highest peak of memory usage. */ peak_usage_bytes: long; /** The memory usage status. */ status: string; /** The timestamp when memory usage was calculated.
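*
* A hypothetical report for a job that stayed within its memory limit:
* @example
* const memoryUsage: MlDataframeAnalyticsStatsMemoryUsage = {
*   peak_usage_bytes: 150000,
*   status: 'ok', // a `hard_limit` status would also carry `memory_reestimate_bytes`
* };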
*/ timestamp?: EpochTime; } export interface MlDataframeAnalyticsStatsOutlierDetection { /** The list of job parameters specified by the user or determined by algorithmic heuristics. */ parameters: MlOutlierDetectionParameters; /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime; /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats; } export interface MlDataframeAnalyticsStatsProgress { /** Defines the phase of the data frame analytics job. */ phase: string; /** The progress that the data frame analytics job has made expressed in percentage. */ progress_percent: integer; } export interface MlDataframeAnalyticsSummary { allow_lazy_start?: boolean; analysis: MlDataframeAnalysisContainer; analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]; /** The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. */ authorization?: MlDataframeAnalyticsAuthorization; create_time?: EpochTime; description?: string; dest: MlDataframeAnalyticsDestination; id: Id; max_num_threads?: integer; model_memory_limit?: string; source: MlDataframeAnalyticsSource; version?: VersionString; _meta?: Metadata; } export interface MlDataframeEvaluationClassification { /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field; /** The field in the index which contains the predicted value, in other words the results of the classification analysis. */ predicted_field?: Field; /** The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */ top_classes_field?: Field; /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationClassificationMetrics; } export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { /** Accuracy of predictions (per-class and overall). */ accuracy?: Record; /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: Record; } export interface MlDataframeEvaluationClassificationMetricsAucRoc { /** Name of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */ class_name?: Name; /** Whether or not the curve should be returned in addition to the score. Default value is false. */ include_curve?: boolean; } export interface MlDataframeEvaluationContainer { /** Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlDataframeEvaluationClassification; /** Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */ outlier_detection?: MlDataframeEvaluationOutlierDetection; /** Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. 
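*
* Exactly one evaluation type is set per request. A sketch using regression, where the
* field names are assumptions about the caller's destination index, not API constants:
* @example
* const evaluation: MlDataframeEvaluationContainer = {
*   regression: {
*     actual_field: 'price',                  // ground truth field
*     predicted_field: 'ml.price_prediction', // field written by the analysis
*   },
* };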
*/ regression?: MlDataframeEvaluationRegression; } export interface MlDataframeEvaluationMetrics { /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc; /** Precision of predictions (per-class and average). */ precision?: Record; /** Recall of predictions (per-class and average). */ recall?: Record; } export interface MlDataframeEvaluationOutlierDetection { /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field; /** The field of the index that defines the probability of whether the item belongs to the class in question or not. It’s the field that contains the results of the analysis. */ predicted_probability_field: Field; /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationOutlierDetectionMetrics; } export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { /** The confusion matrix for the evaluation, which reports the counts of true positives, false positives, true negatives, and false negatives at the given thresholds. */ confusion_matrix?: Record; } export interface MlDataframeEvaluationRegression { /** The field of the index which contains the ground truth. The data type of this field must be numerical. */ actual_field: Field; /** The field in the index that contains the predicted value, in other words the results of the regression analysis. */ predicted_field: Field; /** Specifies the metrics that are used for the evaluation. For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */ metrics?: MlDataframeEvaluationRegressionMetrics; } export interface MlDataframeEvaluationRegressionMetrics { /** Average squared difference between the predicted values and the actual (ground truth) value. For more information, read this wiki article. */ mse?: Record; /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */ msle?: MlDataframeEvaluationRegressionMetricsMsle; /** Pseudo Huber loss function. */ huber?: MlDataframeEvaluationRegressionMetricsHuber; /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: Record; } export interface MlDataframeEvaluationRegressionMetricsHuber { /** Approximates 1/2 (prediction - actual)^2 for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0. */ delta?: double; } export interface MlDataframeEvaluationRegressionMetricsMsle { /** Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */ offset?: double; } export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed'; export interface MlDelayedDataCheckConfig { /** The window of time that is searched for late data. This window of time ends with the latest finalized bucket. * It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. * In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`.
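*
* A minimal sketch enabling the check with an explicit two-hour window:
* @example
* const delayedDataCheck: MlDelayedDataCheckConfig = {
*   enabled: true,
*   check_window: '2h', // omit to let an appropriate window be calculated
* };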
*/ check_window?: Duration; /** Specifies whether the datafeed periodically checks for delayed data. */ enabled: boolean; } export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated'; export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed'; export interface MlDetectionRule { /** The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined. */ actions?: MlRuleAction[]; /** An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition. Multiple conditions are combined together with a logical AND. */ conditions?: MlRuleCondition[]; /** A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */ scope?: Record; } export interface MlDetector { /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: Field; /** Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[]; /** A description of the detector. */ detector_description?: string; /** A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */ detector_index?: integer; /** If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent; /** The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain double quotes or backslashes. */ field_name?: Field; /** The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */ function?: string; /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ over_field_name?: Field; /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field; /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean; } export interface MlDetectorRead { /** The field used to split the data. * In particular, this property is used for analyzing the splits with respect to their own history. * It is used for finding unusual values in the context of the split. */ by_field_name?: Field; /** An array of custom rule objects, which enable you to customize the way detectors operate. * For example, a rule may dictate to the detector conditions under which results should be skipped. 
* Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[]; /** A description of the detector. */ detector_description?: string; /** A unique identifier for the detector. * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index?: integer; /** Contains one of the following values: `all`, `none`, `by`, or `over`. * If set, frequent entities are excluded from influencing the anomaly results. * Entities can be considered frequent over time or frequent in a population. * If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent; /** The field that the detector uses in the function. * If you use an event rate function such as `count` or `rare`, do not specify this field. */ field_name?: Field; /** The analysis function that is used. * For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */ function: string; /** The field used to split the data. * In particular, this property is used for analyzing the splits with respect to the history of all splits. * It is used for finding unusual values in the population of all splits. */ over_field_name?: Field; /** The field used to segment the analysis. * When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field; /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean; } export interface MlDetectorUpdate { /** A unique identifier for the detector. * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index: integer; /** A description of the detector. */ description?: string; /** An array of custom rule objects, which enable you to customize the way detectors operate. * For example, a rule may dictate to the detector conditions under which results should be skipped. * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[]; } export type MlDiscoveryNode = Partial>; export interface MlDiscoveryNodeCompact { name: Name; ephemeral_id: Id; id: Id; transport_address: TransportAddress; attributes: Record; } export interface MlDiscoveryNodeContent { name?: Name; ephemeral_id: Id; transport_address: TransportAddress; external_id: string; attributes: Record; roles: string[]; version: VersionString; min_index_version: integer; max_index_version: integer; } export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'; export interface MlExponentialAverageCalculationContext { incremental_metric_value_ms: DurationValue; latest_timestamp?: EpochTime; previous_exponential_average_ms?: DurationValue; } export type MlFeatureExtractor = MlQueryFeatureExtractor; export interface MlFillMaskInferenceOptions { /** The string/token which will be removed from incoming documents and replaced with the inference prediction(s). * In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer * has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. * However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, * otherwise the request will fail. */ mask_token?: string; /** Specifies the number of top class predictions to return. Defaults to 0. 
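*
* A sketch of an inference-time update requesting the top three predictions; the
* `results_field` name is hypothetical:
* @example
* const fillMaskUpdate: MlFillMaskInferenceUpdateOptions = {
*   num_top_classes: 3,
*   results_field: 'ml_fill_mask',
* };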
*/ num_top_classes?: integer; /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; vocabulary: MlVocabulary; } export interface MlFillMaskInferenceUpdateOptions { /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer; /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; } export interface MlFilter { /** A description of the filter. */ description?: string; /** A string that uniquely identifies a filter. */ filter_id: Id; /** An array of strings which is the filter item list. */ items: string[]; } export interface MlFilterRef { /** The identifier for the filter. */ filter_id: Id; /** If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */ filter_type?: MlFilterType; } export type MlFilterType = 'include' | 'exclude'; export interface MlGeoResults { /** The actual value for the bucket formatted as a `geo_point`. */ actual_point?: string; /** The typical value for the bucket formatted as a `geo_point`. */ typical_point?: string; } export interface MlHyperparameter { /** A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ absolute_importance?: double; /** Name of the hyperparameter. */ name: Name; /** A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ relative_importance?: double; /** Indicates if the hyperparameter is specified by the user (true) or optimized (false). */ supplied: boolean; /** The value of the hyperparameter, either optimized or specified by the user. */ value: double; } export interface MlHyperparameters { /** Advanced configuration option. * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. * This parameter affects loss calculations by acting as a multiplier of the tree depth. * Higher alpha values result in shallower trees and faster training times. * By default, this value is calculated during hyperparameter optimization. * It must be greater than or equal to zero. */ alpha?: double; /** Advanced configuration option. * Regularization parameter to prevent overfitting on the training data set. * Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. * A high lambda value causes training to favor small leaf weights. * This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. * A small lambda value results in large individual trees and slower training. * By default, this value is calculated during hyperparameter optimization. * It must be a nonnegative value. */ lambda?: double; /** Advanced configuration option. 
* Regularization parameter to prevent overfitting on the training data set. * Multiplies a linear penalty associated with the size of individual trees in the forest. * A high gamma value causes training to prefer small trees. * A small gamma value results in larger individual trees and slower training. * By default, this value is calculated during hyperparameter optimization. * It must be a nonnegative value. */ gamma?: double; /** Advanced configuration option. * The shrinkage applied to the weights. * Smaller values result in larger forests which have a better generalization error. * However, larger forests cause slower training. * By default, this value is calculated during hyperparameter optimization. * It must be a value between `0.001` and `1`. */ eta?: double; /** Advanced configuration option. * Specifies the rate at which `eta` increases for each new tree that is added to the forest. * For example, a rate of 1.05 increases `eta` by 5% for each extra tree. * By default, this value is calculated during hyperparameter optimization. * It must be between `0.5` and `2`. */ eta_growth_rate_per_tree?: double; /** Advanced configuration option. * Defines the fraction of features that will be used when selecting a random bag for each candidate split. * By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double; /** Advanced configuration option. * Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. * A small value results in the use of a small fraction of the data. * If this value is set to be less than 1, accuracy typically improves. * However, too small a value may result in poor convergence for the ensemble and so require more trees. * By default, this value is calculated during hyperparameter optimization. * It must be greater than zero and less than or equal to 1. */ downsample_factor?: double; /** If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many of such consecutive failures are tolerated. * Once the number of attempts exceeds the threshold, the forest training stops. */ max_attempts_to_add_tree?: integer; /** Advanced configuration option. * A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. * The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. * By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer; /** Advanced configuration option. * Defines the maximum number of decision trees in the forest. * The maximum value is 2000. * By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer; /** The maximum number of folds for the cross-validation procedure. */ num_folds?: integer; /** Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */ num_splits_per_feature?: integer; /** Advanced configuration option. * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. * This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. 
* By default, this value is calculated during hyperparameter optimization. * It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer; /** Advanced configuration option. * This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. * By default, this value is calculated during hyperparameter optimization. * It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double; } export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status'; export interface MlInferenceConfigCreateContainer { /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions; /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions; /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceOptions; /** Zeroshot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceOptions; /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceOptions; learning_to_rank?: MlLearningToRankConfig; /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceOptions; /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceOptions; /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceOptions; /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceOptions; /** Question answering configuration for inference. */ question_answering?: MlQuestionAnsweringInferenceOptions; } export interface MlInferenceConfigUpdateContainer { /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions; /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions; /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceUpdateOptions; /** Zeroshot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions; /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceUpdateOptions; /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceUpdateOptions; /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceUpdateOptions; /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceUpdateOptions; /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceUpdateOptions; /** Question answering configuration for inference */ question_answering?: MlQuestionAnsweringInferenceUpdateOptions; } export interface MlInferenceResponseResult { /** If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */ entities?: MlTrainedModelEntities[]; /** Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property * is present only when it is true. */ is_truncated?: boolean; /** If the model is trained for a text classification or zero shot classification task, the response is the * predicted class. * For named entity recognition (NER) tasks, it contains the annotated text output. * For fill mask tasks, it contains the top prediction for replacing the mask token. 
* For text embedding tasks, it contains the raw numerical text embedding values. * For regression models, it is a numerical value. * For classification models, it may be an integer, double, boolean, or string depending on the prediction type. */ predicted_value?: MlPredictedValue | MlPredictedValue[]; /** For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted * value. */ predicted_value_sequence?: string; /** Specifies a probability for the predicted value. */ prediction_probability?: double; /** Specifies a confidence score for the predicted value. */ prediction_score?: double; /** For fill mask, text classification, and zero shot classification tasks, the response contains a list of top * class entries. */ top_classes?: MlTopClassEntry[]; /** If the request failed, the response contains the reason for the failure. */ warning?: string; /** The feature importance for the inference results. Relevant only for classification or regression models. */ feature_importance?: MlTrainedModelInferenceFeatureImportance[]; } export interface MlInfluence { influencer_field_name: string; influencer_field_values: string[]; } export interface MlInfluencer { /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue; /** A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated * across detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new * data is analyzed. */ influencer_score: double; /** The field name of the influencer. */ influencer_field_name: Field; /** The entity that influenced, contributed to, or was to blame for the anomaly. */ influencer_field_value: string; /** A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors. * This is the initial value that was calculated at the time the bucket was processed. */ initial_influencer_score: double; /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean; /** Identifier for the anomaly detection job. */ job_id: Id; /** The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high * precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly * interpretation of this value. */ probability: double; /** Internal. This value is always set to `influencer`. */ result_type: string; /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime; /** Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s * analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This * information enables you to filter the anomaly results more easily. */ foo?: string; } export interface MlJob { /** Advanced configuration option. * Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_open: boolean; /** The analysis configuration, which specifies how to analyze the data. * After you create a job, you cannot change the analysis configuration; all the properties are informational.
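*
* A hedged sketch of a minimal job configuration; the `MlAnalysisConfig` and
* `MlDataDescription` fields shown are assumptions defined elsewhere in this file, and
* the identifiers are hypothetical:
* @example
* const jobConfig: MlJobConfig = {
*   job_id: 'requests-anomalies',
*   analysis_config: { bucket_span: '15m', detectors: [{ function: 'count' }] },
*   data_description: { time_field: 'timestamp' },
* };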
*/ analysis_config: MlAnalysisConfig; /** Limits can be applied for the resources required to hold the mathematical models in memory. * These limits are approximate and can be set per job. * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits; /** Advanced configuration option. * The time between each periodic persistence of the model. * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. * The smallest allowed value is 1 hour. */ background_persist_interval?: Duration; blocked?: MlJobBlocked; create_time?: DateTime; /** Advanced configuration option. * Contains custom metadata about the job. */ custom_settings?: MlCustomSettings; /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. * It specifies a period of time (in days) after which only the first snapshot per day is retained. * This period is relative to the timestamp of the most recent snapshot for this job. * Valid values range from 0 to `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long; /** The data description defines the format of the input data when you send data to the job by using the post data API. * Note that when configuring a datafeed, these properties are automatically set. * When data is received via the post data API, it is not stored in Elasticsearch. * Only the results for anomaly detection are retained. */ data_description: MlDataDescription; /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. * You can associate only one datafeed with each anomaly detection job. */ datafeed_config?: MlDatafeed; /** Indicates that the process of deleting the job is in progress but not yet completed. * It is only reported when `true`. */ deleting?: boolean; /** A description of the job. */ description?: string; /** If the job closed or failed, this is the time the job finished, otherwise it is `null`. * This property is informational; you cannot change its value. */ finished_time?: DateTime; /** A list of job groups. * A job can belong to no groups or many. */ groups?: string[]; /** Identifier for the anomaly detection job. * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. * It must start and end with alphanumeric characters. */ job_id: Id; /** Reserved for future use, currently set to `anomaly_detector`. */ job_type?: string; /** The machine learning configuration version number at which the job was created. */ job_version?: VersionString; /** This advanced configuration option stores model information along with the results. * It provides a more detailed view into anomaly detection. * Model plot provides a simplified and indicative view of the model and its bounds. */ model_plot_config?: MlModelPlotConfig; model_snapshot_id?: Id; /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. * It specifies the maximum period of time (in days) that snapshots are retained. * This period is relative to the timestamp of the most recent snapshot for this job. * By default, snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days: long; /** Advanced configuration option. * The period over which adjustments to the score are applied, as new data is seen. * The default value is the longer of 30 days or 100 `bucket_spans`.
*/ renormalization_window_days?: long; /** A text string that affects the name of the machine learning results index. * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ results_index_name: IndexName; /** Advanced configuration option. * The period of time (in days) that results are retained. * Age is calculated relative to the timestamp of the latest bucket result. * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. * The default value is null, which means all results are retained. * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. * Annotations added by users are retained forever. */ results_retention_days?: long; } export interface MlJobBlocked { reason: MlJobBlockedReason; task_id?: TaskId; } export type MlJobBlockedReason = 'delete' | 'reset' | 'revert'; export interface MlJobConfig { /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_open?: boolean; /** The analysis configuration, which specifies how to analyze the data. * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig; /** Limits can be applied for the resources required to hold the mathematical models in memory. * These limits are approximate and can be set per job. * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits; /** Advanced configuration option. * The time between each periodic persistence of the model. * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. * The smallest allowed value is 1 hour. */ background_persist_interval?: Duration; /** Advanced configuration option. * Contains custom metadata about the job. */ custom_settings?: MlCustomSettings; /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. * It specifies a period of time (in days) after which only the first snapshot per day is retained. * This period is relative to the timestamp of the most recent snapshot for this job. */ daily_model_snapshot_retention_after_days?: long; /** The data description defines the format of the input data when you send data to the job by using the post data API. * Note that when you configure a datafeed, these properties are automatically set. */ data_description: MlDataDescription; /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. * You can associate only one datafeed with each anomaly detection job. */ datafeed_config?: MlDatafeedConfig; /** A description of the job. */ description?: string; /** A list of job groups. A job can belong to no groups or many. */ groups?: string[]; /** Identifier for the anomaly detection job. * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. * It must start and end with alphanumeric characters. */ job_id?: Id; /** Reserved for future use, currently set to `anomaly_detector`. */ job_type?: string; /** This advanced configuration option stores model information along with the results.
* It provides a more detailed view into anomaly detection. * Model plot provides a simplified and indicative view of the model and its bounds. */ model_plot_config?: MlModelPlotConfig; /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. * It specifies the maximum period of time (in days) that snapshots are retained. * This period is relative to the timestamp of the most recent snapshot for this job. * The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days?: long; /** Advanced configuration option. * The period over which adjustments to the score are applied, as new data is seen. * The default value is the longer of 30 days or 100 `bucket_spans`. */ renormalization_window_days?: long; /** A text string that affects the name of the machine learning results index. * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ results_index_name?: IndexName; /** Advanced configuration option. * The period of time (in days) that results are retained. * Age is calculated relative to the timestamp of the latest bucket result. * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. * The default value is null, which means all results are retained. * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. * Annotations added by users are retained forever. */ results_retention_days?: long; } export interface MlJobForecastStatistics { memory_bytes?: MlJobStatistics; processing_time_ms?: MlJobStatistics; records?: MlJobStatistics; status?: Record; total: long; forecasted_jobs: integer; } export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening'; export interface MlJobStatistics { avg: double; max: double; min: double; total: double; } export interface MlJobStats { /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string; /** An object that describes the quantity of input to the job and any related error counts. * The `data_count` values are cumulative for the lifetime of a job. * If a model snapshot is reverted or old results are deleted, the job counts are not reset. */ data_counts: MlDataCounts; /** An object that provides statistical information about forecasts belonging to this job. * Some statistics are omitted if no forecasts have been made. */ forecasts_stats: MlJobForecastStatistics; /** Identifier for the anomaly detection job. */ job_id: string; /** An object that provides information about the size and contents of the model. */ model_size_stats: MlModelSizeStats; /** Contains properties for the node that runs the job. * This information is available only for open jobs. * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact; /** For open jobs only, the elapsed time for which the job has been open. */ open_time?: DateTime; /** The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */ state: MlJobState; /** An object that provides statistical information about the timing aspects of this job.
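*
* A sketch of reading these statistics from a fetched stats object; the variable is
* hypothetical:
* @example
* declare const stats: MlJobStats;
* const avgBucketMs = stats.timing_stats.average_bucket_processing_time_ms;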
*/ timing_stats: MlJobTimingStats; /** Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */ deleting?: boolean; } export interface MlJobTimingStats { average_bucket_processing_time_ms?: DurationValue; bucket_count: long; exponential_average_bucket_processing_time_ms?: DurationValue; exponential_average_bucket_processing_time_per_hour_ms: DurationValue; job_id: Id; total_bucket_processing_time_ms: DurationValue; maximum_bucket_processing_time_ms?: DurationValue; minimum_bucket_processing_time_ms?: DurationValue; } export interface MlLearningToRankConfig { default_params?: Record; feature_extractors?: Record[]; num_top_feature_importance_values: integer; } export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'; export interface MlModelPackageConfig { create_time?: EpochTime; description?: string; inference_config?: Record; metadata?: Metadata; minimum_version?: string; model_repository?: string; model_type?: string; packaged_model_id: Id; platform_architecture?: string; prefix_strings?: MlTrainedModelPrefixStrings; size?: ByteSize; sha256?: string; tags?: string[]; vocabulary_file?: string; } export interface MlModelPlotConfig { /** If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */ annotations_enabled?: boolean; /** If true, enables calculation and storage of the model bounds for each entity that is being analyzed. */ enabled?: boolean; /** Limits data collection to this comma separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */ terms?: Field; } export interface MlModelSizeStats { bucket_allocation_failures_count: long; job_id: Id; log_time: DateTime; memory_status: MlMemoryStatus; model_bytes: ByteSize; model_bytes_exceeded?: ByteSize; model_bytes_memory_limit?: ByteSize; output_memory_allocator_bytes?: ByteSize; peak_model_bytes?: ByteSize; assignment_memory_basis?: string; result_type: string; total_by_field_count: long; total_over_field_count: long; total_partition_field_count: long; categorization_status: MlCategorizationStatus; categorized_doc_count: integer; dead_category_count: integer; failed_category_count: integer; frequent_category_count: integer; rare_category_count: integer; total_category_count: integer; timestamp?: long; } export interface MlModelSnapshot { /** An optional description of the job. */ description?: string; /** A numerical character string that uniquely identifies the job that the snapshot was created for. */ job_id: Id; /** The timestamp of the latest processed record. */ latest_record_time_stamp?: integer; /** The timestamp of the latest bucket result. */ latest_result_time_stamp?: integer; /** The minimum version required to be able to restore the model snapshot. */ min_version: VersionString; /** Summary information describing the model. */ model_size_stats?: MlModelSizeStats; /** If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */ retain: boolean; /** For internal use only. */ snapshot_doc_count: long; /** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id; /** The creation timestamp for the snapshot. 
*/ timestamp: long; } export interface MlModelSnapshotUpgrade { job_id: Id; snapshot_id: Id; state: MlSnapshotUpgradeState; /** @remarks This property is not supported on Elastic Cloud Serverless. */ node: MlDiscoveryNode; assignment_explanation: string; } export interface MlNerInferenceOptions { /** The tokenization options */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** The token classification labels. Must be IOB formatted tags */ classification_labels?: string[]; vocabulary?: MlVocabulary; } export interface MlNerInferenceUpdateOptions { /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; } export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig { } export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { /** Should the tokenizer prefix input with a space character */ add_prefix_space?: boolean; } export interface MlNlpTokenizationUpdateOptions { /** Truncate options to apply */ truncate?: MlTokenizationTruncate; /** Span options to apply */ span?: integer; } export interface MlOutlierDetectionParameters { /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean; /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. * Value range: 0-1 */ feature_influence_threshold?: double; /** The method that outlier detection uses. * Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. * The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string; /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. * When the value is not set, different values are used for different ensemble members. * This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer; /** The proportion of the data set that is assumed to be outlying prior to outlier detection. * For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double; /** If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */ standardization_enabled?: boolean; } export interface MlOverallBucket { /** The length of the bucket in seconds. Matches the job with the longest bucket_span value. */ bucket_span: DurationValue; /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean; /** An array of objects that contain the max_anomaly_score per job_id. */ jobs: MlOverallBucketJob[]; /** The top_n average of the maximum bucket anomaly_score per job. */ overall_score: double; /** Internal. This is always set to overall_bucket. */ result_type: string; /** The start time of the bucket for which these results were calculated. 
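*
* A sketch of flagging the jobs that drove a high overall score; the threshold is
* arbitrary:
* @example
* declare const bucket: MlOverallBucket;
* const noisyJobs = bucket.jobs.filter(job => job.max_anomaly_score > 75);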
*/ timestamp: EpochTime; /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime; } export interface MlOverallBucketJob { job_id: Id; max_anomaly_score: double; } export interface MlPage { /** Skips the specified number of items. */ from?: integer; /** Specifies the maximum number of items to obtain. */ size?: integer; } export interface MlPassThroughInferenceOptions { /** The tokenization options */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; vocabulary?: MlVocabulary; } export interface MlPassThroughInferenceUpdateOptions { /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; } export interface MlPerPartitionCategorization { /** To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */ enabled?: boolean; /** This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. */ stop_on_warn?: boolean; } export type MlPredictedValue = ScalarValue | ScalarValue[]; export interface MlQueryFeatureExtractor { default_score?: float; feature_name: string; query: QueryDslQueryContainer; } export interface MlQuestionAnsweringInferenceOptions { /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer; /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** The maximum answer length to consider */ max_answer_length?: integer; } export interface MlQuestionAnsweringInferenceUpdateOptions { /** The question to answer given the inference context */ question: string; /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer; /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** The maximum answer length to consider for extraction */ max_answer_length?: integer; } export interface MlRegressionInferenceOptions { /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: Field; /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer; } export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping'; export type MlRuleAction = 'skip_result' | 'skip_model_update'; export interface MlRuleCondition { /** Specifies the result property to which the condition applies. 
If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */ applies_to: MlAppliesTo; /** Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */ operator: MlConditionOperator; /** The value that is compared against the `applies_to` field using the operator. */ value: double; } export interface MlRunningStateSearchInterval { /** The end time. */ end?: Duration; /** The end time as an epoch in milliseconds. */ end_ms: DurationValue; /** The start time. */ start?: Duration; /** The start time as an epoch in milliseconds. */ start_ms: DurationValue; } export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed'; export interface MlTextClassificationInferenceOptions { /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer; /** The tokenization options */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels. */ classification_labels?: string[]; vocabulary?: MlVocabulary; } export interface MlTextClassificationInferenceUpdateOptions { /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer; /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels. */ classification_labels?: string[]; } export interface MlTextEmbeddingInferenceOptions { /** The number of dimensions in the embedding output */ embedding_size?: integer; /** The tokenization options */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; vocabulary: MlVocabulary; } export interface MlTextEmbeddingInferenceUpdateOptions { tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; } export interface MlTextExpansionInferenceOptions { /** The tokenization options */ tokenization?: MlTokenizationConfigContainer; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; vocabulary: MlVocabulary; } export interface MlTextExpansionInferenceUpdateOptions { tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; } export interface MlTimingStats { /** Runtime of the analysis in milliseconds. */ elapsed_time: DurationValue; /** Runtime of the latest iteration of the analysis in milliseconds.
*/ iteration_time?: DurationValue; } export interface MlTokenizationConfigContainer { /** Indicates BERT tokenization and its options */ bert?: MlNlpBertTokenizationConfig; /** Indicates BERT Japanese tokenization and its options */ bert_ja?: MlNlpBertTokenizationConfig; /** Indicates MPNET tokenization and its options */ mpnet?: MlNlpBertTokenizationConfig; /** Indicates RoBERTa tokenization and its options */ roberta?: MlNlpRobertaTokenizationConfig; xlm_roberta?: MlXlmRobertaTokenizationConfig; } export type MlTokenizationTruncate = 'first' | 'second' | 'none'; export interface MlTopClassEntry { class_name: string; class_probability: double; class_score: double; } export interface MlTotalFeatureImportance { /** The feature for which this importance was calculated. */ feature_name: Name; /** A collection of feature importance statistics related to the training data set for this particular feature. */ importance: MlTotalFeatureImportanceStatistics[]; /** If the trained model is a classification model, feature importance statistics are gathered per target class value. */ classes: MlTotalFeatureImportanceClass[]; } export interface MlTotalFeatureImportanceClass { /** The target class value. Could be a string, boolean, or number. */ class_name: Name; /** A collection of feature importance statistics related to the training data set for this particular feature. */ importance: MlTotalFeatureImportanceStatistics[]; } export interface MlTotalFeatureImportanceStatistics { /** The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */ mean_magnitude: double; /** The maximum importance value across all the training data for this feature. */ max: integer; /** The minimum importance value across all the training data for this feature. */ min: integer; } export interface MlTrainedModelAssignment { adaptive_allocations?: MlAdaptiveAllocationsSettings | null; /** The overall assignment state. */ assignment_state: MlDeploymentAssignmentState; max_assigned_allocations?: integer; reason?: string; /** The allocation state for each node. */ routing_table: Record; /** The timestamp when the deployment started. */ start_time: DateTime; task_parameters: MlTrainedModelAssignmentTaskParameters; } export interface MlTrainedModelAssignmentRoutingStateAndReason { /** The reason for the current state. It is usually populated only when the * `routing_state` is `failed`. */ reason?: string; /** The current routing state. */ routing_state: MlRoutingState; } export interface MlTrainedModelAssignmentRoutingTable { /** The reason for the current state. It is usually populated only when the * `routing_state` is `failed`. */ reason?: string; /** The current routing state. */ routing_state: MlRoutingState; /** Current number of allocations. */ current_allocations: integer; /** Target number of allocations. */ target_allocations: integer; } export interface MlTrainedModelAssignmentTaskParameters { /** The size of the trained model in bytes. */ model_bytes: ByteSize; /** The unique identifier for the trained model. */ model_id: Id; /** The unique identifier for the trained model deployment. */ deployment_id: Id; /** The size of the trained model cache. */ cache_size?: ByteSize; /** The total number of allocations this model is assigned across ML nodes. 
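*
* A sketch relating allocations to threads; the values are illustrative. A deployment
* with 2 allocations and 4 threads per allocation can serve up to 8 concurrent
* inference threads across the cluster:
* @example
* declare const params: MlTrainedModelAssignmentTaskParameters;
* const totalInferenceThreads = params.number_of_allocations * params.threads_per_allocation;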
*/ number_of_allocations: integer; priority: MlTrainingPriority; per_deployment_memory_bytes: ByteSize; per_allocation_memory_bytes: ByteSize; /** The number of inference requests allowed in the queue at a time. */ queue_capacity: integer; /** Number of threads per allocation. */ threads_per_allocation: integer; } export interface MlTrainedModelConfig { /** Identifier for the trained model. */ model_id: Id; /** The model type */ model_type?: MlTrainedModelType; /** A comma delimited string of tags. A trained model can have many tags, or none. */ tags: string[]; /** The Elasticsearch version number in which the trained model was created. */ version?: VersionString; compressed_definition?: string; /** Information on the creator of the trained model. */ created_by?: string; /** The time when the trained model was created. */ create_time?: DateTime; /** Any field map described in the inference configuration takes precedence. */ default_field_map?: Record; /** The free-text description of the trained model. */ description?: string; /** The estimated heap usage in bytes to keep the trained model in memory. */ estimated_heap_memory_usage_bytes?: integer; /** The estimated number of operations to use the trained model. */ estimated_operations?: integer; /** True if the full model definition is present. */ fully_defined?: boolean; /** The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer; /** The input field names for the model definition. */ input: MlTrainedModelConfigInput; /** The license level of the trained model. */ license_level?: string; /** An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. */ metadata?: MlTrainedModelConfigMetadata; model_size_bytes?: ByteSize; model_package?: MlModelPackageConfig; location?: MlTrainedModelLocation; platform_architecture?: string; prefix_strings?: MlTrainedModelPrefixStrings; } export interface MlTrainedModelConfigInput { /** An array of input field names for the model. */ field_names: Field[]; } export interface MlTrainedModelConfigMetadata { model_aliases?: string[]; /** An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */ feature_importance_baseline?: Record; /** List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */ hyperparameters?: MlHyperparameter[]; /** An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. */ total_feature_importance?: MlTotalFeatureImportance[]; } export interface MlTrainedModelDeploymentAllocationStatus { /** The current number of nodes where the model is allocated. */ allocation_count: integer; /** The detailed allocation state related to the nodes. */ state: MlDeploymentAllocationState; /** The desired number of nodes for model allocation.
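 *
 * A minimal sketch, not part of the generated spec: a deployment can be
 * considered fully allocated once the current count reaches the target.
 *
 * @example
 * const fullyAllocated = (s: MlTrainedModelDeploymentAllocationStatus): boolean =>
 *   s.allocation_count >= s.target_allocation_count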
*/ target_allocation_count: integer; } export interface MlTrainedModelDeploymentNodesStats { /** The average time for each inference call to complete on this node. */ average_inference_time_ms?: DurationValue; average_inference_time_ms_last_minute?: DurationValue; /** The average time for each inference call to complete on this node, excluding cache hits. */ average_inference_time_ms_excluding_cache_hits?: DurationValue; /** The number of errors when evaluating the trained model. */ error_count?: integer; /** The total number of inference calls made against this node for this model. */ inference_count?: long; inference_cache_hit_count?: long; inference_cache_hit_count_last_minute?: long; /** The epoch time stamp of the last inference call for the model on this node. */ last_access?: EpochTime; /** Information pertaining to the node. * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNode; /** The number of allocations assigned to this node. */ number_of_allocations?: integer; /** The number of inference requests queued to be processed. */ number_of_pending_requests?: integer; peak_throughput_per_minute: long; /** The number of inference requests that were not processed because the queue was full. */ rejected_execution_count?: integer; /** The current routing state and reason for the current routing state for this allocation. */ routing_state: MlTrainedModelAssignmentRoutingStateAndReason; /** The epoch timestamp when the allocation started. */ start_time?: EpochTime; /** The number of threads used by each allocation during inference. */ threads_per_allocation?: integer; throughput_last_minute: integer; /** The number of inference requests that timed out before being processed. */ timeout_count?: integer; } export interface MlTrainedModelDeploymentStats { adaptive_allocations?: MlAdaptiveAllocationsSettings; /** The detailed allocation status for the deployment. */ allocation_status?: MlTrainedModelDeploymentAllocationStatus; cache_size?: ByteSize; /** The unique identifier for the trained model deployment. */ deployment_id: Id; /** The sum of `error_count` for all nodes in the deployment. */ error_count?: integer; /** The sum of `inference_count` for all nodes in the deployment. */ inference_count?: integer; /** The unique identifier for the trained model. */ model_id: Id; /** The deployment stats for each node that currently has the model allocated. * In serverless, stats are reported for a single unnamed virtual node. */ nodes: MlTrainedModelDeploymentNodesStats[]; /** The number of allocations requested. */ number_of_allocations?: integer; peak_throughput_per_minute: long; priority: MlTrainingPriority; /** The number of inference requests that can be queued before new requests are rejected. */ queue_capacity?: integer; /** The sum of `rejected_execution_count` for all nodes in the deployment. * Individual nodes reject an inference request if the inference queue is full. * The queue size is controlled by the `queue_capacity` setting in the start * trained model deployment API. */ rejected_execution_count?: integer; /** The reason for the current deployment state. Usually only populated when * the model is not deployed to a node. */ reason?: string; /** The epoch timestamp when the deployment started. */ start_time: EpochTime; /** The overall state of the deployment. */ state?: MlDeploymentAssignmentState; /** The number of threads used by each allocation during inference.
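 *
 * A minimal sketch, not part of the generated spec: per-node counters can be
 * aggregated from the `nodes` array when the optional deployment-level sums
 * are absent.
 *
 * @example
 * const totalInferences = (d: MlTrainedModelDeploymentStats): number =>
 *   d.nodes.reduce((sum, n) => sum + (n.inference_count ?? 0), 0)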
*/ threads_per_allocation?: integer; /** The sum of `timeout_count` for all nodes in the deployment. */ timeout_count?: integer; } export interface MlTrainedModelEntities { class_name: string; class_probability: double; entity: string; start_pos: integer; end_pos: integer; } export interface MlTrainedModelInferenceClassImportance { class_name: string; importance: double; } export interface MlTrainedModelInferenceFeatureImportance { feature_name: string; importance?: double; classes?: MlTrainedModelInferenceClassImportance[]; } export interface MlTrainedModelInferenceStats { /** The number of times the model was loaded for inference and was not retrieved from the cache. * If this number is close to the `inference_count`, the cache is not being appropriately used. * This can be solved by increasing the cache size or its time-to-live (TTL). * Refer to general machine learning settings for the appropriate settings. */ cache_miss_count: integer; /** The number of failures when using the model for inference. */ failure_count: integer; /** The total number of times the model has been called for inference. * This is across all inference contexts, including all pipelines. */ inference_count: integer; /** The number of inference calls where all the training features for the model were missing. */ missing_all_fields_count: integer; /** The time when the statistics were last updated. */ timestamp: EpochTime; } export interface MlTrainedModelLocation { index: MlTrainedModelLocationIndex; } export interface MlTrainedModelLocationIndex { name: IndexName; } export interface MlTrainedModelPrefixStrings { /** String prepended to input at ingest */ ingest?: string; /** String prepended to input at search */ search?: string; } export interface MlTrainedModelSizeStats { /** The size of the model in bytes. */ model_size_bytes: ByteSize; /** The amount of memory required to load the model in bytes. */ required_native_memory_bytes: ByteSize; } export interface MlTrainedModelStats { /** A collection of deployment stats, which is present when the models are deployed. */ deployment_stats?: MlTrainedModelDeploymentStats; /** A collection of inference stats fields. */ inference_stats?: MlTrainedModelInferenceStats; /** A collection of ingest stats for the model across all nodes. * The values are summations of the individual node statistics. * The format matches the ingest section in the nodes stats API. */ ingest?: Record; /** The unique identifier of the trained model. */ model_id: Id; /** A collection of model size stats. */ model_size_stats: MlTrainedModelSizeStats; /** The number of ingest pipelines that currently refer to the model. */ pipeline_count: integer; } export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch'; export type MlTrainingPriority = 'normal' | 'low'; export interface MlTransformAuthorization { /** If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization; /** If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response. */ roles?: string[]; /** If a service account was used for the most recent update to the transform, the account name is listed in the response. */ service_account?: string; } export interface MlValidationLoss { /** Validation loss values for every added decision tree during the forest growing procedure. */ fold_values: string[]; /** The type of the loss metric. For example, binomial_logistic. 
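 *
 * A minimal sketch, not part of the generated spec: fold values arrive as
 * strings, so parse them before charting or comparing.
 *
 * @example
 * const foldLosses = (v: MlValidationLoss): number[] => v.fold_values.map(Number)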
*/ loss_type: string; } export interface MlVocabulary { index: IndexName; } export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig { } export interface MlZeroShotClassificationInferenceOptions { /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer; /** Hypothesis template used when tokenizing labels for prediction */ hypothesis_template?: string; /** The zero shot classification labels indicating entailment, neutral, and contradiction. * Must contain exactly and only entailment, neutral, and contradiction. */ classification_labels: string[]; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** Indicates if more than one true label exists. */ multi_label?: boolean; /** The labels to predict. */ labels?: string[]; } export interface MlZeroShotClassificationInferenceUpdateOptions { /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions; /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string; /** Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */ multi_label?: boolean; /** The labels to predict. */ labels: string[]; } export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; }; } export interface MlClearTrainedModelDeploymentCacheResponse { cleared: boolean; } export interface MlCloseJobRequest extends RequestBase { /** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ job_id: Id; /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean; /** Refer to the description for the `force` query parameter. */ force?: boolean; /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_no_match?: never; force?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; allow_no_match?: never; force?: never; timeout?: never; }; } export interface MlCloseJobResponse { closed: boolean; } export interface MlDeleteCalendarRequest extends RequestBase { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; }); /** All values in `querystring` will be added to the request querystring.
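 *
 * A usage sketch, not part of the generated spec (ids are hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * await client.ml.closeJob({ job_id: 'my-job', timeout: '30s' })
 * await client.ml.deleteCalendar({ calendar_id: 'planned-outages' })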
*/ querystring?: { [key: string]: any; } & { calendar_id?: never; }; } export type MlDeleteCalendarResponse = AcknowledgedResponseBase; export interface MlDeleteCalendarEventRequest extends RequestBase { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** Identifier for the scheduled event. * You can obtain this identifier by using the get calendar events API. */ event_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; event_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; event_id?: never; }; } export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase; export interface MlDeleteCalendarJobRequest extends RequestBase { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a * comma-separated list of jobs or groups. */ job_id: Ids; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; job_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; job_id?: never; }; } export interface MlDeleteCalendarJobResponse { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** A description of the calendar. */ description?: string; /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids; } export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. */ id: Id; /** If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. */ force?: boolean; /** The time to wait for the job to be deleted. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; force?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; force?: never; timeout?: never; }; } export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase; export interface MlDeleteDatafeedRequest extends RequestBase { /** A numerical character string that uniquely identifies the datafeed. This * identifier can contain lowercase alphanumeric characters (a-z and 0-9), * hyphens, and underscores. It must start and end with alphanumeric * characters. */ datafeed_id: Id; /** Use to forcefully delete a started datafeed; this method is quicker than * stopping and deleting the datafeed. */ force?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; force?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; force?: never; }; } export type MlDeleteDatafeedResponse = AcknowledgedResponseBase; export interface MlDeleteExpiredDataRequest extends RequestBase { /** Identifier for an anomaly detection job. It can be a job identifier, a * group name, or a wildcard expression. */ job_id?: Id; /** The desired requests per second for the deletion processes. The default * behavior is no throttling. 
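 *
 * A usage sketch, not part of the generated spec (values are hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * await client.ml.deleteExpiredData({ requests_per_second: 100, timeout: '1h' })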
*/ requests_per_second?: float; /** How long the underlying delete processes can run until they are canceled. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; requests_per_second?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; requests_per_second?: never; timeout?: never; }; } export interface MlDeleteExpiredDataResponse { deleted: boolean; } export interface MlDeleteFilterRequest extends RequestBase { /** A string that uniquely identifies a filter. */ filter_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { filter_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { filter_id?: never; }; } export type MlDeleteFilterResponse = AcknowledgedResponseBase; export interface MlDeleteForecastRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** A comma-separated list of forecast identifiers. If you do not specify * this optional parameter or if you specify `_all` or `*` the API deletes * all forecasts from the job. */ forecast_id?: Id; /** Specifies whether an error occurs when there are no forecasts. In * particular, if this parameter is set to `false` and there are no * forecasts associated with the job, attempts to delete all forecasts * return an error. */ allow_no_forecasts?: boolean; /** Specifies the period of time to wait for the completion of the delete * operation. When this period of time elapses, the API fails and returns an * error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; forecast_id?: never; allow_no_forecasts?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; forecast_id?: never; allow_no_forecasts?: never; timeout?: never; }; } export type MlDeleteForecastResponse = AcknowledgedResponseBase; export interface MlDeleteJobRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Use to forcefully delete an opened job; this method is quicker than * closing and deleting the job. */ force?: boolean; /** Specifies whether annotations that have been added by the * user should be deleted along with any auto-generated annotations when the job is * reset. */ delete_user_annotations?: boolean; /** Specifies whether the request should return immediately or wait until the * job deletion completes. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; force?: never; delete_user_annotations?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; force?: never; delete_user_annotations?: never; wait_for_completion?: never; }; } export type MlDeleteJobResponse = AcknowledgedResponseBase; export interface MlDeleteModelSnapshotRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Identifier for the model snapshot.
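 *
 * A usage sketch, not part of the generated spec (ids are hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * await client.ml.deleteModelSnapshot({ job_id: 'my-job', snapshot_id: '1575402237' })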
*/ snapshot_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; snapshot_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; snapshot_id?: never; }; } export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase; export interface MlDeleteTrainedModelRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. */ force?: boolean; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; force?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; force?: never; timeout?: never; }; } export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase; export interface MlDeleteTrainedModelAliasRequest extends RequestBase { /** The model alias to delete. */ model_alias: Name; /** The trained model ID to which the model alias refers. */ model_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_alias?: never; model_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_alias?: never; model_id?: never; }; } export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase; export interface MlEstimateModelMemoryRequest extends RequestBase { /** The analysis configuration, which specifies how to analyze the data. Refer to the documentation for a list of the properties that you can specify in the * `analysis_config` component of the body of this API. */ analysis_config?: MlAnalysisConfig; /** Estimates of the highest cardinality in a single bucket that is observed * for influencer fields over the time period that the job analyzes data. * To produce a good answer, values must be provided for all influencer * fields. Providing values for fields that are not listed as `influencers` * has no effect on the estimation. */ max_bucket_cardinality?: Record; /** Estimates of the cardinality that is observed for fields over the whole * time period that the job analyzes data. To produce a good answer, values * must be provided for fields referenced in the `by_field_name`, * `over_field_name` and `partition_field_name` of any detectors. Providing * values for other fields has no effect on the estimation. It can be * omitted from the request if no detectors have a `by_field_name`, * `over_field_name` or `partition_field_name`. */ overall_cardinality?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { analysis_config?: never; max_bucket_cardinality?: never; overall_cardinality?: never; }); /** All values in `querystring` will be added to the request querystring.
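 *
 * A usage sketch, not part of the generated spec (field names and
 * cardinalities are hypothetical; assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const estimate = await client.ml.estimateModelMemory({
 *   analysis_config: {
 *     bucket_span: '15m',
 *     detectors: [{ function: 'mean', field_name: 'bytes', by_field_name: 'airline' }]
 *   },
 *   overall_cardinality: { airline: 350 }
 * })
 * console.log(estimate.model_memory_estimate)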
*/ querystring?: { [key: string]: any; } & { analysis_config?: never; max_bucket_cardinality?: never; overall_cardinality?: never; }; } export interface MlEstimateModelMemoryResponse { model_memory_estimate: string; } export interface MlEvaluateDataFrameConfusionMatrixItem { actual_class: Name; actual_class_doc_count: integer; predicted_classes: MlEvaluateDataFrameConfusionMatrixPrediction[]; other_predicted_class_doc_count: integer; } export interface MlEvaluateDataFrameConfusionMatrixPrediction { predicted_class: Name; count: integer; } export interface MlEvaluateDataFrameConfusionMatrixThreshold { /** True Positive */ tp: integer; /** False Positive */ fp: integer; /** True Negative */ tn: integer; /** False Negative */ fn: integer; } export interface MlEvaluateDataFrameDataframeClassificationSummary { /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. * It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc; /** Accuracy of predictions (per-class and overall). */ accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy; /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix; /** Precision of predictions (per-class and average). */ precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision; /** Recall of predictions (per-class and average). */ recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall; } export interface MlEvaluateDataFrameDataframeClassificationSummaryAccuracy { classes: MlEvaluateDataFrameDataframeEvaluationClass[]; overall_accuracy: double; } export interface MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix { confusion_matrix: MlEvaluateDataFrameConfusionMatrixItem[]; other_actual_class_count: integer; } export interface MlEvaluateDataFrameDataframeClassificationSummaryPrecision { classes: MlEvaluateDataFrameDataframeEvaluationClass[]; avg_precision: double; } export interface MlEvaluateDataFrameDataframeClassificationSummaryRecall { classes: MlEvaluateDataFrameDataframeEvaluationClass[]; avg_recall: double; } export interface MlEvaluateDataFrameDataframeEvaluationClass extends MlEvaluateDataFrameDataframeEvaluationValue { class_name: Name; } export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc extends MlEvaluateDataFrameDataframeEvaluationValue { curve?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem[]; } export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem { tpr: double; fpr: double; threshold: double; } export interface MlEvaluateDataFrameDataframeEvaluationValue { value: double; } export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. */ auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc; /** Set the different thresholds of the outlier score at which the metric is calculated. */ precision?: Record; /** Set the different thresholds of the outlier score at which the metric is calculated. */ recall?: Record; /** Set the different thresholds of the outlier score at which the metrics (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - false negative) are calculated.
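 *
 * A usage sketch, not part of the generated spec (the index and the
 * actual/predicted field names are hypothetical; assumes a reachable
 * cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const res = await client.ml.evaluateDataFrame({
 *   index: 'my-outlier-results',
 *   evaluation: {
 *     outlier_detection: {
 *       actual_field: 'is_outlier',
 *       predicted_probability_field: 'ml.outlier_score'
 *     }
 *   }
 * })
 * console.log(res.outlier_detection?.auc_roc?.value)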
*/ confusion_matrix?: Record; } export interface MlEvaluateDataFrameDataframeRegressionSummary { /** Pseudo Huber loss function. */ huber?: MlEvaluateDataFrameDataframeEvaluationValue; /** Average squared difference between the predicted values and the actual (`ground truth`) value. */ mse?: MlEvaluateDataFrameDataframeEvaluationValue; /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (`ground truth`) value. */ msle?: MlEvaluateDataFrameDataframeEvaluationValue; /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: MlEvaluateDataFrameDataframeEvaluationValue; } export interface MlEvaluateDataFrameRequest extends RequestBase { /** Defines the type of evaluation you want to perform. */ evaluation: MlDataframeEvaluationContainer; /** Defines the `index` in which the evaluation will be performed. */ index: IndexName; /** A query clause that retrieves a subset of data from the source index. */ query?: QueryDslQueryContainer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { evaluation?: never; index?: never; query?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { evaluation?: never; index?: never; query?: never; }; } export interface MlEvaluateDataFrameResponse { /** Evaluation results for a classification analysis. * It outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlEvaluateDataFrameDataframeClassificationSummary; /** Evaluation results for an outlier detection analysis. * It outputs the probability that each document is an outlier. */ outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary; /** Evaluation results for a regression analysis which outputs a prediction of values. */ regression?: MlEvaluateDataFrameDataframeRegressionSummary; } export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. This identifier can contain * lowercase alphanumeric characters (a-z and 0-9), hyphens, and * underscores. It must start and end with alphanumeric characters. */ id?: Id; /** The configuration of how to source the analysis data. It requires an * index. Optionally, query and _source may be specified. */ source?: MlDataframeAnalyticsSource; /** The destination configuration, consisting of index and optionally * results_field (ml by default). */ dest?: MlDataframeAnalyticsDestination; /** The analysis configuration, which contains the information necessary to * perform one of the following types of analysis: classification, outlier * detection, or regression. */ analysis?: MlDataframeAnalysisContainer; /** A description of the job. */ description?: string; /** The approximate maximum amount of memory resources that are permitted for * analytical processing. If your `elasticsearch.yml` file contains an * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to * create data frame analytics jobs that have `model_memory_limit` values * greater than that setting. */ model_memory_limit?: string; /** The maximum number of threads to be used by the analysis. Using more * threads may decrease the time necessary to complete the analysis at the * cost of using more CPU. Note that the process may use additional threads * for operational functionality other than the analysis itself. 
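 *
 * A usage sketch, not part of the generated spec (index name hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const explanation = await client.ml.explainDataFrameAnalytics({
 *   source: { index: 'my-source-index' },
 *   analysis: { outlier_detection: {} }
 * })
 * console.log(explanation.memory_estimation, explanation.field_selection.length)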
*/ max_num_threads?: integer; /** Specify includes and/or excludes patterns to select which fields will be * included in the analysis. The patterns specified in excludes are applied * last, therefore excludes takes precedence. In other words, if the same * field is specified in both includes and excludes, then the field will not * be included in the analysis. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]; /** Specifies whether this job can start when there is insufficient machine * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; source?: never; dest?: never; analysis?: never; description?: never; model_memory_limit?: never; max_num_threads?: never; analyzed_fields?: never; allow_lazy_start?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; source?: never; dest?: never; analysis?: never; description?: never; model_memory_limit?: never; max_num_threads?: never; analyzed_fields?: never; allow_lazy_start?: never; }; } export interface MlExplainDataFrameAnalyticsResponse { /** An array of objects that explain selection for each field, sorted by the field names. */ field_selection: MlDataframeAnalyticsFieldSelection[]; /** An object containing the memory estimates. */ memory_estimation: MlDataframeAnalyticsMemoryEstimation; } export interface MlFlushJobRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Refer to the description for the `advance_time` query parameter. */ advance_time?: DateTime; /** Refer to the description for the `calc_interim` query parameter. */ calc_interim?: boolean; /** Refer to the description for the `end` query parameter. */ end?: DateTime; /** Refer to the description for the `skip_time` query parameter. */ skip_time?: DateTime; /** Refer to the description for the `start` query parameter. */ start?: DateTime; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; advance_time?: never; calc_interim?: never; end?: never; skip_time?: never; start?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; advance_time?: never; calc_interim?: never; end?: never; skip_time?: never; start?: never; }; } export interface MlFlushJobResponse { flushed: boolean; /** Provides the timestamp (in milliseconds since the epoch) of the end of * the last bucket that was processed. */ last_finalized_bucket_end?: integer; } export interface MlForecastRequest extends RequestBase { /** Identifier for the anomaly detection job. The job must be open when you * create a forecast; otherwise, an error occurs. */ job_id: Id; /** Refer to the description for the `duration` query parameter. */ duration?: Duration; /** Refer to the description for the `expires_in` query parameter. */ expires_in?: Duration; /** Refer to the description for the `max_model_memory` query parameter. */ max_model_memory?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; duration?: never; expires_in?: never; max_model_memory?: never; }); /** All values in `querystring` will be added to the request querystring.
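 *
 * A usage sketch, not part of the generated spec (job id hypothetical;
 * assumes a reachable cluster and an open job):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const { forecast_id } = await client.ml.forecast({ job_id: 'my-job', duration: '3d', expires_in: '7d' })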
*/ querystring?: { [key: string]: any; } & { job_id?: never; duration?: never; expires_in?: never; max_model_memory?: never; }; } export interface MlForecastResponse { acknowledged: boolean; forecast_id: Id; } export interface MlGetBucketsRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** The timestamp of a single bucket result. If you do not specify this * parameter, the API returns information about all buckets. */ timestamp?: DateTime; /** Skips the specified number of buckets. */ from?: integer; /** Specifies the maximum number of buckets to obtain. */ size?: integer; /** Refer to the description for the `anomaly_score` query parameter. */ anomaly_score?: double; /** Refer to the description for the `desc` query parameter. */ desc?: boolean; /** Refer to the description for the `end` query parameter. */ end?: DateTime; /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean; /** Refer to the description for the `expand` query parameter. */ expand?: boolean; page?: MlPage; /** Refer to the description for the `sort` query parameter. */ sort?: Field; /** Refer to the description for the `start` query parameter. */ start?: DateTime; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; timestamp?: never; from?: never; size?: never; anomaly_score?: never; desc?: never; end?: never; exclude_interim?: never; expand?: never; page?: never; sort?: never; start?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; timestamp?: never; from?: never; size?: never; anomaly_score?: never; desc?: never; end?: never; exclude_interim?: never; expand?: never; page?: never; sort?: never; start?: never; }; } export interface MlGetBucketsResponse { buckets: MlBucketSummary[]; count: long; } export interface MlGetCalendarEventsRequest extends RequestBase { /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id: Id; /** Specifies to get events with timestamps earlier than this time. */ end?: DateTime; /** Skips the specified number of events. */ from?: integer; /** Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. */ job_id?: Id; /** Specifies the maximum number of events to obtain. */ size?: integer; /** Specifies to get events with timestamps after this time. */ start?: DateTime; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; end?: never; from?: never; job_id?: never; size?: never; start?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; end?: never; from?: never; job_id?: never; size?: never; start?: never; }; } export interface MlGetCalendarEventsResponse { count: long; events: MlCalendarEvent[]; } export interface MlGetCalendarsCalendar { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** A description of the calendar. */ description?: string; /** An array of anomaly detection job identifiers.
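 *
 * A usage sketch, not part of the generated spec (assumes a reachable
 * cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const { calendars } = await client.ml.getCalendars({ calendar_id: '_all' })
 * for (const c of calendars) console.log(c.calendar_id, c.job_ids)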
*/ job_ids: Id[]; } export interface MlGetCalendarsRequest extends RequestBase { /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id?: Id; /** Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. */ from?: integer; /** Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. */ size?: integer; /** This object is supported only when you omit the calendar identifier. */ page?: MlPage; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; from?: never; size?: never; page?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; from?: never; size?: never; page?: never; }; } export interface MlGetCalendarsResponse { calendars: MlGetCalendarsCalendar[]; count: long; } export interface MlGetCategoriesRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Identifier for the category, which is unique in the job. If you specify * neither the category ID nor the partition_field_value, the API returns * information about all categories. If you specify only the * partition_field_value, it returns information about all categories for * the specified partition. */ category_id?: CategoryId; /** Skips the specified number of categories. */ from?: integer; /** Only return categories for the specified partition. */ partition_field_value?: string; /** Specifies the maximum number of categories to obtain. */ size?: integer; /** Configures pagination. * This parameter has the `from` and `size` properties. */ page?: MlPage; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; category_id?: never; from?: never; partition_field_value?: never; size?: never; page?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; category_id?: never; from?: never; partition_field_value?: never; size?: never; page?: never; }; } export interface MlGetCategoriesResponse { categories: MlCategory[]; count: long; } export interface MlGetDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. If you do not specify this * option, the API returns information for the first hundred data frame * analytics jobs. */ id?: Id; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no data frame analytics * jobs that match. * 2. Contains the `_all` string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * The default value returns an empty data_frame_analytics array when there * are no matches and the subset of results when there are partial matches. * If this parameter is `false`, the request returns a 404 status code when * there are no matches or only partial matches. */ allow_no_match?: boolean; /** Skips the specified number of data frame analytics jobs. */ from?: integer; /** Specifies the maximum number of data frame analytics jobs to obtain. 
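 *
 * A usage sketch, not part of the generated spec (assumes a reachable
 * cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const page = await client.ml.getDataFrameAnalytics({ from: 0, size: 20, allow_no_match: true })
 * console.log(page.count, page.data_frame_analytics.map(j => j.id))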
*/ size?: integer; /** Indicates if certain fields should be removed from the configuration on * retrieval. This allows the configuration to be in an acceptable format to * be retrieved and then added to another cluster. */ exclude_generated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; allow_no_match?: never; from?: never; size?: never; exclude_generated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; allow_no_match?: never; from?: never; size?: never; exclude_generated?: never; }; } export interface MlGetDataFrameAnalyticsResponse { count: integer; /** An array of data frame analytics job resources, which are sorted by the id value in ascending order. */ data_frame_analytics: MlDataframeAnalyticsSummary[]; } export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { /** Identifier for the data frame analytics job. If you do not specify this * option, the API returns information for the first hundred data frame * analytics jobs. */ id?: Id; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no data frame analytics * jobs that match. * 2. Contains the `_all` string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * The default value returns an empty data_frame_analytics array when there * are no matches and the subset of results when there are partial matches. * If this parameter is `false`, the request returns a 404 status code when * there are no matches or only partial matches. */ allow_no_match?: boolean; /** Skips the specified number of data frame analytics jobs. */ from?: integer; /** Specifies the maximum number of data frame analytics jobs to obtain. */ size?: integer; /** Defines whether the stats response should be verbose. */ verbose?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; allow_no_match?: never; from?: never; size?: never; verbose?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; allow_no_match?: never; from?: never; size?: never; verbose?: never; }; } export interface MlGetDataFrameAnalyticsStatsResponse { count: long; /** An array of objects that contain usage information for data frame analytics jobs, which are sorted by the id value in ascending order. */ data_frame_analytics: MlDataframeAnalytics[]; } export interface MlGetDatafeedStatsRequest extends RequestBase { /** Identifier for the datafeed. It can be a datafeed identifier or a * wildcard expression. If you do not specify one of these options, the API * returns information about all datafeeds. */ datafeed_id?: Ids; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no datafeeds that match. * 2. Contains the `_all` string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * The default value is `true`, which returns an empty `datafeeds` array * when there are no matches and the subset of results when there are * partial matches. If this parameter is `false`, the request returns a * `404` status code when there are no matches or only partial matches. 
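 *
 * A usage sketch, not part of the generated spec (wildcard id hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const { datafeeds } = await client.ml.getDatafeedStats({ datafeed_id: 'datafeed-*', allow_no_match: true })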
*/ allow_no_match?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; }; } export interface MlGetDatafeedStatsResponse { count: long; datafeeds: MlDatafeedStats[]; } export interface MlGetDatafeedsRequest extends RequestBase { /** Identifier for the datafeed. It can be a datafeed identifier or a * wildcard expression. If you do not specify one of these options, the API * returns information about all datafeeds. */ datafeed_id?: Ids; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no datafeeds that match. * 2. Contains the `_all` string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * The default value is `true`, which returns an empty `datafeeds` array * when there are no matches and the subset of results when there are * partial matches. If this parameter is `false`, the request returns a * `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** Indicates if certain fields should be removed from the configuration on * retrieval. This allows the configuration to be in an acceptable format to * be retrieved and then added to another cluster. */ exclude_generated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; exclude_generated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; exclude_generated?: never; }; } export interface MlGetDatafeedsResponse { count: long; datafeeds: MlDatafeed[]; } export interface MlGetFiltersRequest extends RequestBase { /** A string that uniquely identifies a filter. */ filter_id?: Ids; /** Skips the specified number of filters. */ from?: integer; /** Specifies the maximum number of filters to obtain. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { filter_id?: never; from?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { filter_id?: never; from?: never; size?: never; }; } export interface MlGetFiltersResponse { count: long; filters: MlFilter[]; } export interface MlGetInfluencersRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** If true, the results are sorted in descending order. */ desc?: boolean; /** Returns influencers with timestamps earlier than this time. * The default value means it is unset and results are not limited to * specific timestamps. */ end?: DateTime; /** If true, the output excludes interim results. By default, interim results * are included. */ exclude_interim?: boolean; /** Returns influencers with anomaly scores greater than or equal to this * value. */ influencer_score?: double; /** Skips the specified number of influencers. */ from?: integer; /** Specifies the maximum number of influencers to obtain. */ size?: integer; /** Specifies the sort field for the requested influencers. By default, the * influencers are sorted by the `influencer_score` value. 
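 *
 * A usage sketch, not part of the generated spec (job id hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const { influencers } = await client.ml.getInfluencers({
 *   job_id: 'my-job', influencer_score: 75, sort: 'influencer_score', desc: true
 * })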
*/ sort?: Field; /** Returns influencers with timestamps after this time. The default value * means it is unset and results are not limited to specific timestamps. */ start?: DateTime; /** Configures pagination. * This parameter has the `from` and `size` properties. */ page?: MlPage; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; desc?: never; end?: never; exclude_interim?: never; influencer_score?: never; from?: never; size?: never; sort?: never; start?: never; page?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; desc?: never; end?: never; exclude_interim?: never; influencer_score?: never; from?: never; size?: never; sort?: never; start?: never; page?: never; }; } export interface MlGetInfluencersResponse { count: long; /** Array of influencer objects */ influencers: MlInfluencer[]; } export interface MlGetJobStatsRequest extends RequestBase { /** Identifier for the anomaly detection job. It can be a job identifier, a * group name, a comma-separated list of jobs, or a wildcard expression. If * you do not specify one of these options, the API returns information for * all anomaly detection jobs. */ job_id?: Id; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no jobs that match. * 2. Contains the _all string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * If `true`, the API returns an empty `jobs` array when * there are no matches and the subset of results when there are partial * matches. If `false`, the API returns a `404` status * code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_no_match?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; allow_no_match?: never; }; } export interface MlGetJobStatsResponse { count: long; jobs: MlJobStats[]; } export interface MlGetJobsRequest extends RequestBase { /** Identifier for the anomaly detection job. It can be a job identifier, a * group name, or a wildcard expression. If you do not specify one of these * options, the API returns information for all anomaly detection jobs. */ job_id?: Ids; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no jobs that match. * 2. Contains the _all string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * The default value is `true`, which returns an empty `jobs` array when * there are no matches and the subset of results when there are partial * matches. If this parameter is `false`, the request returns a `404` status * code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** Indicates if certain fields should be removed from the configuration on * retrieval. This allows the configuration to be in an acceptable format to * be retrieved and then added to another cluster. */ exclude_generated?: boolean; /** All values in `body` will be added to the request body. 
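 *
 * A usage sketch, not part of the generated spec (group name hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const { jobs } = await client.ml.getJobs({ job_id: 'my-group-*', exclude_generated: true })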
*/ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_no_match?: never; exclude_generated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; allow_no_match?: never; exclude_generated?: never; }; } export interface MlGetJobsResponse { count: long; jobs: MlJob[]; } export interface MlGetMemoryStatsJvmStats { /** Maximum amount of memory available for use by the heap. */ heap_max?: ByteSize; /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes: integer; /** Amount of Java heap currently being used for caching inference models. */ java_inference?: ByteSize; /** Amount of Java heap, in bytes, currently being used for caching inference models. */ java_inference_in_bytes: integer; /** Maximum amount of Java heap to be used for caching inference models. */ java_inference_max?: ByteSize; /** Maximum amount of Java heap, in bytes, to be used for caching inference models. */ java_inference_max_in_bytes: integer; } export interface MlGetMemoryStatsMemMlStats { /** Amount of native memory set aside for anomaly detection jobs. */ anomaly_detectors?: ByteSize; /** Amount of native memory, in bytes, set aside for anomaly detection jobs. */ anomaly_detectors_in_bytes: integer; /** Amount of native memory set aside for data frame analytics jobs. */ data_frame_analytics?: ByteSize; /** Amount of native memory, in bytes, set aside for data frame analytics jobs. */ data_frame_analytics_in_bytes: integer; /** Maximum amount of native memory (separate from the JVM heap) that may be used by machine learning native processes. */ max?: ByteSize; /** Maximum amount of native memory (separate from the JVM heap), in bytes, that may be used by machine learning native processes. */ max_in_bytes: integer; /** Amount of native memory set aside for loading machine learning native code shared libraries. */ native_code_overhead?: ByteSize; /** Amount of native memory, in bytes, set aside for loading machine learning native code shared libraries. */ native_code_overhead_in_bytes: integer; /** Amount of native memory set aside for trained models that have a PyTorch model_type. */ native_inference?: ByteSize; /** Amount of native memory, in bytes, set aside for trained models that have a PyTorch model_type. */ native_inference_in_bytes: integer; } export interface MlGetMemoryStatsMemStats { /** If the amount of physical memory has been overridden using the es.total_memory_bytes system property * then this reports the overridden value. Otherwise it reports the same value as total. */ adjusted_total?: ByteSize; /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property * then this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`. */ adjusted_total_in_bytes: integer; /** Total amount of physical memory. */ total?: ByteSize; /** Total amount of physical memory in bytes. */ total_in_bytes: integer; /** Contains statistics about machine learning use of native memory on the node. */ ml: MlGetMemoryStatsMemMlStats; } export interface MlGetMemoryStatsMemory { attributes: Record; /** Contains Java Virtual Machine (JVM) statistics for the node. */ jvm: MlGetMemoryStatsJvmStats; /** Contains statistics about memory usage for the node. */ mem: MlGetMemoryStatsMemStats; /** Human-readable identifier for the node. Based on the node name setting. */ name: Name; /** Roles assigned to the node.
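 *
 * A usage sketch, not part of the generated spec (assumes a reachable
 * cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const mem = await client.ml.getMemoryStats({ node_id: 'ml:true', timeout: '30s' })
 * for (const [id, node] of Object.entries(mem.nodes)) console.log(id, node.mem.ml.max_in_bytes)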
*/ roles: string[]; /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress; ephemeral_id: Id; } export interface MlGetMemoryStatsRequest extends RequestBase { /** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or * `ml:true` */ node_id?: Id; /** Period to wait for a connection to the master node. If no response is received before the timeout * expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request * fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; master_timeout?: never; timeout?: never; }; } export interface MlGetMemoryStatsResponse { _nodes: NodeStatistics; cluster_name: Name; nodes: Record; } export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id: Id; /** Specifies what to do when the request: * * - Contains wildcard expressions and there are no jobs that match. * - Contains the _all string or no identifiers and there are no matches. * - Contains wildcard expressions and there are only partial matches. * * The default value is true, which returns an empty jobs array when there are no matches and the subset of results * when there are partial matches. If this parameter is false, the request returns a 404 status code when there are * no matches or only partial matches. */ allow_no_match?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; snapshot_id?: never; allow_no_match?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; snapshot_id?: never; allow_no_match?: never; }; } export interface MlGetModelSnapshotUpgradeStatsResponse { count: long; model_snapshot_upgrades: MlModelSnapshotUpgrade[]; } export interface MlGetModelSnapshotsRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id?: Id; /** Skips the specified number of snapshots. */ from?: integer; /** Specifies the maximum number of snapshots to obtain. */ size?: integer; /** Refer to the description for the `desc` query parameter. */ desc?: boolean; /** Refer to the description for the `end` query parameter. */ end?: DateTime; page?: MlPage; /** Refer to the description for the `sort` query parameter. */ sort?: Field; /** Refer to the description for the `start` query parameter. 
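 *
 * A usage sketch, not part of the generated spec (job id hypothetical;
 * assumes a reachable cluster):
 *
 * @example
 * import { Client } from '@elastic/elasticsearch'
 * const client = new Client({ node: 'http://localhost:9200' })
 * const snaps = await client.ml.getModelSnapshots({ job_id: 'my-job', sort: 'timestamp', desc: true })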
export interface MlGetModelSnapshotsRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id?: Id; /** Skips the specified number of snapshots. */ from?: integer; /** Specifies the maximum number of snapshots to obtain. */ size?: integer; /** Refer to the description for the `desc` query parameter. */ desc?: boolean; /** Refer to the description for the `end` query parameter. */ end?: DateTime; page?: MlPage; /** Refer to the description for the `sort` query parameter. */ sort?: Field; /** Refer to the description for the `start` query parameter. */ start?: DateTime; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; snapshot_id?: never; from?: never; size?: never; desc?: never; end?: never; page?: never; sort?: never; start?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; snapshot_id?: never; from?: never; size?: never; desc?: never; end?: never; page?: never; sort?: never; start?: never; }; } export interface MlGetModelSnapshotsResponse { count: long; model_snapshots: MlModelSnapshot[]; } export interface MlGetOverallBucketsRequest extends RequestBase { /** Identifier for the anomaly detection job. It can be a job identifier, a * group name, a comma-separated list of jobs or groups, or a wildcard * expression. * * You can summarize the bucket results for all anomaly detection jobs by * using `_all` or by specifying `*` as the `<job_id>`. */ job_id: Id; /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean; /** Refer to the description for the `bucket_span` query parameter. */ bucket_span?: Duration; /** Refer to the description for the `end` query parameter. */ end?: DateTime; /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean; /** Refer to the description for the `overall_score` query parameter. */ overall_score?: double | string; /** Refer to the description for the `start` query parameter. */ start?: DateTime; /** Refer to the description for the `top_n` query parameter. */ top_n?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_no_match?: never; bucket_span?: never; end?: never; exclude_interim?: never; overall_score?: never; start?: never; top_n?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; allow_no_match?: never; bucket_span?: never; end?: never; exclude_interim?: never; overall_score?: never; start?: never; top_n?: never; }; } export interface MlGetOverallBucketsResponse { count: long; /** Array of overall bucket objects. */ overall_buckets: MlOverallBucket[]; }
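/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * a typed MlGetOverallBucketsRequest spanning two jobs. The job IDs and
 * thresholds are hypothetical values.
 */
const overallBucketsRequest: MlGetOverallBucketsRequest = {
  job_id: 'job-1,job-2', // a comma-separated list, `_all`, or `*`
  bucket_span: '1h',
  overall_score: 75, // only buckets with at least this overall score
  top_n: 2, // number of top per-job bucket scores to average
  exclude_interim: true,
};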
export interface MlGetRecordsRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Skips the specified number of records. */ from?: integer; /** Specifies the maximum number of records to obtain. */ size?: integer; /** Refer to the description for the `desc` query parameter. */ desc?: boolean; /** Refer to the description for the `end` query parameter. */ end?: DateTime; /** Refer to the description for the `exclude_interim` query parameter. */ exclude_interim?: boolean; page?: MlPage; /** Refer to the description for the `record_score` query parameter. */ record_score?: double; /** Refer to the description for the `sort` query parameter. */ sort?: Field; /** Refer to the description for the `start` query parameter. */ start?: DateTime; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; from?: never; size?: never; desc?: never; end?: never; exclude_interim?: never; page?: never; record_score?: never; sort?: never; start?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; from?: never; size?: never; desc?: never; end?: never; exclude_interim?: never; page?: never; record_score?: never; sort?: never; start?: never; }; } export interface MlGetRecordsResponse { count: long; records: MlAnomaly[]; } export interface MlGetTrainedModelsRequest extends RequestBase { /** The unique identifier of the trained model or a model alias. * * You can get information for multiple trained models in a single API * request by using a comma-separated list of model IDs or a wildcard * expression. */ model_id?: Ids; /** Specifies what to do when the request: * * - Contains wildcard expressions and there are no models that match. * - Contains the _all string or no identifiers and there are no matches. * - Contains wildcard expressions and there are only partial matches. * * If true, it returns an empty array when there are no matches and the * subset of results when there are partial matches. */ allow_no_match?: boolean; /** Specifies whether the included model definition should be returned as a * JSON map (true) or in a custom compressed format (false). */ decompress_definition?: boolean; /** Indicates if certain fields should be removed from the configuration on * retrieval. This allows the configuration to be in an acceptable format to * be retrieved and then added to another cluster. */ exclude_generated?: boolean; /** Skips the specified number of models. */ from?: integer; /** A comma-delimited string of optional fields to include in the response * body. */ include?: MlInclude; /** Specifies the maximum number of models to obtain. */ size?: integer; /** A comma-delimited string of tags. A trained model can have many tags, or * none. When supplied, only trained models that contain all the supplied * tags are returned. */ tags?: string | string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; allow_no_match?: never; decompress_definition?: never; exclude_generated?: never; from?: never; include?: never; size?: never; tags?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; allow_no_match?: never; decompress_definition?: never; exclude_generated?: never; from?: never; include?: never; size?: never; tags?: never; }; } export interface MlGetTrainedModelsResponse { count: integer; /** An array of trained model resources, which are sorted by the model_id value in ascending order. */ trained_model_configs: MlTrainedModelConfig[]; }
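/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * a typed MlGetTrainedModelsRequest that pages through tagged models
 * without pulling full definitions. All values are hypothetical.
 */
const getModelsRequest: MlGetTrainedModelsRequest = {
  model_id: 'my-model-*', // wildcard expression
  allow_no_match: true,
  exclude_generated: true, // strip cluster-specific fields for portability
  from: 0,
  size: 25,
  tags: ['prod', 'nlp'], // only models carrying all of these tags
};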
export interface MlGetTrainedModelsStatsRequest extends RequestBase { /** The unique identifier of the trained model or a model alias. It can be a * comma-separated list or a wildcard expression. */ model_id?: Ids; /** Specifies what to do when the request: * * - Contains wildcard expressions and there are no models that match. * - Contains the _all string or no identifiers and there are no matches. * - Contains wildcard expressions and there are only partial matches. * * If true, it returns an empty array when there are no matches and the * subset of results when there are partial matches. */ allow_no_match?: boolean; /** Skips the specified number of models. */ from?: integer; /** Specifies the maximum number of models to obtain. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; allow_no_match?: never; from?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; allow_no_match?: never; from?: never; size?: never; }; } export interface MlGetTrainedModelsStatsResponse { /** The total number of trained model statistics that matched the requested ID patterns. Could be higher than the number of items in the trained_model_stats array as the size of the array is restricted by the supplied size parameter. */ count: integer; /** An array of trained model statistics, which are sorted by the model_id value in ascending order. */ trained_model_stats: MlTrainedModelStats[]; } export interface MlInferTrainedModelRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** Controls the amount of time to wait for inference results. */ timeout?: Duration; /** An array of objects to pass to the model for inference. The objects should contain the fields matching your * configured trained model input. Typically, for NLP models, the field name is `text_field`. * Currently, for NLP models, only a single value is allowed. */ docs: Record<string, any>[]; /** The inference configuration updates to apply on the API call. */ inference_config?: MlInferenceConfigUpdateContainer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; timeout?: never; docs?: never; inference_config?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; timeout?: never; docs?: never; inference_config?: never; }; } export interface MlInferTrainedModelResponse { inference_results: MlInferenceResponseResult[]; } export interface MlInfoAnomalyDetectors { categorization_analyzer: MlCategorizationAnalyzer; categorization_examples_limit: integer; model_memory_limit: string; model_snapshot_retention_days: integer; daily_model_snapshot_retention_after_days: integer; } export interface MlInfoDatafeeds { scroll_size: integer; } export interface MlInfoDefaults { anomaly_detectors: MlInfoAnomalyDetectors; datafeeds: MlInfoDatafeeds; } export interface MlInfoLimits { max_single_ml_node_processors?: integer; total_ml_processors?: integer; max_model_memory_limit?: ByteSize; effective_max_model_memory_limit?: ByteSize; total_ml_memory: ByteSize; } export interface MlInfoNativeCode { build_hash: string; version: VersionString; } export interface MlInfoRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface MlInfoResponse { defaults: MlInfoDefaults; limits: MlInfoLimits; upgrade_mode: boolean; native_code: MlInfoNativeCode; }
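/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * a typed MlInferTrainedModelRequest for the NLP case described above,
 * where `text_field` is the conventional input field. The model ID and
 * text are hypothetical.
 */
const inferRequest: MlInferTrainedModelRequest = {
  model_id: 'my-ner-model',
  timeout: '30s',
  docs: [{ text_field: 'Elastic N.V. was founded in Amsterdam.' }],
};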
export interface MlOpenJobRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; timeout?: never; }; } export interface MlOpenJobResponse { opened: boolean; /** The ID of the node that the job was started on. In serverless this will be "serverless". * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ node: NodeId; } export interface MlPostCalendarEventsRequest extends RequestBase { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. */ events: MlCalendarEvent[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; events?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; events?: never; }; } export interface MlPostCalendarEventsResponse { events: MlCalendarEvent[]; } export interface MlPostDataRequest<TData = unknown> extends RequestBase { /** Identifier for the anomaly detection job. The job must have a state of `open` to receive and process the data. */ job_id: Id; /** Specifies the end of the bucket resetting range. */ reset_end?: DateTime; /** Specifies the start of the bucket resetting range. */ reset_start?: DateTime; data?: TData[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; reset_end?: never; reset_start?: never; data?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; reset_end?: never; reset_start?: never; data?: never; }; } export interface MlPostDataResponse { job_id: Id; processed_record_count: long; processed_field_count: long; input_bytes: long; input_field_count: long; invalid_date_count: long; missing_field_count: long; out_of_order_timestamp_count: long; empty_bucket_count: long; sparse_bucket_count: long; bucket_count: long; earliest_record_timestamp?: EpochTime<UnitMillis>; latest_record_timestamp?: EpochTime<UnitMillis>; last_data_time?: EpochTime<UnitMillis>; latest_empty_bucket_timestamp?: EpochTime<UnitMillis>; latest_sparse_bucket_timestamp?: EpochTime<UnitMillis>; input_record_count: long; log_time?: EpochTime<UnitMillis>; } export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { source: MlDataframeAnalyticsSource; analysis: MlDataframeAnalysisContainer; model_memory_limit?: string; max_num_threads?: integer; analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]; } export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. */ id?: Id; /** A data frame analytics config as described in create data frame analytics * jobs. Note that `id` and `dest` don’t need to be provided in the context of * this API. */ config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; config?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; config?: never; }; }
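/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * using the restored TData type parameter of MlPostDataRequest to type the
 * documents sent to an open job. The record shape and job ID are
 * hypothetical.
 */
interface CpuSample { timestamp: number; cpu: number; }
const postDataRequest: MlPostDataRequest<CpuSample> = {
  job_id: 'cpu-monitoring-job', // the job must be open
  data: [{ timestamp: 1700000000000, cpu: 0.42 }],
};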
export interface MlPreviewDataFrameAnalyticsResponse { /** An array of objects that contain feature name and value pairs. The features have been processed and indicate what will be sent to the model for training. */ feature_values: Record<Field, string>[]; } export interface MlPreviewDatafeedRequest extends RequestBase { /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric * characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job * configuration details in the request body. */ datafeed_id?: Id; /** The start time from which the datafeed preview should begin. */ start?: DateTime; /** The end time at which the datafeed preview should stop. */ end?: DateTime; /** The datafeed definition to preview. */ datafeed_config?: MlDatafeedConfig; /** The configuration details for the anomaly detection job that is associated with the datafeed. If the * `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must * supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is * used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ job_config?: MlJobConfig; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; start?: never; end?: never; datafeed_config?: never; job_config?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; start?: never; end?: never; datafeed_config?: never; job_config?: never; }; } export type MlPreviewDatafeedResponse<TDocument = unknown> = TDocument[]; export interface MlPutCalendarRequest extends RequestBase { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** An array of anomaly detection job identifiers. */ job_ids?: Id[]; /** A description of the calendar. */ description?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; job_ids?: never; description?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; job_ids?: never; description?: never; }; } export interface MlPutCalendarResponse { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** A description of the calendar. */ description?: string; /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids; } export interface MlPutCalendarJobRequest extends RequestBase { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ job_id: Ids; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { calendar_id?: never; job_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { calendar_id?: never; job_id?: never; }; } export interface MlPutCalendarJobResponse { /** A string that uniquely identifies a calendar. */ calendar_id: Id; /** A description of the calendar. */ description?: string; /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids; } export interface MlPutDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job.
This identifier can contain * lowercase alphanumeric characters (a-z and 0-9), hyphens, and * underscores. It must start and end with alphanumeric characters. */ id: Id; /** Specifies whether this job can start when there is insufficient machine * learning node capacity for it to be immediately assigned to a node. If * set to `false` and a machine learning node with capacity to run the job * cannot be immediately found, the API returns an error. If set to `true`, * the API does not return an error; the job waits in the `starting` state * until sufficient machine learning node capacity is available. This * behavior is also affected by the cluster-wide * `xpack.ml.max_lazy_ml_nodes` setting. */ allow_lazy_start?: boolean; /** The analysis configuration, which contains the information necessary to * perform one of the following types of analysis: classification, outlier * detection, or regression. */ analysis: MlDataframeAnalysisContainer; /** Specifies `includes` and/or `excludes` patterns to select which fields * will be included in the analysis. The patterns specified in `excludes` * are applied last, therefore `excludes` takes precedence. In other words, * if the same field is specified in both `includes` and `excludes`, then * the field will not be included in the analysis. If `analyzed_fields` is * not set, only the relevant fields will be included. For example, all the * numeric fields for outlier detection. * The supported fields vary for each type of analysis. Outlier detection * requires numeric or `boolean` data to analyze. The algorithms don’t * support missing values therefore fields that have data types other than * numeric or boolean are ignored. Documents where included fields contain * missing values, null values, or an array are also ignored. Therefore the * `dest` index may contain documents that don’t have an outlier score. * Regression supports fields that are numeric, `boolean`, `text`, * `keyword`, and `ip` data types. It is also tolerant of missing values. * Fields that are supported are included in the analysis, other fields are * ignored. Documents where included fields contain an array with two or * more values are also ignored. Documents in the `dest` index that don’t * contain a results field are not included in the regression analysis. * Classification supports fields that are numeric, `boolean`, `text`, * `keyword`, and `ip` data types. It is also tolerant of missing values. * Fields that are supported are included in the analysis, other fields are * ignored. Documents where included fields contain an array with two or * more values are also ignored. Documents in the `dest` index that don’t * contain a results field are not included in the classification analysis. * Classification analysis can be improved by mapping ordinal variable * values to a single number. For example, in case of age ranges, you can * model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]; /** A description of the job. */ description?: string; /** The destination configuration. */ dest: MlDataframeAnalyticsDestination; /** The maximum number of threads to be used by the analysis. Using more * threads may decrease the time necessary to complete the analysis at the * cost of using more CPU. Note that the process may use additional threads * for operational functionality other than the analysis itself. 
*/ max_num_threads?: integer; _meta?: Metadata; /** The approximate maximum amount of memory resources that are permitted for * analytical processing. If your `elasticsearch.yml` file contains an * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try * to create data frame analytics jobs that have `model_memory_limit` values * greater than that setting. */ model_memory_limit?: string; /** The configuration of how to source the analysis data. */ source: MlDataframeAnalyticsSource; headers?: HttpHeaders; version?: VersionString; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; allow_lazy_start?: never; analysis?: never; analyzed_fields?: never; description?: never; dest?: never; max_num_threads?: never; _meta?: never; model_memory_limit?: never; source?: never; headers?: never; version?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; allow_lazy_start?: never; analysis?: never; analyzed_fields?: never; description?: never; dest?: never; max_num_threads?: never; _meta?: never; model_memory_limit?: never; source?: never; headers?: never; version?: never; }; } export interface MlPutDataFrameAnalyticsResponse { authorization?: MlDataframeAnalyticsAuthorization; allow_lazy_start: boolean; analysis: MlDataframeAnalysisContainer; analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]; create_time: EpochTime<UnitMillis>; description?: string; dest: MlDataframeAnalyticsDestination; id: Id; max_num_threads: integer; _meta?: Metadata; model_memory_limit: string; source: MlDataframeAnalyticsSource; version: VersionString; }
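/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * a minimal MlPutDataFrameAnalyticsRequest for outlier detection. Index
 * names and limits are hypothetical; an empty `outlier_detection` object
 * accepts the defaults.
 */
const putDfaRequest: MlPutDataFrameAnalyticsRequest = {
  id: 'ecommerce-outliers',
  source: { index: 'ecommerce-orders' },
  dest: { index: 'ecommerce-outliers-results' },
  analysis: { outlier_detection: {} },
  model_memory_limit: '50mb',
  max_num_threads: 1,
};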
export interface MlPutDatafeedRequest extends RequestBase { /** A numerical character string that uniquely identifies the datafeed. * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. * It must start and end with alphanumeric characters. */ datafeed_id: Id; /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` * string or when no indices are specified. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards; /** If true, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean; /** If true, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean; /** If set, the datafeed performs aggregation searches. * Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record<string, AggregationsAggregationContainer>; /** If set, the datafeed performs aggregation searches. * Support for aggregations is limited and should be used only with low cardinality data. * @alias aggregations */ aggs?: Record<string, AggregationsAggregationContainer>; /** Datafeeds might be required to search over long time periods, for several months or years. * This search is split into time chunks in order to ensure the load on Elasticsearch is managed. * Chunking configuration controls how the size of these time chunks is calculated; * it is an advanced configuration option. */ chunking_config?: MlChunkingConfig; /** Specifies whether the datafeed checks for missing data and the size of the window. * The datafeed can optionally search over indices that have already been read in an effort to determine whether * any data has subsequently been added to the index. If missing data is found, it is a good indication that the * `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. * This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig; /** The interval at which scheduled queries are made while the datafeed runs in real time. * The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible * fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last * (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses * aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration; /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master * nodes and the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices; /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master * nodes and the machine learning nodes must have the `remote_cluster_client` role. * @alias indices */ indexes?: Indices; /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions; /** Identifier for the anomaly detection job. */ job_id?: Id; /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically * stops and closes the associated job after this many real-time searches return no documents. In other words, * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer; /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this * object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer; /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default * value is randomly selected between `60s` and `120s`. This randomness improves the query performance * when there are multiple jobs running on the same node. */ query_delay?: Duration; /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields; /** Specifies scripts that evaluate custom expressions and return script fields to the datafeed. * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record<string, ScriptField>; /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. * The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer; headers?: HttpHeaders; /** All values in `body` will be added to the request body.
*/ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; aggregations?: never; aggs?: never; chunking_config?: never; delayed_data_check_config?: never; frequency?: never; indices?: never; indexes?: never; indices_options?: never; job_id?: never; max_empty_searches?: never; query?: never; query_delay?: never; runtime_mappings?: never; script_fields?: never; scroll_size?: never; headers?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; aggregations?: never; aggs?: never; chunking_config?: never; delayed_data_check_config?: never; frequency?: never; indices?: never; indexes?: never; indices_options?: never; job_id?: never; max_empty_searches?: never; query?: never; query_delay?: never; runtime_mappings?: never; script_fields?: never; scroll_size?: never; headers?: never; }; } export interface MlPutDatafeedResponse { aggregations?: Record<string, AggregationsAggregationContainer>; authorization?: MlDatafeedAuthorization; chunking_config: MlChunkingConfig; delayed_data_check_config?: MlDelayedDataCheckConfig; datafeed_id: Id; frequency?: Duration; indices: string[]; job_id: Id; indices_options?: IndicesOptions; max_empty_searches?: integer; query: QueryDslQueryContainer; query_delay: Duration; runtime_mappings?: MappingRuntimeFields; script_fields?: Record<string, ScriptField>; scroll_size: integer; } export interface MlPutFilterRequest extends RequestBase { /** A string that uniquely identifies a filter. */ filter_id: Id; /** A description of the filter. */ description?: string; /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. * Up to 10000 items are allowed in each filter. */ items?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { filter_id?: never; description?: never; items?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { filter_id?: never; description?: never; items?: never; }; } export interface MlPutFilterResponse { description: string; filter_id: Id; items: string[]; }
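/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * a typed MlPutFilterRequest. The filter ID and items are hypothetical;
 * note the leading/trailing wildcard support described above.
 */
const putFilterRequest: MlPutFilterRequest = {
  filter_id: 'safe-domains',
  description: 'Domains that should never be flagged as anomalous',
  items: ['elastic.co', '*.example.com'],
};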
export interface MlPutJobRequest extends RequestBase { /** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ job_id: Id; /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the * `_all` string or when no indices are specified. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards; /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean; /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean; /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ allow_lazy_open?: boolean; /** Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig; /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits; /** Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. */ background_persist_interval?: Duration; /** Advanced configuration option. Contains custom metadata about the job. */ custom_settings?: MlCustomSettings; /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long; /** Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. */ data_description: MlDataDescription; /** Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. */ datafeed_config?: MlDatafeedConfig; /** A description of the job. */ description?: string; /** A list of job groups. A job can belong to no groups or many. */ groups?: string[]; /** This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot, it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced.
*/ model_plot_config?: MlModelPlotConfig; /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days?: long; /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. */ renormalization_window_days?: long; /** A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. */ results_index_name?: IndexName; /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */ results_retention_days?: long; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; allow_lazy_open?: never; analysis_config?: never; analysis_limits?: never; background_persist_interval?: never; custom_settings?: never; daily_model_snapshot_retention_after_days?: never; data_description?: never; datafeed_config?: never; description?: never; groups?: never; model_plot_config?: never; model_snapshot_retention_days?: never; renormalization_window_days?: never; results_index_name?: never; results_retention_days?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { job_id?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; allow_lazy_open?: never; analysis_config?: never; analysis_limits?: never; background_persist_interval?: never; custom_settings?: never; daily_model_snapshot_retention_after_days?: never; data_description?: never; datafeed_config?: never; description?: never; groups?: never; model_plot_config?: never; model_snapshot_retention_days?: never; renormalization_window_days?: never; results_index_name?: never; results_retention_days?: never; }; } export interface MlPutJobResponse { allow_lazy_open: boolean; analysis_config: MlAnalysisConfigRead; analysis_limits: MlAnalysisLimits; background_persist_interval?: Duration; create_time: DateTime; custom_settings?: MlCustomSettings; daily_model_snapshot_retention_after_days: long; data_description: MlDataDescription; datafeed_config?: MlDatafeed; description?: string; groups?: string[]; job_id: Id; job_type: string; job_version: string; model_plot_config?: MlModelPlotConfig; model_snapshot_id?: Id; model_snapshot_retention_days: long; renormalization_window_days?: long; results_index_name: string; results_retention_days?: long; } export interface MlPutTrainedModelAggregateOutput { logistic_regression?: MlPutTrainedModelWeights; weighted_sum?: MlPutTrainedModelWeights; weighted_mode?: MlPutTrainedModelWeights; exponent?: MlPutTrainedModelWeights; } export interface MlPutTrainedModelDefinition { /** Collection of preprocessors. */ preprocessors?: MlPutTrainedModelPreprocessor[]; /** The definition of the trained model. */ trained_model: MlPutTrainedModelTrainedModel; } export interface MlPutTrainedModelEnsemble { aggregate_output?: MlPutTrainedModelAggregateOutput; classification_labels?: string[]; feature_names?: string[]; target_type?: string; trained_models: MlPutTrainedModelTrainedModel[]; } export interface MlPutTrainedModelFrequencyEncodingPreprocessor { field: string; feature_name: string; frequency_map: Record<string, double>; } export interface MlPutTrainedModelInput { field_names: Names; } export interface MlPutTrainedModelOneHotEncodingPreprocessor { field: string; hot_map: Record<string, string>; } export interface MlPutTrainedModelPreprocessor { frequency_encoding?: MlPutTrainedModelFrequencyEncodingPreprocessor; one_hot_encoding?: MlPutTrainedModelOneHotEncodingPreprocessor; target_mean_encoding?: MlPutTrainedModelTargetMeanEncodingPreprocessor; }
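/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * a one-hot encoding preprocessor wrapped in the MlPutTrainedModelPreprocessor
 * container, using the map shapes reconstructed above. Field and feature
 * names are hypothetical.
 */
const preprocessor: MlPutTrainedModelPreprocessor = {
  one_hot_encoding: {
    field: 'browser',
    hot_map: { chrome: 'browser_chrome', firefox: 'browser_firefox' },
  },
};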
export interface MlPutTrainedModelRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** If set to `true` and a `compressed_definition` is provided, * the request defers definition decompression and skips relevant * validations. */ defer_definition_decompression?: boolean; /** Whether to wait for all child operations (e.g. model download) * to complete. */ wait_for_completion?: boolean; /** The compressed (GZipped and Base64 encoded) inference definition of the * model. If compressed_definition is specified, then definition cannot be * specified. */ compressed_definition?: string; /** The inference definition for the model. If definition is specified, then * compressed_definition cannot be specified. */ definition?: MlPutTrainedModelDefinition; /** A human-readable description of the inference trained model. */ description?: string; /** The default configuration for inference. This can be either a regression * or classification configuration. It must match the underlying * definition.trained_model's target_type. For pre-packaged models such as * ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer; /** The input field names for the model definition. */ input?: MlPutTrainedModelInput; /** An object map that contains metadata about the model. */ metadata?: any; /** The model type. */ model_type?: MlTrainedModelType; /** The estimated memory usage in bytes to keep the trained model in memory. * This property is supported only if defer_definition_decompression is true * or the model definition is not supplied. */ model_size_bytes?: long; /** The platform architecture (if applicable) of the trained model. If the model * only works on one platform, because it is heavily optimized for a particular * processor architecture and OS combination, then this field specifies which. * The format of the string must match the platform identifiers used by Elasticsearch, * so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, * or `windows-x86_64`. For portable models (those that work independent of processor * architecture or OS features), leave this field unset. */ platform_architecture?: string; /** An array of tags to organize the model. */ tags?: string[]; /** Optional prefix strings applied at inference. */ prefix_strings?: MlTrainedModelPrefixStrings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; defer_definition_decompression?: never; wait_for_completion?: never; compressed_definition?: never; definition?: never; description?: never; inference_config?: never; input?: never; metadata?: never; model_type?: never; model_size_bytes?: never; platform_architecture?: never; tags?: never; prefix_strings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; defer_definition_decompression?: never; wait_for_completion?: never; compressed_definition?: never; definition?: never; description?: never; inference_config?: never; input?: never; metadata?: never; model_type?: never; model_size_bytes?: never; platform_architecture?: never; tags?: never; prefix_strings?: never; }; } export type MlPutTrainedModelResponse = MlTrainedModelConfig; export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { field: string; feature_name: string; target_map: Record<string, double>; default_value: double; } export interface MlPutTrainedModelTrainedModel { /** The definition for a binary decision tree. */ tree?: MlPutTrainedModelTrainedModelTree; /** The definition of a node in a tree. * There are two major types of nodes: leaf nodes and non-leaf nodes. * - Leaf nodes only need node_index and leaf_value defined. * - All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined.
*/ tree_node?: MlPutTrainedModelTrainedModelTreeNode; /** The definition for an ensemble model */ ensemble?: MlPutTrainedModelEnsemble; } export interface MlPutTrainedModelTrainedModelTree { classification_labels?: string[]; feature_names: string[]; target_type?: string; tree_structure: MlPutTrainedModelTrainedModelTreeNode[]; } export interface MlPutTrainedModelTrainedModelTreeNode { decision_type?: string; default_left?: boolean; leaf_value?: double; left_child?: integer; node_index: integer; right_child?: integer; split_feature?: integer; split_gain?: integer; threshold?: double; } export interface MlPutTrainedModelWeights { weights: double; } export interface MlPutTrainedModelAliasRequest extends RequestBase { /** The alias to create or update. This value cannot end in numbers. */ model_alias: Name; /** The identifier for the trained model that the alias refers to. */ model_id: Id; /** Specifies whether the alias gets reassigned to the specified trained * model if it is already assigned to a different model. If the alias is * already assigned and this parameter is false, the API returns an error. */ reassign?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_alias?: never; model_id?: never; reassign?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_alias?: never; model_id?: never; reassign?: never; }; } export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase; export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the * order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ part: integer; /** The definition part for the model. Must be a base64 encoded string. */ definition: string; /** The total uncompressed definition length in bytes. Not base64 encoded. */ total_definition_length: long; /** The total number of parts that will be uploaded. Must be greater than 0. */ total_parts: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; part?: never; definition?: never; total_definition_length?: never; total_parts?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; part?: never; definition?: never; total_definition_length?: never; total_parts?: never; }; } export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase; export interface MlPutTrainedModelVocabularyRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** The model vocabulary, which must not be empty. */ vocabulary: string[]; /** The optional model merges if required by the tokenizer. */ merges?: string[]; /** The optional vocabulary value scores if required by the tokenizer. */ scores?: double[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; vocabulary?: never; merges?: never; scores?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { model_id?: never; vocabulary?: never; merges?: never; scores?: never; }; } export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase; export interface MlResetJobRequest extends RequestBase { /** The ID of the job to reset. */ job_id: Id; /** Whether this request should wait until the operation has completed before * returning. */ wait_for_completion?: boolean; /** Specifies whether annotations that have been added by the * user should be deleted along with any auto-generated annotations when the job is * reset. */ delete_user_annotations?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; wait_for_completion?: never; delete_user_annotations?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; wait_for_completion?: never; delete_user_annotations?: never; }; } export type MlResetJobResponse = AcknowledgedResponseBase; export interface MlRevertModelSnapshotRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** You can specify `empty` as the `<snapshot_id>`. Reverting to the empty * snapshot means the anomaly detection job starts learning a new model from * scratch when it is started. */ snapshot_id: Id; /** Refer to the description for the `delete_intervening_results` query parameter. */ delete_intervening_results?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; snapshot_id?: never; delete_intervening_results?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; snapshot_id?: never; delete_intervening_results?: never; }; } export interface MlRevertModelSnapshotResponse { model: MlModelSnapshot; } export interface MlSetUpgradeModeRequest extends RequestBase { /** When `true`, it enables `upgrade_mode`, which temporarily halts all job * and datafeed tasks and prohibits new job and datafeed tasks from * starting. */ enabled?: boolean; /** The time to wait for the request to be completed. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { enabled?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { enabled?: never; timeout?: never; }; } export type MlSetUpgradeModeResponse = AcknowledgedResponseBase; export interface MlStartDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. This identifier can contain * lowercase alphanumeric characters (a-z and 0-9), hyphens, and * underscores. It must start and end with alphanumeric characters. */ id: Id; /** Controls the amount of time to wait until the data frame analytics job * starts. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; timeout?: never; }; }
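/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * reverting a job to the special `empty` snapshot, per the
 * MlRevertModelSnapshotRequest shape above. The job ID is hypothetical.
 */
const revertRequest: MlRevertModelSnapshotRequest = {
  job_id: 'cpu-monitoring-job',
  snapshot_id: 'empty', // start learning a new model from scratch
  delete_intervening_results: true,
};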
export interface MlStartDataFrameAnalyticsResponse { acknowledged: boolean; /** The ID of the node that the job was started on. If the job is allowed to open lazily and has not yet been * assigned to a node, this value is an empty string. In serverless, if the job has been assigned to run, the * node ID will be "serverless". */ node: NodeId; } export interface MlStartDatafeedRequest extends RequestBase { /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric * characters. */ datafeed_id: Id; /** Refer to the description for the `end` query parameter. */ end?: DateTime; /** Refer to the description for the `start` query parameter. */ start?: DateTime; /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; end?: never; start?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; end?: never; start?: never; timeout?: never; }; } export interface MlStartDatafeedResponse { /** The ID of the node that the job was started on. In serverless this will be "serverless". * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ node: NodeIds; /** For a successful response, this value is always `true`. On failure, an exception is returned instead. */ started: boolean; }
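/*
 * Editorial sketch (illustrative, not part of the generated definitions):
 * starting a datafeed over a bounded time range, per MlStartDatafeedRequest
 * above. The datafeed ID is hypothetical.
 */
const startDatafeedRequest: MlStartDatafeedRequest = {
  datafeed_id: 'datafeed-cpu-monitoring',
  start: '2024-01-01T00:00:00Z',
  end: '2024-02-01T00:00:00Z',
  timeout: '30s',
};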
export interface MlStartTrainedModelDeploymentRequest extends RequestBase { /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id; /** The inference cache size (in memory outside the JVM heap) per node for the model. * The default value is the same size as the `model_size_bytes`. To disable the cache, * `0b` can be provided. */ cache_size?: ByteSize; /** A unique identifier for the deployment of the model. * @remarks This property is not supported on Elastic Cloud Serverless. */ deployment_id?: string; /** The number of model allocations on each node where the model is deployed. * All allocations on a node share the same copy of the model in memory but use * a separate set of threads to evaluate the model. * Increasing this value generally increases the throughput. * If this setting is greater than the number of hardware threads * it will automatically be changed to a value less than the number of hardware threads. * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer; /** The deployment priority. */ priority?: MlTrainingPriority; /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds * this value, new requests are rejected with a 429 error. */ queue_capacity?: integer; /** Sets the number of threads used by each model allocation during inference. This generally increases * the inference speed. The inference process is a compute-bound process; any number * greater than the number of available hardware threads on the machine does not increase the * inference speed. If this setting is greater than the number of hardware threads * it will automatically be changed to a value less than the number of hardware threads. */ threads_per_allocation?: integer; /** Specifies the amount of time to wait for the model to deploy. */ timeout?: Duration; /** Specifies the allocation status to wait for before returning. */ wait_for?: MlDeploymentAllocationState; /** Adaptive allocations configuration. When enabled, the number of allocations * is set based on the current load. * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; cache_size?: never; deployment_id?: never; number_of_allocations?: never; priority?: never; queue_capacity?: never; threads_per_allocation?: never; timeout?: never; wait_for?: never; adaptive_allocations?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; cache_size?: never; deployment_id?: never; number_of_allocations?: never; priority?: never; queue_capacity?: never; threads_per_allocation?: never; timeout?: never; wait_for?: never; adaptive_allocations?: never; }; } export interface MlStartTrainedModelDeploymentResponse { assignment: MlTrainedModelAssignment; } export interface MlStopDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. This identifier can contain * lowercase alphanumeric characters (a-z and 0-9), hyphens, and * underscores. It must start and end with alphanumeric characters. */ id: Id; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no data frame analytics * jobs that match. * 2. Contains the _all string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * The default value is true, which returns an empty data_frame_analytics * array when there are no matches and the subset of results when there are * partial matches. If this parameter is false, the request returns a 404 * status code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** If true, the data frame analytics job is stopped forcefully. */ force?: boolean; /** Controls the amount of time to wait until the data frame analytics job * stops. Defaults to 20 seconds. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; allow_no_match?: never; force?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; allow_no_match?: never; force?: never; timeout?: never; }; } export interface MlStopDataFrameAnalyticsResponse { stopped: boolean; } export interface MlStopDatafeedRequest extends RequestBase { /** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated * list of datafeeds or a wildcard expression. You can stop all datafeeds by using `_all` or by specifying `*` as * the identifier. */ datafeed_id: Id; /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean; /** Refer to the description for the `force` query parameter. */ force?: boolean; /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration; /** All values in `body` will be added to the request body.
*/ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; force?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { datafeed_id?: never; allow_no_match?: never; force?: never; timeout?: never; }; } export interface MlStopDatafeedResponse { stopped: boolean; } export interface MlStopTrainedModelDeploymentRequest extends RequestBase { /** The unique identifier of the trained model. */ model_id: Id; /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; * contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and * there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you * restart the model deployment. */ force?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; allow_no_match?: never; force?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { model_id?: never; allow_no_match?: never; force?: never; }; } export interface MlStopTrainedModelDeploymentResponse { stopped: boolean; } export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { /** Identifier for the data frame analytics job. This identifier can contain * lowercase alphanumeric characters (a-z and 0-9), hyphens, and * underscores. It must start and end with alphanumeric characters. */ id: Id; /** A description of the job. */ description?: string; /** The approximate maximum amount of memory resources that are permitted for * analytical processing. If your `elasticsearch.yml` file contains an * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try * to create data frame analytics jobs that have `model_memory_limit` values * greater than that setting. */ model_memory_limit?: string; /** The maximum number of threads to be used by the analysis. Using more * threads may decrease the time necessary to complete the analysis at the * cost of using more CPU. Note that the process may use additional threads * for operational functionality other than the analysis itself. */ max_num_threads?: integer; /** Specifies whether this job can start when there is insufficient machine * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; description?: never; model_memory_limit?: never; max_num_threads?: never; allow_lazy_start?: never; }); /** All values in `querystring` will be added to the request querystring. 
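 *
 * An illustrative sketch (not part of the generated types): updating a data
 * frame analytics job with the JS client, assuming a configured `client` from
 * `@elastic/elasticsearch`; the job ID is hypothetical.
 * @example
 * const resp = await client.ml.updateDataFrameAnalytics({
 *   id: 'my-dfa-job',                // hypothetical job ID
 *   model_memory_limit: '100mb',     // adjust the memory ceiling for the analysis
 *   max_num_threads: 2,
 * })
 * console.log(resp.model_memory_limit)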
*/ querystring?: { [key: string]: any; } & { id?: never; description?: never; model_memory_limit?: never; max_num_threads?: never; allow_lazy_start?: never; }; } export interface MlUpdateDataFrameAnalyticsResponse { authorization?: MlDataframeAnalyticsAuthorization; allow_lazy_start: boolean; analysis: MlDataframeAnalysisContainer; analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]; create_time: long; description?: string; dest: MlDataframeAnalyticsDestination; id: Id; max_num_threads: integer; model_memory_limit: string; source: MlDataframeAnalyticsSource; version: VersionString; } export interface MlUpdateDatafeedRequest extends RequestBase { /** A numerical character string that uniquely identifies the datafeed. * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. * It must start and end with alphanumeric characters. */ datafeed_id: Id; /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the * `_all` string or when no indices are specified. */ allow_no_indices?: boolean; /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards; /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean; /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean; /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and they should be used only * with low cardinality data. */ aggregations?: Record; /** Datafeeds might search over long time periods, for several months or years. This search is split into time * chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of * these time chunks is calculated; it is an advanced configuration option. */ chunking_config?: MlChunkingConfig; /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally * search over indices that have already been read in an effort to determine whether any data has subsequently been * added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and * the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time * datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig; /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is * either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket * span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are * written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value * must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration; /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine * learning nodes must have the `remote_cluster_client` role. */ indices?: string[]; /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine * learning nodes must have the `remote_cluster_client` role.
* @alias indices */ indexes?: string[]; /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions; job_id?: Id; /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically * stops and closes the associated job after this many real-time searches return no documents. In other words, * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer; /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this * object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also * changed. Therefore, the time required to learn might be long and the understandability of the results is * unpredictable. If you want to make significant changes to the source data, it is recommended that you * clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one * when you are satisfied with the results of the job. */ query?: QueryDslQueryContainer; /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default * value is randomly selected between `60s` and `120s`. This randomness improves the query performance * when there are multiple jobs running on the same node. */ query_delay?: Duration; /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields; /** Specifies scripts that evaluate custom expressions and return script fields to the datafeed. * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record; /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. * The maximum value is the value of `index.max_result_window`. */ scroll_size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { datafeed_id?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; aggregations?: never; chunking_config?: never; delayed_data_check_config?: never; frequency?: never; indices?: never; indexes?: never; indices_options?: never; job_id?: never; max_empty_searches?: never; query?: never; query_delay?: never; runtime_mappings?: never; script_fields?: never; scroll_size?: never; }); /** All values in `querystring` will be added to the request querystring.
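 *
 * An illustrative sketch (not part of the generated types): updating a
 * datafeed with the JS client, assuming a configured `client` from
 * `@elastic/elasticsearch`; the datafeed ID and query are hypothetical.
 * @example
 * const resp = await client.ml.updateDatafeed({
 *   datafeed_id: 'datafeed-my-job',                 // hypothetical datafeed ID
 *   query: { term: { 'event.dataset': 'sample' } }, // replaces the entire existing query
 *   scroll_size: 1000,
 * })
 * console.log(resp.query_delay)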
*/ querystring?: { [key: string]: any; } & { datafeed_id?: never; allow_no_indices?: never; expand_wildcards?: never; ignore_throttled?: never; ignore_unavailable?: never; aggregations?: never; chunking_config?: never; delayed_data_check_config?: never; frequency?: never; indices?: never; indexes?: never; indices_options?: never; job_id?: never; max_empty_searches?: never; query?: never; query_delay?: never; runtime_mappings?: never; script_fields?: never; scroll_size?: never; }; } export interface MlUpdateDatafeedResponse { authorization?: MlDatafeedAuthorization; aggregations?: Record; chunking_config: MlChunkingConfig; delayed_data_check_config?: MlDelayedDataCheckConfig; datafeed_id: Id; frequency?: Duration; indices: string[]; indices_options?: IndicesOptions; job_id: Id; max_empty_searches?: integer; query: QueryDslQueryContainer; query_delay: Duration; runtime_mappings?: MappingRuntimeFields; script_fields?: Record; scroll_size: integer; } export interface MlUpdateFilterRequest extends RequestBase { /** A string that uniquely identifies a filter. */ filter_id: Id; /** The items to add to the filter. */ add_items?: string[]; /** A description for the filter. */ description?: string; /** The items to remove from the filter. */ remove_items?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { filter_id?: never; add_items?: never; description?: never; remove_items?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { filter_id?: never; add_items?: never; description?: never; remove_items?: never; }; } export interface MlUpdateFilterResponse { description: string; filter_id: Id; items: string[]; } export interface MlUpdateJobRequest extends RequestBase { /** Identifier for the job. */ job_id: Id; /** Advanced configuration option. Specifies whether this job can open when * there is insufficient machine learning node capacity for it to be * immediately assigned to a node. If `false` and a machine learning node * with capacity to run the job cannot immediately be found, the open * anomaly detection jobs API returns an error. However, this is also * subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this * option is set to `true`, the open anomaly detection jobs API does not * return an error and the job waits in the opening state until sufficient * machine learning node capacity is available. */ allow_lazy_open?: boolean; analysis_limits?: MlAnalysisMemoryLimit; /** Advanced configuration option. The time between each periodic persistence * of the model. * The default value is a randomized value between 3 and 4 hours, which * avoids all jobs persisting at exactly the same time. The smallest allowed * value is 1 hour. * For very large models (several GB), persistence could take 10-20 minutes, * so do not set the value too low. * If the job is open when you make the update, you must stop the datafeed, * close the job, then reopen the job and restart the datafeed for the * changes to take effect. */ background_persist_interval?: Duration; /** Advanced configuration option. Contains custom metadata about the job. * For example, it can contain custom URL information as shown in Adding * custom URLs to machine learning results. */ custom_settings?: Record; categorization_filters?: string[]; /** A description of the job.
*/ description?: string; model_plot_config?: MlModelPlotConfig; model_prune_window?: Duration; /** Advanced configuration option, which affects the automatic removal of old * model snapshots for this job. It specifies a period of time (in days) * after which only the first snapshot per day is retained. This period is * relative to the timestamp of the most recent snapshot for this job. Valid * values range from 0 to `model_snapshot_retention_days`. For jobs created * before version 7.8.0, the default value matches * `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long; /** Advanced configuration option, which affects the automatic removal of old * model snapshots for this job. It specifies the maximum period of time (in * days) that snapshots are retained. This period is relative to the * timestamp of the most recent snapshot for this job. */ model_snapshot_retention_days?: long; /** Advanced configuration option. The period over which adjustments to the * score are applied, as new data is seen. */ renormalization_window_days?: long; /** Advanced configuration option. The period of time (in days) that results * are retained. Age is calculated relative to the timestamp of the latest * bucket result. If this property has a non-null value, once per day at * 00:30 (server time), results that are the specified number of days older * than the latest bucket result are deleted from Elasticsearch. The default * value is null, which means all results are retained. */ results_retention_days?: long; /** A list of job groups. A job can belong to no groups or many. */ groups?: string[]; /** An array of detector update objects. */ detectors?: MlDetectorUpdate[]; /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; allow_lazy_open?: never; analysis_limits?: never; background_persist_interval?: never; custom_settings?: never; categorization_filters?: never; description?: never; model_plot_config?: never; model_prune_window?: never; daily_model_snapshot_retention_after_days?: never; model_snapshot_retention_days?: never; renormalization_window_days?: never; results_retention_days?: never; groups?: never; detectors?: never; per_partition_categorization?: never; }); /** All values in `querystring` will be added to the request querystring. 
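 *
 * An illustrative sketch (not part of the generated types): updating an
 * anomaly detection job with the JS client, assuming a configured `client`
 * from `@elastic/elasticsearch`; the job ID and group name are hypothetical.
 * @example
 * const resp = await client.ml.updateJob({
 *   job_id: 'my-anomaly-job',        // hypothetical job ID
 *   description: 'Updated via the JS client',
 *   groups: ['production'],
 *   results_retention_days: 60,
 * })
 * console.log(resp.job_version)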
*/ querystring?: { [key: string]: any; } & { job_id?: never; allow_lazy_open?: never; analysis_limits?: never; background_persist_interval?: never; custom_settings?: never; categorization_filters?: never; description?: never; model_plot_config?: never; model_prune_window?: never; daily_model_snapshot_retention_after_days?: never; model_snapshot_retention_days?: never; renormalization_window_days?: never; results_retention_days?: never; groups?: never; detectors?: never; per_partition_categorization?: never; }; } export interface MlUpdateJobResponse { allow_lazy_open: boolean; analysis_config: MlAnalysisConfigRead; analysis_limits: MlAnalysisLimits; background_persist_interval?: Duration; create_time: EpochTime; finished_time?: EpochTime; custom_settings?: Record; daily_model_snapshot_retention_after_days: long; data_description: MlDataDescription; datafeed_config?: MlDatafeed; description?: string; groups?: string[]; job_id: Id; job_type: string; job_version: VersionString; model_plot_config?: MlModelPlotConfig; model_snapshot_id?: Id; model_snapshot_retention_days: long; renormalization_window_days?: long; results_index_name: IndexName; results_retention_days?: long; } export interface MlUpdateModelSnapshotRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** Identifier for the model snapshot. */ snapshot_id: Id; /** A description of the model snapshot. */ description?: string; /** If `true`, this snapshot will not be deleted during automatic cleanup of * snapshots older than `model_snapshot_retention_days`. However, this * snapshot will be deleted when the job is deleted. */ retain?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; snapshot_id?: never; description?: never; retain?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; snapshot_id?: never; description?: never; retain?: never; }; } export interface MlUpdateModelSnapshotResponse { acknowledged: boolean; model: MlModelSnapshot; } export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id; /** The number of model allocations on each node where the model is deployed. * All allocations on a node share the same copy of the model in memory but use * a separate set of threads to evaluate the model. * Increasing this value generally increases the throughput. * If this setting is greater than the number of hardware threads * it will automatically be changed to a value less than the number of hardware threads. * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer; /** Adaptive allocations configuration. When enabled, the number of allocations * is set based on the current load. * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { model_id?: never; number_of_allocations?: never; adaptive_allocations?: never; }); /** All values in `querystring` will be added to the request querystring. 
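 *
 * An illustrative sketch (not part of the generated types): switching a
 * deployment to adaptive allocations with the JS client, assuming a
 * configured `client` from `@elastic/elasticsearch`; the model ID is
 * hypothetical. Do not set `number_of_allocations` when
 * `adaptive_allocations` is enabled.
 * @example
 * const resp = await client.ml.updateTrainedModelDeployment({
 *   model_id: 'my-pytorch-model',    // hypothetical model ID
 *   adaptive_allocations: {
 *     enabled: true,
 *     min_number_of_allocations: 1,
 *     max_number_of_allocations: 4,
 *   },
 * })
 * console.log(resp.assignment.assignment_state)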
*/ querystring?: { [key: string]: any; } & { model_id?: never; number_of_allocations?: never; adaptive_allocations?: never; }; } export interface MlUpdateTrainedModelDeploymentResponse { assignment: MlTrainedModelAssignment; } export interface MlUpgradeJobSnapshotRequest extends RequestBase { /** Identifier for the anomaly detection job. */ job_id: Id; /** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id; /** When true, the API won’t respond until the upgrade is complete. * Otherwise, it responds as soon as the upgrade task is assigned to a node. */ wait_for_completion?: boolean; /** Controls the time to wait for the request to complete. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; snapshot_id?: never; wait_for_completion?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; snapshot_id?: never; wait_for_completion?: never; timeout?: never; }; } export interface MlUpgradeJobSnapshotResponse { /** The ID of the node that the upgrade task was started on if it is still running. In serverless this will be "serverless". */ node: NodeId; /** When true, this means the task is complete. When false, it is still running. */ completed: boolean; } export interface MlValidateRequest extends RequestBase { job_id?: Id; analysis_config?: MlAnalysisConfig; analysis_limits?: MlAnalysisLimits; data_description?: MlDataDescription; description?: string; model_plot?: MlModelPlotConfig; model_snapshot_id?: Id; model_snapshot_retention_days?: long; results_index_name?: IndexName; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { job_id?: never; analysis_config?: never; analysis_limits?: never; data_description?: never; description?: never; model_plot?: never; model_snapshot_id?: never; model_snapshot_retention_days?: never; results_index_name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { job_id?: never; analysis_config?: never; analysis_limits?: never; data_description?: never; description?: never; model_plot?: never; model_snapshot_id?: never; model_snapshot_retention_days?: never; results_index_name?: never; }; } export type MlValidateResponse = AcknowledgedResponseBase; export interface MlValidateDetectorRequest extends RequestBase { detector?: MlDetector; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { detector?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { detector?: never; }; } export type MlValidateDetectorResponse = AcknowledgedResponseBase; export interface MonitoringBulkRequest extends RequestBase { /** Default document type for items which don't provide one */ type?: string; /** Identifier of the monitored system */ system_id: string; /** API version of the monitored system */ system_api_version: string; /** Collection interval (e.g., '10s' or '10000ms') of the payload */ interval: Duration; operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[]; /** All values in `body` will be added to the request body.
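 *
 * An illustrative sketch (not part of the generated types) of this internal
 * monitoring API, assuming a configured `client` from
 * `@elastic/elasticsearch`; the system values and metric document are
 * hypothetical.
 * @example
 * const resp = await client.monitoring.bulk({
 *   system_id: 'kibana',             // hypothetical monitored system
 *   system_api_version: '7',         // hypothetical monitored system API version
 *   interval: '10s',
 *   operations: [
 *     { index: { _index: 'metrics-example' } },  // action line (hypothetical index)
 *     { my_metric: 42 },                         // source line for the action above
 *   ],
 * })
 * console.log(resp.errors)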
*/ body?: string | ({ [key: string]: any; } & { type?: never; system_id?: never; system_api_version?: never; interval?: never; operations?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { type?: never; system_id?: never; system_api_version?: never; interval?: never; operations?: never; }; } export interface MonitoringBulkResponse { error?: ErrorCause; /** True if there was an error */ errors: boolean; /** Was collection disabled? */ ignored: boolean; took: long; } export interface NodesAdaptiveSelection { /** The exponentially weighted moving average queue size of search requests on the keyed node. */ avg_queue_size?: long; /** The exponentially weighted moving average response time of search requests on the keyed node. */ avg_response_time?: Duration; /** The exponentially weighted moving average response time, in nanoseconds, of search requests on the keyed node. */ avg_response_time_ns?: long; /** The exponentially weighted moving average service time of search requests on the keyed node. */ avg_service_time?: Duration; /** The exponentially weighted moving average service time, in nanoseconds, of search requests on the keyed node. */ avg_service_time_ns?: long; /** The number of outstanding search requests to the keyed node from the node these stats are for. */ outgoing_searches?: long; /** The rank of this node; used for shard selection when routing search requests. */ rank?: string; } export interface NodesBreaker { /** Estimated memory used for the operation. */ estimated_size?: string; /** Estimated memory used, in bytes, for the operation. */ estimated_size_in_bytes?: long; /** Memory limit for the circuit breaker. */ limit_size?: string; /** Memory limit, in bytes, for the circuit breaker. */ limit_size_in_bytes?: long; /** A constant that all estimates for the circuit breaker are multiplied with to calculate a final estimate. */ overhead?: float; /** Total number of times the circuit breaker has been triggered and prevented an out of memory error. */ tripped?: float; } export interface NodesCgroup { /** Contains statistics about `cpuacct` control group for the node. */ cpuacct?: NodesCpuAcct; /** Contains statistics about `cpu` control group for the node. */ cpu?: NodesCgroupCpu; /** Contains statistics about the memory control group for the node. */ memory?: NodesCgroupMemory; } export interface NodesCgroupCpu { /** The `cpu` control group to which the Elasticsearch process belongs. */ control_group?: string; /** The period of time, in microseconds, for how regularly all tasks in the same cgroup as the Elasticsearch process should have their access to CPU resources reallocated. */ cfs_period_micros?: integer; /** The total amount of time, in microseconds, for which all tasks in the same cgroup as the Elasticsearch process can run during one period `cfs_period_micros`. */ cfs_quota_micros?: integer; /** Contains CPU statistics for the node. */ stat?: NodesCgroupCpuStat; } export interface NodesCgroupCpuStat { /** The number of reporting periods (as specified by `cfs_period_micros`) that have elapsed. */ number_of_elapsed_periods?: long; /** The number of times all tasks in the same cgroup as the Elasticsearch process have been throttled. */ number_of_times_throttled?: long; /** The total amount of time, in nanoseconds, for which all tasks in the same cgroup as the Elasticsearch process have been throttled.
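 *
 * An illustrative sketch (not part of the generated types): reading these
 * cgroup CPU counters from the node stats API, assuming a configured `client`
 * from `@elastic/elasticsearch`.
 * @example
 * const resp = await client.nodes.stats({ metric: 'os' })
 * for (const [id, node] of Object.entries(resp.nodes)) {
 *   const stat = node.os?.cgroup?.cpu?.stat
 *   console.log(id, stat?.number_of_times_throttled, stat?.time_throttled_nanos)
 * }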
*/ time_throttled_nanos?: DurationValue; } export interface NodesCgroupMemory { /** The `memory` control group to which the Elasticsearch process belongs. */ control_group?: string; /** The maximum amount of user memory (including file cache) allowed for all tasks in the same cgroup as the Elasticsearch process. * This value can be too big to store in a `long`, so is returned as a string so that the value returned can exactly match what the underlying operating system interface returns. * Any value that is too large to parse into a `long` almost certainly means no limit has been set for the cgroup. */ limit_in_bytes?: string; /** The total current memory usage by processes in the cgroup, in bytes, by all tasks in the same cgroup as the Elasticsearch process. * This value is stored as a string for consistency with `limit_in_bytes`. */ usage_in_bytes?: string; } export interface NodesClient { /** Unique ID for the HTTP client. */ id?: long; /** Reported agent for the HTTP client. * If unavailable, this property is not included in the response. */ agent?: string; /** Local address for the HTTP connection. */ local_address?: string; /** Remote address for the HTTP connection. */ remote_address?: string; /** The URI of the client’s most recent request. */ last_uri?: string; /** Time at which the client opened the connection. */ opened_time_millis?: long; /** Time at which the client closed the connection if the connection is closed. */ closed_time_millis?: long; /** Time of the most recent request from this client. */ last_request_time_millis?: long; /** Number of requests from this client. */ request_count?: long; /** Cumulative size in bytes of all requests from this client. */ request_size_bytes?: long; /** Value from the client’s `x-opaque-id` HTTP header. * If unavailable, this property is not included in the response. */ x_opaque_id?: string; } export interface NodesClusterAppliedStats { recordings?: NodesRecording[]; } export interface NodesClusterStateQueue { /** Total number of cluster states in queue. */ total?: long; /** Number of pending cluster states in queue. */ pending?: long; /** Number of committed cluster states in queue. */ committed?: long; } export interface NodesClusterStateUpdate { /** The number of cluster state update attempts that did not change the cluster state since the node started. */ count: long; /** The cumulative amount of time spent computing no-op cluster state updates since the node started. */ computation_time?: Duration; /** The cumulative amount of time, in milliseconds, spent computing no-op cluster state updates since the node started. */ computation_time_millis?: DurationValue; /** The cumulative amount of time spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time?: Duration; /** The cumulative amount of time, in milliseconds, spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. 
* This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time_millis?: DurationValue; /** The cumulative amount of time spent constructing a publication context since the node started for publications that ultimately succeeded. * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time?: Duration; /** The cumulative amount of time, in milliseconds, spent constructing a publication context since the node started for publications that ultimately succeeded. * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time_millis?: DurationValue; /** The cumulative amount of time spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time?: Duration; /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time_millis?: DurationValue; /** The cumulative amount of time spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time?: Duration; /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time_millis?: DurationValue; /** The cumulative amount of time spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time?: Duration; /** The cumulative amount of time, in milliseconds, spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time_millis?: DurationValue; /** The cumulative amount of time spent notifying listeners of a no-op cluster state update since the node started. */ notification_time?: Duration; /** The cumulative amount of time, in milliseconds, spent notifying listeners of a no-op cluster state update since the node started. */ notification_time_millis?: DurationValue; } export interface NodesContext { context?: string; compilations?: long; cache_evictions?: long; compilation_limit_triggered?: long; } export interface NodesCpu { percent?: integer; sys?: Duration; sys_in_millis?: DurationValue; total?: Duration; total_in_millis?: DurationValue; user?: Duration; user_in_millis?: DurationValue; load_average?: Record; } export interface NodesCpuAcct { /** The `cpuacct` control group to which the Elasticsearch process belongs. */ control_group?: string; /** The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup as the Elasticsearch process.
*/ usage_nanos?: DurationValue; } export interface NodesDataPathStats { /** Total amount of disk space available to this Java virtual machine on this file store. */ available?: string; /** Total number of bytes available to this Java virtual machine on this file store. */ available_in_bytes?: long; disk_queue?: string; disk_reads?: long; disk_read_size?: string; disk_read_size_in_bytes?: long; disk_writes?: long; disk_write_size?: string; disk_write_size_in_bytes?: long; /** Total amount of unallocated disk space in the file store. */ free?: string; /** Total number of unallocated bytes in the file store. */ free_in_bytes?: long; /** Mount point of the file store (for example: `/dev/sda2`). */ mount?: string; /** Path to the file store. */ path?: string; /** Total size of the file store. */ total?: string; /** Total size of the file store in bytes. */ total_in_bytes?: long; /** Type of the file store (ex: ext4). */ type?: string; } export interface NodesDiscovery { /** Contains statistics for the cluster state queue of the node. */ cluster_state_queue?: NodesClusterStateQueue; /** Contains statistics for the published cluster states of the node. */ published_cluster_states?: NodesPublishedClusterStates; /** Contains low-level statistics about how long various activities took during cluster state updates while the node was the elected master. * Omitted if the node is not master-eligible. * Every field whose name ends in `_time` within this object is also represented as a raw number of milliseconds in a field whose name ends in `_time_millis`. * The human-readable fields with a `_time` suffix are only returned if requested with the `?human=true` query parameter. */ cluster_state_update?: Record; serialized_cluster_states?: NodesSerializedClusterState; cluster_applier_stats?: NodesClusterAppliedStats; } export interface NodesExtendedMemoryStats extends NodesMemoryStats { /** Percentage of free memory. */ free_percent?: integer; /** Percentage of used memory. */ used_percent?: integer; } export interface NodesFileSystem { /** List of all file stores. */ data?: NodesDataPathStats[]; /** Last time the file stores statistics were refreshed. * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long; /** Contains statistics for all file stores of the node. */ total?: NodesFileSystemTotal; /** Contains I/O statistics for the node. */ io_stats?: NodesIoStats; } export interface NodesFileSystemTotal { /** Total disk space available to this Java virtual machine on all file stores. * Depending on OS or process level restrictions, this might appear less than `free`. * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available?: string; /** Total number of bytes available to this Java virtual machine on all file stores. * Depending on OS or process level restrictions, this might appear less than `free_in_bytes`. * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available_in_bytes?: long; /** Total unallocated disk space in all file stores. */ free?: string; /** Total number of unallocated bytes in all file stores. */ free_in_bytes?: long; /** Total size of all file stores. */ total?: string; /** Total size of all file stores in bytes. */ total_in_bytes?: long; } export interface NodesGarbageCollector { /** Contains statistics about JVM garbage collectors for the node. */ collectors?: Record; } export interface NodesGarbageCollectorTotal { /** Total number of JVM garbage collectors that collect objects. 
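 *
 * An illustrative sketch (not part of the generated types): reading
 * per-collector totals from the node stats API, assuming a configured
 * `client` from `@elastic/elasticsearch`.
 * @example
 * const resp = await client.nodes.stats({ metric: 'jvm' })
 * for (const [id, node] of Object.entries(resp.nodes)) {
 *   for (const [name, gc] of Object.entries(node.jvm?.gc?.collectors ?? {})) {
 *     console.log(id, name, gc.collection_count, gc.collection_time_in_millis)
 *   }
 * }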
*/ collection_count?: long; /** Total time spent by JVM collecting objects. */ collection_time?: string; /** Total time, in milliseconds, spent by JVM collecting objects. */ collection_time_in_millis?: long; } export interface NodesHttp { /** Current number of open HTTP connections for the node. */ current_open?: integer; /** Total number of HTTP connections opened for the node. */ total_opened?: long; /** Information on current and recently-closed HTTP client connections. * Clients that have been closed longer than the `http.client_stats.closed_channels.max_age` setting will not be represented here. */ clients?: NodesClient[]; /** Detailed HTTP stats broken down by route * @remarks This property is not supported on Elastic Cloud Serverless. */ routes: Record; } export interface NodesHttpRoute { requests: NodesHttpRouteRequests; responses: NodesHttpRouteResponses; } export interface NodesHttpRouteRequests { count: long; total_size_in_bytes: long; size_histogram: NodesSizeHttpHistogram[]; } export interface NodesHttpRouteResponses { count: long; total_size_in_bytes: long; handling_time_histogram: NodesTimeHttpHistogram[]; size_histogram: NodesSizeHttpHistogram[]; } export interface NodesIndexingPressure { /** Contains statistics for memory consumption from indexing load. */ memory?: NodesIndexingPressureMemory; } export interface NodesIndexingPressureMemory { /** Configured memory limit for the indexing requests. * Replica requests have an automatic limit that is 1.5x this value. */ limit?: ByteSize; /** Configured memory limit, in bytes, for the indexing requests. * Replica requests have an automatic limit that is 1.5x this value. */ limit_in_bytes?: long; /** Contains statistics for current indexing load. */ current?: NodesPressureMemory; /** Contains statistics for the cumulative indexing load since the node started. */ total?: NodesPressureMemory; } export interface NodesIngest { /** Contains statistics about ingest pipelines for the node. */ pipelines?: Record; /** Contains statistics about ingest operations for the node. */ total?: NodesIngestTotal; } export interface NodesIngestStats { /** Total number of documents ingested during the lifetime of this node. */ count: long; /** Total number of documents currently being ingested. */ current: long; /** Total number of failed ingest operations during the lifetime of this node. */ failed: long; /** Total number of ingest processors. */ processors: Record[]; /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue; /** Total number of bytes of all documents ingested by the pipeline. * This field is only present on pipelines which are the first to process a document. * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. */ ingested_as_first_pipeline_in_bytes: long; /** Total number of bytes of all documents produced by the pipeline. * This field is only present on pipelines which are the first to process a document. * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. * In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines have run. 
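 *
 * An illustrative sketch (not part of the generated types): inspecting
 * per-pipeline ingest counters from the node stats API, assuming a configured
 * `client` from `@elastic/elasticsearch`.
 * @example
 * const resp = await client.nodes.stats({ metric: 'ingest' })
 * for (const [id, node] of Object.entries(resp.nodes)) {
 *   for (const [pipeline, stats] of Object.entries(node.ingest?.pipelines ?? {})) {
 *     console.log(id, pipeline, stats.count, stats.failed)
 *   }
 * }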
*/ produced_as_first_pipeline_in_bytes: long; } export interface NodesIngestTotal { /** Total number of documents ingested during the lifetime of this node. */ count: long; /** Total number of documents currently being ingested. */ current: long; /** Total number of failed ingest operations during the lifetime of this node. */ failed: long; /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue; } export interface NodesIoStatDevice { /** The Linux device name. */ device_name?: string; /** The total number of read and write operations for the device completed since starting Elasticsearch. */ operations?: long; /** The total number of kilobytes read for the device since starting Elasticsearch. */ read_kilobytes?: long; /** The total number of read operations for the device completed since starting Elasticsearch. */ read_operations?: long; /** The total number of kilobytes written for the device since starting Elasticsearch. */ write_kilobytes?: long; /** The total number of write operations for the device completed since starting Elasticsearch. */ write_operations?: long; } export interface NodesIoStats { /** Array of disk metrics for each device that is backing an Elasticsearch data path. * These disk metrics are probed periodically and averages between the last probe and the current probe are computed. */ devices?: NodesIoStatDevice[]; /** The sum of the disk metrics for all devices that back an Elasticsearch data path. */ total?: NodesIoStatDevice; } export interface NodesJvm { /** Contains statistics about JVM buffer pools for the node. */ buffer_pools?: Record; /** Contains statistics about classes loaded by JVM for the node. */ classes?: NodesJvmClasses; /** Contains statistics about JVM garbage collectors for the node. */ gc?: NodesGarbageCollector; /** Contains JVM memory usage statistics for the node. */ mem?: NodesJvmMemoryStats; /** Contains statistics about JVM thread usage for the node. */ threads?: NodesJvmThreads; /** Last time JVM statistics were refreshed. */ timestamp?: long; /** Human-readable JVM uptime. * Only returned if the `human` query parameter is `true`. */ uptime?: string; /** JVM uptime in milliseconds. */ uptime_in_millis?: long; } export interface NodesJvmClasses { /** Number of classes currently loaded by JVM. */ current_loaded_count?: long; /** Total number of classes loaded since the JVM started. */ total_loaded_count?: long; /** Total number of classes unloaded since the JVM started. */ total_unloaded_count?: long; } export interface NodesJvmMemoryStats { /** Memory, in bytes, currently in use by the heap. */ heap_used_in_bytes?: long; /** Percentage of memory currently in use by the heap. */ heap_used_percent?: long; /** Amount of memory, in bytes, available for use by the heap. */ heap_committed_in_bytes?: long; /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes?: long; /** Non-heap memory used, in bytes. */ non_heap_used_in_bytes?: long; /** Amount of non-heap memory available, in bytes. */ non_heap_committed_in_bytes?: long; /** Contains statistics about heap memory usage for the node. */ pools?: Record; } export interface NodesJvmThreads { /** Number of active threads in use by JVM. */ count?: long; /** Highest number of threads used by JVM. 
*/ peak_count?: long; } export interface NodesKeyedProcessor { stats?: NodesProcessor; type?: string; } export interface NodesMemoryStats { /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property then this reports the overridden value in bytes. * Otherwise it reports the same value as `total_in_bytes`. */ adjusted_total_in_bytes?: long; resident?: string; resident_in_bytes?: long; share?: string; share_in_bytes?: long; total_virtual?: string; total_virtual_in_bytes?: long; /** Total amount of physical memory in bytes. */ total_in_bytes?: long; /** Amount of free physical memory in bytes. */ free_in_bytes?: long; /** Amount of used physical memory in bytes. */ used_in_bytes?: long; } export interface NodesNodeBufferPool { /** Number of buffer pools. */ count?: long; /** Total capacity of buffer pools. */ total_capacity?: string; /** Total capacity of buffer pools in bytes. */ total_capacity_in_bytes?: long; /** Size of buffer pools. */ used?: string; /** Size of buffer pools in bytes. */ used_in_bytes?: long; } export interface NodesNodeReloadResult { name: Name; reload_exception?: ErrorCause; } export interface NodesNodesResponseBase { /** Contains statistics about the number of nodes selected by the request’s node filters. */ _nodes?: NodeStatistics; } export interface NodesOperatingSystem { cpu?: NodesCpu; mem?: NodesExtendedMemoryStats; swap?: NodesMemoryStats; cgroup?: NodesCgroup; timestamp?: long; } export interface NodesPool { /** Memory, in bytes, used by the heap. */ used_in_bytes?: long; /** Maximum amount of memory, in bytes, available for use by the heap. */ max_in_bytes?: long; /** Largest amount of memory, in bytes, historically used by the heap. */ peak_used_in_bytes?: long; /** Largest amount of memory, in bytes, historically available for use by the heap. */ peak_max_in_bytes?: long; } export interface NodesPressureMemory { /** Memory consumed by indexing requests in the coordinating, primary, or replica stage. */ all?: ByteSize; /** Memory consumed, in bytes, by indexing requests in the coordinating, primary, or replica stage. */ all_in_bytes?: long; /** Memory consumed by indexing requests in the coordinating or primary stage. * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary?: ByteSize; /** Memory consumed, in bytes, by indexing requests in the coordinating or primary stage. * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary_in_bytes?: long; /** Memory consumed by indexing requests in the coordinating stage. */ coordinating?: ByteSize; /** Memory consumed, in bytes, by indexing requests in the coordinating stage. */ coordinating_in_bytes?: long; /** Memory consumed by indexing requests in the primary stage. */ primary?: ByteSize; /** Memory consumed, in bytes, by indexing requests in the primary stage. */ primary_in_bytes?: long; /** Memory consumed by indexing requests in the replica stage. */ replica?: ByteSize; /** Memory consumed, in bytes, by indexing requests in the replica stage. */ replica_in_bytes?: long; /** Number of indexing requests rejected in the coordinating stage. */ coordinating_rejections?: long; /** Number of indexing requests rejected in the primary stage. */ primary_rejections?: long; /** Number of indexing requests rejected in the replica stage.
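 *
 * An illustrative sketch (not part of the generated types): checking for
 * indexing-pressure rejections via the node stats API, assuming a configured
 * `client` from `@elastic/elasticsearch`.
 * @example
 * const resp = await client.nodes.stats({ metric: 'indexing_pressure' })
 * for (const [id, node] of Object.entries(resp.nodes)) {
 *   const total = node.indexing_pressure?.memory?.total
 *   console.log(id, total?.primary_rejections, total?.replica_rejections)
 * }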
*/ replica_rejections?: long; } export interface NodesProcess { /** Contains CPU statistics for the node. */ cpu?: NodesCpu; /** Contains virtual memory statistics for the node. */ mem?: NodesMemoryStats; /** Number of opened file descriptors associated with the current process, or `-1` if not supported. */ open_file_descriptors?: integer; /** Maximum number of file descriptors allowed on the system, or `-1` if not supported. */ max_file_descriptors?: integer; /** Last time the statistics were refreshed. * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long; } export interface NodesProcessor { /** Number of documents transformed by the processor. */ count?: long; /** Number of documents currently being transformed by the processor. */ current?: long; /** Number of failed operations for the processor. */ failed?: long; /** Time, in milliseconds, spent by the processor transforming documents. */ time_in_millis?: DurationValue; } export interface NodesPublishedClusterStates { /** Number of published cluster states. */ full_states?: long; /** Number of incompatible differences between published cluster states. */ incompatible_diffs?: long; /** Number of compatible differences between published cluster states. */ compatible_diffs?: long; } export interface NodesRecording { name?: string; cumulative_execution_count?: long; cumulative_execution_time?: Duration; cumulative_execution_time_millis?: DurationValue; } export interface NodesRepositoryLocation { base_path: string; /** Container name (Azure) */ container?: string; /** Bucket name (GCP, S3) */ bucket?: string; } export interface NodesRepositoryMeteringInformation { /** Repository name. */ repository_name: Name; /** Repository type. */ repository_type: string; /** Represents a unique location within the repository. */ repository_location: NodesRepositoryLocation; /** An identifier that changes every time the repository is updated. */ repository_ephemeral_id: Id; /** Time the repository was created or updated. Recorded in milliseconds since the Unix Epoch. */ repository_started_at: EpochTime; /** Time the repository was deleted or updated. Recorded in milliseconds since the Unix Epoch. */ repository_stopped_at?: EpochTime; /** A flag that tells whether or not this object has been archived. When a repository is closed or updated the * repository metering information is archived and kept for a certain period of time. This allows retrieving the * repository metering information of previous repository instantiations. */ archived: boolean; /** The cluster state version when this object was archived; this field can be used as a logical timestamp to delete * all the archived metrics up to an observed version. This field is only present for archived repository metering * information objects. The main purpose of this field is to avoid possible race conditions during repository metering * information deletions, i.e. deleting archived repositories metering information that we haven’t observed yet. */ cluster_version?: VersionNumber; /** An object with the number of requests performed against the repository grouped by request type.
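 *
 * An illustrative sketch (not part of the generated types): fetching
 * repository metering information, assuming a configured `client` from
 * `@elastic/elasticsearch`; `_all` targets every node.
 * @example
 * const resp = await client.nodes.getRepositoriesMeteringInfo({ node_id: '_all' })
 * console.log(resp.cluster_name)
 * console.log(JSON.stringify(resp.nodes, null, 2))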
*/ request_counts: NodesRequestCounts; } export interface NodesRequestCounts { /** Number of Get Blob Properties requests (Azure) */ GetBlobProperties?: long; /** Number of Get Blob requests (Azure) */ GetBlob?: long; /** Number of List Blobs requests (Azure) */ ListBlobs?: long; /** Number of Put Blob requests (Azure) */ PutBlob?: long; /** Number of Put Block requests (Azure) */ PutBlock?: long; /** Number of Put Block List requests (Azure) */ PutBlockList?: long; /** Number of get object requests (GCP, S3) */ GetObject?: long; /** Number of list objects requests (GCP, S3) */ ListObjects?: long; /** Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads * can perform multiple http requests to insert a single object but they are considered as a single request * since they are billed as an individual operation. (GCP) */ InsertObject?: long; /** Number of PutObject requests (S3) */ PutObject?: long; /** Number of Multipart requests, including CreateMultipartUpload, UploadPart and CompleteMultipartUpload requests (S3) */ PutMultipartObject?: long; } export interface NodesScriptCache { /** Total number of times the script cache has evicted old data. */ cache_evictions?: long; /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long; /** Total number of inline script compilations performed by the node. */ compilations?: long; context?: string; } export interface NodesScripting { /** Total number of times the script cache has evicted old data. */ cache_evictions?: long; /** Total number of inline script compilations performed by the node. */ compilations?: long; /** Contains the recent history of script compilations. */ compilations_history?: Record; /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long; contexts?: NodesContext[]; } export interface NodesSerializedClusterState { /** Number of published cluster states. */ full_states?: NodesSerializedClusterStateDetail; diffs?: NodesSerializedClusterStateDetail; } export interface NodesSerializedClusterStateDetail { count?: long; uncompressed_size?: string; uncompressed_size_in_bytes?: long; compressed_size?: string; compressed_size_in_bytes?: long; } export interface NodesSizeHttpHistogram { count: long; ge_bytes?: long; lt_bytes?: long; } export interface NodesStats { /** Statistics about adaptive replica selection. */ adaptive_selection?: Record; /** Statistics about the field data circuit breaker. */ breakers?: Record; /** File system information, data path, free disk space, read/write stats. */ fs?: NodesFileSystem; /** Network host for the node, based on the network host setting. */ host?: Host; /** HTTP connection information. */ http?: NodesHttp; /** Statistics about ingest preprocessing. */ ingest?: NodesIngest; /** IP address and port for the node. */ ip?: Ip | Ip[]; /** JVM stats, memory pool information, garbage collection, buffer pools, number of loaded/unloaded classes. */ jvm?: NodesJvm; /** Human-readable identifier for the node. * Based on the node name setting. */ name?: Name; /** Operating system stats, load average, mem, swap. */ os?: NodesOperatingSystem; /** Process statistics, memory consumption, cpu usage, open file descriptors. */ process?: NodesProcess; /** Roles assigned to the node. */ roles?: NodeRoles; /** Contains script statistics for the node.
*/ script?: NodesScripting; script_cache?: Record; /** Statistics about each thread pool, including current size, queue and rejected tasks. */ thread_pool?: Record; timestamp?: long; /** Transport statistics about sent and received bytes in cluster communication. */ transport?: NodesTransport; /** Host and port for the transport layer, used for internal communication between nodes in a cluster. */ transport_address?: TransportAddress; /** Contains a list of attributes for the node. */ attributes?: Record; /** Contains node discovery statistics for the node. */ discovery?: NodesDiscovery; /** Contains indexing pressure statistics for the node. */ indexing_pressure?: NodesIndexingPressure; /** Indices stats about size, document count, indexing and deletion times, search times, field cache size, merges and flushes. */ indices?: IndicesStatsShardStats; } export interface NodesThreadCount { /** Number of active threads in the thread pool. */ active?: long; /** Number of tasks completed by the thread pool executor. */ completed?: long; /** Highest number of active threads in the thread pool. */ largest?: long; /** Number of tasks in queue for the thread pool. */ queue?: long; /** Number of tasks rejected by the thread pool executor. */ rejected?: long; /** Number of threads in the thread pool. */ threads?: long; } export interface NodesTimeHttpHistogram { count: long; ge_millis?: long; lt_millis?: long; } export interface NodesTransport { /** The distribution of the time spent handling each inbound message on a transport thread, represented as a histogram. */ inbound_handling_time_histogram?: NodesTransportHistogram[]; /** The distribution of the time spent sending each outbound transport message on a transport thread, represented as a histogram. */ outbound_handling_time_histogram?: NodesTransportHistogram[]; /** Total number of RX (receive) packets received by the node during internal cluster communication. */ rx_count?: long; /** Size of RX packets received by the node during internal cluster communication. */ rx_size?: string; /** Size, in bytes, of RX packets received by the node during internal cluster communication. */ rx_size_in_bytes?: long; /** Current number of inbound TCP connections used for internal communication between nodes. */ server_open?: integer; /** Total number of TX (transmit) packets sent by the node during internal cluster communication. */ tx_count?: long; /** Size of TX packets sent by the node during internal cluster communication. */ tx_size?: string; /** Size, in bytes, of TX packets sent by the node during internal cluster communication. */ tx_size_in_bytes?: long; /** The cumulative number of outbound transport connections that this node has opened since it started. * Each transport connection may comprise multiple TCP connections but is only counted once in this statistic. * Transport connections are typically long-lived so this statistic should remain constant in a stable cluster. */ total_outbound_connections?: long; } export interface NodesTransportHistogram { /** The number of times a transport thread took a period of time within the bounds of this bucket to handle an inbound message. */ count?: long; /** The exclusive upper bound of the bucket in milliseconds. * May be omitted on the last bucket if this bucket has no upper bound. */ lt_millis?: long; /** The inclusive lower bound of the bucket in milliseconds. May be omitted on the first bucket if this bucket has no lower bound. 
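 *
 * An illustrative sketch (not part of the generated types): printing the
 * inbound handling-time histogram from the node stats API, assuming a
 * configured `client` from `@elastic/elasticsearch`.
 * @example
 * const resp = await client.nodes.stats({ metric: 'transport' })
 * for (const [id, node] of Object.entries(resp.nodes)) {
 *   for (const bucket of node.transport?.inbound_handling_time_histogram ?? []) {
 *     console.log(id, bucket.ge_millis ?? 0, bucket.lt_millis ?? Infinity, bucket.count)
 *   }
 * }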
*/ ge_millis?: long; } export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds; /** Specifies the maximum `archive_version` to be cleared from the archive. */ max_archive_version: long; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; max_archive_version?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; max_archive_version?: never; }; } export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase; export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name; /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record; } export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; }; } export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase; export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name; /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record; } export interface NodesHotThreadsRequest extends RequestBase { /** List of node IDs or names used to limit returned information. */ node_id?: NodeIds; /** If true, known idle threads (e.g. waiting in a socket select, or to get * a task from an empty queue) are filtered out. */ ignore_idle_threads?: boolean; /** The interval to do the second sampling of threads. */ interval?: Duration; /** Number of samples of thread stacktrace. */ snapshots?: long; /** Specifies the number of hot threads to provide information for. */ threads?: long; /** Period to wait for a response. If no response is received * before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The type to sample. */ type?: ThreadType; /** The sort order for 'cpu' type (default: total) */ sort?: ThreadType; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; ignore_idle_threads?: never; interval?: never; snapshots?: never; threads?: never; timeout?: never; type?: never; sort?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; ignore_idle_threads?: never; interval?: never; snapshots?: never; threads?: never; timeout?: never; type?: never; sort?: never; }; } export interface NodesHotThreadsResponse { } export interface NodesInfoDeprecationIndexing { enabled: boolean | string; } export interface NodesInfoNodeInfo { attributes: Record; build_flavor: string; /** Short hash of the last git commit in this release. */ build_hash: string; build_type: string; /** The node’s host name. 
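 *
 * An illustrative sketch (not part of the generated types): listing name,
 * version, and roles for each node via the node info API, assuming a
 * configured `client` from `@elastic/elasticsearch`.
 * @example
 * const resp = await client.nodes.info()
 * for (const [id, node] of Object.entries(resp.nodes)) {
 *   console.log(id, node.name, node.version, node.roles)
 * }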
*/ host: Host; http?: NodesInfoNodeInfoHttp; /** The node’s IP address. */ ip: Ip; jvm?: NodesInfoNodeJvmInfo; /** The node's name */ name: Name; network?: NodesInfoNodeInfoNetwork; os?: NodesInfoNodeOperatingSystemInfo; plugins?: PluginStats[]; process?: NodesInfoNodeProcessInfo; roles: NodeRoles; settings?: NodesInfoNodeInfoSettings; thread_pool?: Record; /** Total heap allowed to be used to hold recently indexed documents before they must be written to disk. This size is a shared pool across all shards on this node, and is controlled by Indexing Buffer settings. */ total_indexing_buffer?: long; /** Same as total_indexing_buffer, but expressed in bytes. */ total_indexing_buffer_in_bytes?: ByteSize; transport?: NodesInfoNodeInfoTransport; /** Host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress; /** Elasticsearch version running on this node. */ version: VersionString; modules?: PluginStats[]; ingest?: NodesInfoNodeInfoIngest; aggregations?: Record; } export interface NodesInfoNodeInfoAction { destructive_requires_name: string; } export interface NodesInfoNodeInfoAggregation { types: string[]; } export interface NodesInfoNodeInfoBootstrap { memory_lock: string; } export interface NodesInfoNodeInfoClient { type: string; } export interface NodesInfoNodeInfoDiscoverKeys { seed_hosts?: string[]; type?: string; seed_providers?: string[]; } export type NodesInfoNodeInfoDiscover = NodesInfoNodeInfoDiscoverKeys & { [property: string]: any; }; export interface NodesInfoNodeInfoHttp { bound_address: string[]; max_content_length?: ByteSize; max_content_length_in_bytes: long; publish_address: string; } export interface NodesInfoNodeInfoIngest { processors: NodesInfoNodeInfoIngestProcessor[]; } export interface NodesInfoNodeInfoIngestDownloader { enabled: string; } export interface NodesInfoNodeInfoIngestInfo { downloader: NodesInfoNodeInfoIngestDownloader; } export interface NodesInfoNodeInfoIngestProcessor { type: string; } export interface NodesInfoNodeInfoJvmMemory { direct_max?: ByteSize; direct_max_in_bytes: long; heap_init?: ByteSize; heap_init_in_bytes: long; heap_max?: ByteSize; heap_max_in_bytes: long; non_heap_init?: ByteSize; non_heap_init_in_bytes: long; non_heap_max?: ByteSize; non_heap_max_in_bytes: long; } export interface NodesInfoNodeInfoMemory { total: string; total_in_bytes: long; } export interface NodesInfoNodeInfoNetwork { primary_interface: NodesInfoNodeInfoNetworkInterface; refresh_interval: integer; } export interface NodesInfoNodeInfoNetworkInterface { address: string; mac_address: string; name: Name; } export interface NodesInfoNodeInfoOSCPU { cache_size: string; cache_size_in_bytes: integer; cores_per_socket: integer; mhz: integer; model: string; total_cores: integer; total_sockets: integer; vendor: string; } export interface NodesInfoNodeInfoPath { logs?: string; home?: string; repo?: string[]; data?: string | string[]; } export interface NodesInfoNodeInfoRepositories { url: NodesInfoNodeInfoRepositoriesUrl; } export interface NodesInfoNodeInfoRepositoriesUrl { allowed_urls: string; } export interface NodesInfoNodeInfoScript { allowed_types: string; disable_max_compilations_rate?: string; } export interface NodesInfoNodeInfoSearch { remote: NodesInfoNodeInfoSearchRemote; } export interface NodesInfoNodeInfoSearchRemote { connect: string; } export interface NodesInfoNodeInfoSettings { cluster: NodesInfoNodeInfoSettingsCluster; node: NodesInfoNodeInfoSettingsNode; path?: NodesInfoNodeInfoPath; repositories?: 
NodesInfoNodeInfoRepositories; discovery?: NodesInfoNodeInfoDiscover; action?: NodesInfoNodeInfoAction; client?: NodesInfoNodeInfoClient; http: NodesInfoNodeInfoSettingsHttp; bootstrap?: NodesInfoNodeInfoBootstrap; transport: NodesInfoNodeInfoSettingsTransport; network?: NodesInfoNodeInfoSettingsNetwork; xpack?: NodesInfoNodeInfoXpack; script?: NodesInfoNodeInfoScript; search?: NodesInfoNodeInfoSearch; ingest?: NodesInfoNodeInfoSettingsIngest; } export interface NodesInfoNodeInfoSettingsCluster { name: Name; routing?: IndicesIndexRouting; election: NodesInfoNodeInfoSettingsClusterElection; initial_master_nodes?: string[]; deprecation_indexing?: NodesInfoDeprecationIndexing; } export interface NodesInfoNodeInfoSettingsClusterElection { strategy: Name; } export interface NodesInfoNodeInfoSettingsHttp { type: NodesInfoNodeInfoSettingsHttpType | string; 'type.default'?: string; compression?: boolean | string; port?: integer | string; } export interface NodesInfoNodeInfoSettingsHttpType { default: string; } export interface NodesInfoNodeInfoSettingsIngest { attachment?: NodesInfoNodeInfoIngestInfo; append?: NodesInfoNodeInfoIngestInfo; csv?: NodesInfoNodeInfoIngestInfo; convert?: NodesInfoNodeInfoIngestInfo; date?: NodesInfoNodeInfoIngestInfo; date_index_name?: NodesInfoNodeInfoIngestInfo; dot_expander?: NodesInfoNodeInfoIngestInfo; enrich?: NodesInfoNodeInfoIngestInfo; fail?: NodesInfoNodeInfoIngestInfo; foreach?: NodesInfoNodeInfoIngestInfo; json?: NodesInfoNodeInfoIngestInfo; user_agent?: NodesInfoNodeInfoIngestInfo; kv?: NodesInfoNodeInfoIngestInfo; geoip?: NodesInfoNodeInfoIngestInfo; grok?: NodesInfoNodeInfoIngestInfo; gsub?: NodesInfoNodeInfoIngestInfo; join?: NodesInfoNodeInfoIngestInfo; lowercase?: NodesInfoNodeInfoIngestInfo; remove?: NodesInfoNodeInfoIngestInfo; rename?: NodesInfoNodeInfoIngestInfo; script?: NodesInfoNodeInfoIngestInfo; set?: NodesInfoNodeInfoIngestInfo; sort?: NodesInfoNodeInfoIngestInfo; split?: NodesInfoNodeInfoIngestInfo; trim?: NodesInfoNodeInfoIngestInfo; uppercase?: NodesInfoNodeInfoIngestInfo; urldecode?: NodesInfoNodeInfoIngestInfo; bytes?: NodesInfoNodeInfoIngestInfo; dissect?: NodesInfoNodeInfoIngestInfo; set_security_user?: NodesInfoNodeInfoIngestInfo; pipeline?: NodesInfoNodeInfoIngestInfo; drop?: NodesInfoNodeInfoIngestInfo; circle?: NodesInfoNodeInfoIngestInfo; inference?: NodesInfoNodeInfoIngestInfo; } export interface NodesInfoNodeInfoSettingsNetwork { host?: Host | Host[]; } export interface NodesInfoNodeInfoSettingsNode { name: Name; attr: Record; max_local_storage_nodes?: string; } export interface NodesInfoNodeInfoSettingsTransport { type: NodesInfoNodeInfoSettingsTransportType | string; 'type.default'?: string; features?: NodesInfoNodeInfoSettingsTransportFeatures; } export interface NodesInfoNodeInfoSettingsTransportFeatures { 'x-pack': string; } export interface NodesInfoNodeInfoSettingsTransportType { default: string; } export interface NodesInfoNodeInfoTransport { bound_address: string[]; publish_address: string; profiles: Record; } export interface NodesInfoNodeInfoXpack { license?: NodesInfoNodeInfoXpackLicense; security: NodesInfoNodeInfoXpackSecurity; notification?: Record; ml?: NodesInfoNodeInfoXpackMl; } export interface NodesInfoNodeInfoXpackLicense { self_generated: NodesInfoNodeInfoXpackLicenseType; } export interface NodesInfoNodeInfoXpackLicenseType { type: string; } export interface NodesInfoNodeInfoXpackMl { use_auto_machine_memory_percent?: boolean; } export interface NodesInfoNodeInfoXpackSecurity { http?: 
NodesInfoNodeInfoXpackSecuritySsl; enabled: string; transport?: NodesInfoNodeInfoXpackSecuritySsl; authc?: NodesInfoNodeInfoXpackSecurityAuthc; } export interface NodesInfoNodeInfoXpackSecurityAuthc { realms?: NodesInfoNodeInfoXpackSecurityAuthcRealms; token?: NodesInfoNodeInfoXpackSecurityAuthcToken; } export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { file?: Record; native?: Record; pki?: Record; } export interface NodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { enabled?: string; order: string; } export interface NodesInfoNodeInfoXpackSecurityAuthcToken { enabled: string; } export interface NodesInfoNodeInfoXpackSecuritySsl { ssl: Record; } export interface NodesInfoNodeJvmInfo { gc_collectors: string[]; mem: NodesInfoNodeInfoJvmMemory; memory_pools: string[]; pid: integer; start_time_in_millis: EpochTime; version: VersionString; vm_name: Name; vm_vendor: string; vm_version: VersionString; using_bundled_jdk: boolean; /** @alias using_bundled_jdk */ bundled_jdk: boolean; using_compressed_ordinary_object_pointers?: boolean | string; input_arguments: string[]; } export interface NodesInfoNodeOperatingSystemInfo { /** Name of the JVM architecture (ex: amd64, x86) */ arch: string; /** Number of processors available to the Java virtual machine */ available_processors: integer; /** The number of processors actually used to calculate thread pool size. This number can be set with the node.processors setting of a node and defaults to the number of processors reported by the OS. */ allocated_processors?: integer; /** Name of the operating system (ex: Linux, Windows, Mac OS X) */ name: Name; pretty_name: Name; /** Refresh interval for the OS statistics */ refresh_interval_in_millis: DurationValue; /** Version of the operating system */ version: VersionString; cpu?: NodesInfoNodeInfoOSCPU; mem?: NodesInfoNodeInfoMemory; swap?: NodesInfoNodeInfoMemory; } export interface NodesInfoNodeProcessInfo { /** Process identifier (PID) */ id: long; /** Indicates if the process address space has been successfully locked in memory */ mlockall: boolean; /** Refresh interval for the process statistics */ refresh_interval_in_millis: DurationValue; } export interface NodesInfoNodeThreadPoolInfo { core?: integer; keep_alive?: Duration; max?: integer; queue_size: integer; size?: integer; type: string; } export interface NodesInfoRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds; /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ metric?: Metrics; /** If true, returns settings in flat format. */ flat_settings?: boolean; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; metric?: never; flat_settings?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; metric?: never; flat_settings?: never; timeout?: never; }; } export type NodesInfoResponse = NodesInfoResponseBase; export interface NodesInfoResponseBase extends NodesNodesResponseBase { cluster_name: Name; nodes: Record; } export interface NodesReloadSecureSettingsRequest extends RequestBase { /** The names of particular nodes in the cluster to target. 
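*
* For example, a hedged usage sketch (assuming the standard `@elastic/elasticsearch` client, which
* exposes this request as `client.nodes.reloadSecureSettings`; the node names are made up):
* `await client.nodes.reloadSecureSettings({ node_id: 'node-1,node-2' })`.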
*/ node_id?: NodeIds; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The password for the Elasticsearch keystore. */ secure_settings_password?: Password; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; timeout?: never; secure_settings_password?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; timeout?: never; secure_settings_password?: never; }; } export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase; export interface NodesReloadSecureSettingsResponseBase extends NodesNodesResponseBase { cluster_name: Name; nodes: Record; } export interface NodesStatsRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds; /** Limit the information returned to the specified metrics */ metric?: Metrics; /** Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. */ index_metric?: Metrics; /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields; /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ fielddata_fields?: Fields; /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields; /** Comma-separated list of search groups to include in the search statistics. */ groups?: boolean; /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean; /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ level?: Level; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** A comma-separated list of document types for the indexing index metric. */ types?: string[]; /** If `true`, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; metric?: never; index_metric?: never; completion_fields?: never; fielddata_fields?: never; fields?: never; groups?: never; include_segment_file_sizes?: never; level?: never; timeout?: never; types?: never; include_unloaded_segments?: never; }); /** All values in `querystring` will be added to the request querystring. 
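*
* As an illustrative sketch (assuming the standard `@elastic/elasticsearch` client, which exposes
* this request as `client.nodes.stats`):
* `await client.nodes.stats({ metric: 'jvm,indices', level: 'shards' })`.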
*/ querystring?: { [key: string]: any; } & { node_id?: never; metric?: never; index_metric?: never; completion_fields?: never; fielddata_fields?: never; fields?: never; groups?: never; include_segment_file_sizes?: never; level?: never; timeout?: never; types?: never; include_unloaded_segments?: never; }; } export type NodesStatsResponse = NodesStatsResponseBase; export interface NodesStatsResponseBase extends NodesNodesResponseBase { cluster_name?: Name; nodes: Record; } export interface NodesUsageNodeUsage { rest_actions: Record; since: EpochTime; timestamp: EpochTime; aggregations: Record; } export interface NodesUsageRequest extends RequestBase { /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to; leave empty to get information from all nodes. */ node_id?: NodeIds; /** Limits the information returned to the specific metrics. * A comma-separated list of the following options: `_all`, `rest_actions`. */ metric?: Metrics; /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; metric?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; metric?: never; timeout?: never; }; } export type NodesUsageResponse = NodesUsageResponseBase; export interface NodesUsageResponseBase extends NodesNodesResponseBase { cluster_name: Name; nodes: Record; } export interface QueryRulesQueryRule { /** A unique identifier for the rule. */ rule_id: Id; /** The type of rule. * `pinned` will identify and pin specific documents to the top of search results. * `exclude` will exclude specific documents from search results. */ type: QueryRulesQueryRuleType; /** The criteria that must be met for the rule to be applied. * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[]; /** The actions to take when the rule is matched. * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions; priority?: integer; } export interface QueryRulesQueryRuleActions { /** The unique document IDs of the documents to apply the rule to. * Only one of `ids` or `docs` may be specified and at least one must be specified. */ ids?: Id[]; /** The documents to apply the rule to. * Only one of `ids` or `docs` may be specified and at least one must be specified. * There is a maximum value of 100 documents in a rule. * You can specify the following attributes for each document: * * * `_index`: The index of the document to pin. * * `_id`: The unique document ID. */ docs?: QueryDslPinnedDoc[]; } export interface QueryRulesQueryRuleCriteria { /** The type of criteria. The following criteria types are supported: * * * `always`: Matches all queries, regardless of input. * * `contains`: Matches that contain this value anywhere in the field meet the criteria defined by the rule. Only applicable for string values. * * `exact`: Only exact matches meet the criteria defined by the rule. Applicable for string or numerical values. * * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit Distance meet the criteria defined by the rule. Only applicable for string values. 
* * `gt`: Matches with a value greater than this value meet the criteria defined by the rule. Only applicable for numerical values. * * `gte`: Matches with a value greater than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. * * `lt`: Matches with a value less than this value meet the criteria defined by the rule. Only applicable for numerical values. * * `lte`: Matches with a value less than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. * * `prefix`: Matches that start with this value meet the criteria defined by the rule. Only applicable for string values. * * `suffix`: Matches that end with this value meet the criteria defined by the rule. Only applicable for string values. */ type: QueryRulesQueryRuleCriteriaType; /** The metadata field to match against. * This metadata will be used to match against `match_criteria` sent in the rule. * It is required for all criteria types except `always`. */ metadata?: string; /** The values to match against the `metadata` field. * Only one value must match for the criteria to be met. * It is required for all criteria types except `always`. */ values?: any[]; } export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always'; export type QueryRulesQueryRuleType = 'pinned' | 'exclude'; export interface QueryRulesQueryRuleset { /** A unique identifier for the ruleset. */ ruleset_id: Id; /** Rules associated with the query ruleset. */ rules: QueryRulesQueryRule[]; } export interface QueryRulesDeleteRuleRequest extends RequestBase { /** The unique identifier of the query ruleset containing the rule to delete */ ruleset_id: Id; /** The unique identifier of the query rule within the specified ruleset to delete */ rule_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; rule_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ruleset_id?: never; rule_id?: never; }; } export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase; export interface QueryRulesDeleteRulesetRequest extends RequestBase { /** The unique identifier of the query ruleset to delete */ ruleset_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ruleset_id?: never; }; } export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase; export interface QueryRulesGetRuleRequest extends RequestBase { /** The unique identifier of the query ruleset containing the rule to retrieve */ ruleset_id: Id; /** The unique identifier of the query rule within the specified ruleset to retrieve */ rule_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; rule_id?: never; }); /** All values in `querystring` will be added to the request querystring. 
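*
* For example, a hedged sketch (assuming the standard client method `client.queryRules.getRule`;
* the identifiers are made up):
* `await client.queryRules.getRule({ ruleset_id: 'my-ruleset', rule_id: 'my-rule' })`.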
*/ querystring?: { [key: string]: any; } & { ruleset_id?: never; rule_id?: never; }; } export type QueryRulesGetRuleResponse = QueryRulesQueryRule; export interface QueryRulesGetRulesetRequest extends RequestBase { /** The unique identifier of the query ruleset */ ruleset_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ruleset_id?: never; }; } export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset; export interface QueryRulesListRulesetsQueryRulesetListItem { /** A unique identifier for the ruleset. */ ruleset_id: Id; /** The number of rules associated with the ruleset. */ rule_total_count: integer; /** A map of criteria type (for example, `exact`) to the number of rules of that type. * * NOTE: The counts in `rule_criteria_types_counts` may be larger than the value of `rule_total_count` because a rule may have multiple criteria. */ rule_criteria_types_counts: Record; /** A map of rule type (for example, `pinned`) to the number of rules of that type. */ rule_type_counts: Record; } export interface QueryRulesListRulesetsRequest extends RequestBase { /** The offset from the first result to fetch. */ from?: integer; /** The maximum number of results to retrieve. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { from?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { from?: never; size?: never; }; } export interface QueryRulesListRulesetsResponse { count: long; results: QueryRulesListRulesetsQueryRulesetListItem[]; } export interface QueryRulesPutRuleRequest extends RequestBase { /** The unique identifier of the query ruleset containing the rule to be created or updated. */ ruleset_id: Id; /** The unique identifier of the query rule within the specified ruleset to be created or updated. */ rule_id: Id; /** The type of rule. */ type: QueryRulesQueryRuleType; /** The criteria that must be met for the rule to be applied. * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[]; /** The actions to take when the rule is matched. * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions; priority?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; rule_id?: never; type?: never; criteria?: never; actions?: never; priority?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ruleset_id?: never; rule_id?: never; type?: never; criteria?: never; actions?: never; priority?: never; }; } export interface QueryRulesPutRuleResponse { result: Result; } export interface QueryRulesPutRulesetRequest extends RequestBase { /** The unique identifier of the query ruleset to be created or updated. */ ruleset_id: Id; rules: QueryRulesQueryRule | QueryRulesQueryRule[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; rules?: never; }); /** All values in `querystring` will be added to the request querystring. 
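*
* An illustrative sketch (assuming the standard client method `client.queryRules.putRuleset`;
* all identifiers below are made up):
*
* await client.queryRules.putRuleset({
*   ruleset_id: 'my-ruleset',
*   rules: [{
*     rule_id: 'promote-doc',
*     type: 'pinned',
*     criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
*     actions: { ids: ['doc-1'] }
*   }]
* })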
*/ querystring?: { [key: string]: any; } & { ruleset_id?: never; rules?: never; }; } export interface QueryRulesPutRulesetResponse { result: Result; } export interface QueryRulesTestQueryRulesetMatchedRule { /** Ruleset unique identifier */ ruleset_id: Id; /** Rule unique identifier within that ruleset */ rule_id: Id; } export interface QueryRulesTestRequest extends RequestBase { /** The unique identifier of the query ruleset to be created or updated */ ruleset_id: Id; /** The match criteria to apply to rules in the given query ruleset. * Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ match_criteria: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ruleset_id?: never; match_criteria?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ruleset_id?: never; match_criteria?: never; }; } export interface QueryRulesTestResponse { total_matched_rules: integer; matched_rules: QueryRulesTestQueryRulesetMatchedRule[]; } export interface RollupDateHistogramGrouping { /** How long to wait before rolling up new documents. * By default, the indexer attempts to roll up all data that is available. * However, it is not uncommon for data to arrive out of order. * The indexer is unable to deal with data that arrives after a time-span has been rolled up. * You need to specify a delay that matches the longest period of time you expect out-of-order data to arrive. */ delay?: Duration; /** The date field that is to be rolled up. */ field: Field; format?: string; interval?: Duration; /** The interval of time buckets to be generated when rolling up. */ calendar_interval?: Duration; /** The interval of time buckets to be generated when rolling up. */ fixed_interval?: Duration; /** Defines what `time_zone` the rollup documents are stored as. * Unlike raw data, which can shift timezones on the fly, rolled documents have to be stored with a specific timezone. * By default, rollup documents are stored in `UTC`. */ time_zone?: TimeZone; } export interface RollupFieldMetric { /** The field to collect metrics for. This must be a numeric of some kind. */ field: Field; /** An array of metrics to collect for the field. At least one metric must be configured. */ metrics: RollupMetric[]; } export interface RollupGroupings { /** A date histogram group aggregates a date field into time-based buckets. * This group is mandatory; you currently cannot roll up documents without a timestamp and a `date_histogram` group. */ date_histogram?: RollupDateHistogramGrouping; /** The histogram group aggregates one or more numeric fields into numeric histogram intervals. */ histogram?: RollupHistogramGrouping; /** The terms group can be used on keyword or numeric fields to allow bucketing via the terms aggregation at a later point. * The indexer enumerates and stores all values of a field for each time-period. * This can be potentially costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse. */ terms?: RollupTermsGrouping; } export interface RollupHistogramGrouping { /** The set of fields that you wish to build histograms for. * All fields specified must be some kind of numeric. * Order does not matter. */ fields: Fields; /** The interval of histogram buckets to be generated when rolling up. * For example, a value of `5` creates buckets that are five units wide (`0-5`, `5-10`, etc). 
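* (Illustratively, with `interval: 5` a document value of `13` is rolled up into the `10-15` bucket.)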
* Note that only one interval can be specified in the histogram group, meaning that all fields being grouped via the histogram must share the same interval. */ interval: long; } export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count'; export interface RollupTermsGrouping { /** The set of fields that you wish to collect terms for. * This array can contain both keyword and numeric fields. * Order does not matter. */ fields: Fields; } export interface RollupDeleteJobRequest extends RequestBase { /** Identifier for the job. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface RollupDeleteJobResponse { acknowledged: boolean; task_failures?: TaskFailure[]; } export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting'; export interface RollupGetJobsRequest extends RequestBase { /** Identifier for the rollup job. * If it is `_all` or omitted, the API returns all rollup jobs. */ id?: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface RollupGetJobsResponse { jobs: RollupGetJobsRollupJob[]; } export interface RollupGetJobsRollupJob { /** The rollup job configuration. */ config: RollupGetJobsRollupJobConfiguration; /** Transient statistics about the rollup job, such as how many documents have been processed and how many rollup summary docs have been indexed. * These stats are not persisted. * If a node is restarted, these stats are reset. */ stats: RollupGetJobsRollupJobStats; /** The current status of the indexer for the rollup job. */ status: RollupGetJobsRollupJobStatus; } export interface RollupGetJobsRollupJobConfiguration { cron: string; groups: RollupGroupings; id: Id; index_pattern: string; metrics: RollupFieldMetric[]; page_size: long; rollup_index: IndexName; timeout: Duration; } export interface RollupGetJobsRollupJobStats { documents_processed: long; index_failures: long; index_time_in_ms: DurationValue; index_total: long; pages_processed: long; rollups_indexed: long; search_failures: long; search_time_in_ms: DurationValue; search_total: long; trigger_count: long; processing_time_in_ms: DurationValue; processing_total: long; } export interface RollupGetJobsRollupJobStatus { current_position?: Record; job_state: RollupGetJobsIndexingJobState; upgraded_doc_id?: boolean; } export interface RollupGetRollupCapsRequest extends RequestBase { /** Index, indices or index-pattern to return rollup capabilities for. * `_all` may be used to fetch rollup capabilities from all jobs. */ id?: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type RollupGetRollupCapsResponse = Record; export interface RollupGetRollupCapsRollupCapabilities { /** There can be multiple, independent jobs configured for a single index or index pattern. Each of these jobs may have different configurations, so the API returns a list of all the various configurations available. 
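*
* For example, a hedged sketch (assuming the standard client method `client.rollup.getRollupCaps`;
* the pattern is made up): `await client.rollup.getRollupCaps({ id: 'sensor-*' })` returns one
* entry per index pattern, each listing the job summaries described below.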
*/ rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[]; } export interface RollupGetRollupCapsRollupCapabilitySummary { fields: Record; index_pattern: string; job_id: string; rollup_index: string; } export interface RollupGetRollupCapsRollupFieldSummary { agg: string; calendar_interval?: Duration; time_zone?: TimeZone; } export interface RollupGetRollupIndexCapsIndexCapabilities { rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[]; } export interface RollupGetRollupIndexCapsRequest extends RequestBase { /** Data stream or index to check for rollup capabilities. * Wildcard (`*`) expressions are supported. */ index: Ids; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; }; } export type RollupGetRollupIndexCapsResponse = Record; export interface RollupGetRollupIndexCapsRollupJobSummary { fields: Record; index_pattern: string; job_id: Id; rollup_index: IndexName; } export interface RollupGetRollupIndexCapsRollupJobSummaryField { agg: string; time_zone?: TimeZone; calendar_interval?: Duration; } export interface RollupPutJobRequest extends RequestBase { /** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the * data that is associated with the rollup job. The ID is persistent; it is stored with the rolled * up data. If you create a job, let it run for a while, then delete the job, the data that the job * rolled up is still associated with this job ID. You cannot create a new job with the same ID * since that could lead to problems with mismatched job configurations. */ id: Id; /** A cron string which defines the intervals when the rollup job should be executed. When the interval * triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated * to the time interval of the data being rolled up. For example, you may wish to create hourly rollups * of your documents but only run the indexer on a daily basis at midnight, as defined by the cron. The * cron pattern is defined just like a Watcher cron schedule. */ cron: string; /** Defines the grouping fields and aggregations for this rollup job. These fields will then be * available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of * the groups configuration as defining a set of tools that can later be used in aggregations to partition the * data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide * enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ groups: RollupGroupings; /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to * roll up the entire index or index-pattern. */ index_pattern: string; /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each * group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined * on a per-field basis and for each field you configure which metric should be collected. */ metrics?: RollupFieldMetric[]; /** The number of bucket results that are processed on each iteration of the rollup indexer. 
A larger value tends * to execute faster, but requires more memory during processing. This value has no effect on how the data is * rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ page_size: integer; /** The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. */ rollup_index: IndexName; /** Time to wait for the request to complete. */ timeout?: Duration; headers?: HttpHeaders; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; cron?: never; groups?: never; index_pattern?: never; metrics?: never; page_size?: never; rollup_index?: never; timeout?: never; headers?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; cron?: never; groups?: never; index_pattern?: never; metrics?: never; page_size?: never; rollup_index?: never; timeout?: never; headers?: never; }; } export type RollupPutJobResponse = AcknowledgedResponseBase; export interface RollupRollupSearchRequest extends RequestBase { /** A comma-separated list of data streams and indices used to limit the request. * This parameter has the following rules: * * * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` is not permitted. * * Multiple non-rollup indices may be specified. * * Only one rollup index may be specified. If more than one is supplied, an exception occurs. * * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ index: Indices; /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean; /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean; /** Specifies aggregations. */ aggregations?: Record; /** Specifies aggregations. * @alias aggregations */ aggs?: Record; /** Specifies a DSL query that is subject to some limitations. */ query?: QueryDslQueryContainer; /** Must be zero if set, as rollups work on pre-aggregated data. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; rest_total_hits_as_int?: never; typed_keys?: never; aggregations?: never; aggs?: never; query?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; rest_total_hits_as_int?: never; typed_keys?: never; aggregations?: never; aggs?: never; query?: never; size?: never; }; } export interface RollupRollupSearchResponse { took: long; timed_out: boolean; terminated_early?: boolean; _shards: ShardStatistics; hits: SearchHitsMetadata; aggregations?: TAggregations; } export interface RollupStartJobRequest extends RequestBase { /** Identifier for the rollup job. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. 
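*
* For example, an illustrative sketch (assuming the standard client method
* `client.rollup.startJob`; the job identifier is made up):
* `await client.rollup.startJob({ id: 'sensor' })`, which resolves to `{ started: true }` once the job begins.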
*/ querystring?: { [key: string]: any; } & { id?: never; }; } export interface RollupStartJobResponse { started: boolean; } export interface RollupStopJobRequest extends RequestBase { /** Identifier for the rollup job. */ id: Id; /** If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. * If more than `timeout` time has passed, the API throws a timeout exception. * NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. * The timeout simply means the API call itself timed out while waiting for the status change. */ timeout?: Duration; /** If set to `true`, causes the API to block until the indexer state completely stops. * If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; timeout?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; timeout?: never; wait_for_completion?: never; }; } export interface RollupStopJobResponse { stopped: boolean; } export interface SearchApplicationAnalyticsCollection { /** Data stream for the collection. */ event_data_stream: SearchApplicationEventDataStream; } export interface SearchApplicationEventDataStream { name: IndexName; } export type SearchApplicationEventType = 'page_view' | 'search' | 'search_click'; export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters { /** Search Application name */ name: Name; /** Last time the Search Application was updated. */ updated_at_millis: EpochTime; } export interface SearchApplicationSearchApplicationParameters { /** Indices that are part of the Search Application. */ indices: IndexName[]; /** Analytics collection associated with the Search Application. */ analytics_collection_name?: Name; /** Search template to use on search operations. */ template?: SearchApplicationSearchApplicationTemplate; } export interface SearchApplicationSearchApplicationTemplate { /** The associated mustache template. */ script: Script | ScriptSource; } export interface SearchApplicationDeleteRequest extends RequestBase { /** The name of the search application to delete. */ name: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SearchApplicationDeleteResponse = AcknowledgedResponseBase; export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { /** The name of the analytics collection to be deleted */ name: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase; export interface SearchApplicationGetRequest extends RequestBase { /** The name of the search application */ name: Name; /** All values in `body` will be added to the request body. 
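*
* For example, a hedged sketch (assuming the standard client method `client.searchApplication.get`;
* the application name is made up): `await client.searchApplication.get({ name: 'my-app' })`.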
*/ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SearchApplicationGetResponse = SearchApplicationSearchApplication; export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { /** A list of analytics collections to limit the returned information */ name?: Name[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SearchApplicationGetBehavioralAnalyticsResponse = Record; export interface SearchApplicationListRequest extends RequestBase { /** Query in the Lucene query string syntax. */ q?: string; /** Starting offset. */ from?: integer; /** Specifies a max number of results to get. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { q?: never; from?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { q?: never; from?: never; size?: never; }; } export interface SearchApplicationListResponse { count: long; results: SearchApplicationSearchApplication[]; } export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { /** The name of the behavioral analytics collection. */ collection_name: Name; /** The analytics event type. */ event_type: SearchApplicationEventType; /** Whether the response should include more details. */ debug?: boolean; payload?: any; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { collection_name?: never; event_type?: never; debug?: never; payload?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { collection_name?: never; event_type?: never; debug?: never; payload?: never; }; } export interface SearchApplicationPostBehavioralAnalyticsEventResponse { accepted: boolean; event?: any; } export interface SearchApplicationPutRequest extends RequestBase { /** The name of the search application to be created or updated. */ name: Name; /** If `true`, this request cannot replace or update existing Search Applications. */ create?: boolean; search_application?: SearchApplicationSearchApplicationParameters; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; create?: never; search_application?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; create?: never; search_application?: never; }; } export interface SearchApplicationPutResponse { result: Result; } export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { /** The name of the analytics collection created or updated */ name: Name; } export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { /** The name of the analytics collection to be created or updated. */ name: Name; /** All values in `body` will be added to the request body. 
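*
* For example, an illustrative sketch (assuming the standard client method
* `client.searchApplication.putBehavioralAnalytics`; the collection name is made up):
* `await client.searchApplication.putBehavioralAnalytics({ name: 'my-analytics' })`.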
*/ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase; export interface SearchApplicationRenderQueryRequest extends RequestBase { /** The name of the search application to render the query for. */ name: Name; params?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; params?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; params?: never; }; } export interface SearchApplicationRenderQueryResponse { } export interface SearchApplicationSearchRequest extends RequestBase { /** The name of the search application to be searched. */ name: Name; /** Determines whether aggregation names are prefixed by their respective types in the response. */ typed_keys?: boolean; /** Query parameters specific to this request, which will override any defaults specified in the template. */ params?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; typed_keys?: never; params?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; typed_keys?: never; params?: never; }; } export type SearchApplicationSearchResponse = SearchResponseBody; export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards'; export interface SearchableSnapshotsCacheStatsNode { shared_cache: SearchableSnapshotsCacheStatsShared; } export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { /** The names of the nodes in the cluster to target. */ node_id?: NodeIds; master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; master_timeout?: never; }; } export interface SearchableSnapshotsCacheStatsResponse { nodes: Record; } export interface SearchableSnapshotsCacheStatsShared { reads: long; bytes_read_in_bytes: ByteSize; writes: long; bytes_written_in_bytes: ByteSize; evictions: long; num_regions: integer; size_in_bytes: ByteSize; region_size_in_bytes: ByteSize; } export interface SearchableSnapshotsClearCacheRequest extends RequestBase { /** A comma-separated list of data streams, indices, and aliases to clear from the cache. * It supports wildcards (`*`). */ index?: Indices; /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards; /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean; /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean; /** All values in `body` will be added to the request body. 
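*
* For example, a hedged sketch (assuming the standard client method
* `client.searchableSnapshots.clearCache`; the index name is made up):
* `await client.searchableSnapshots.clearCache({ index: 'my-searchable-index' })`.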
*/ body?: string | ({ [key: string]: any; } & { index?: never; expand_wildcards?: never; allow_no_indices?: never; ignore_unavailable?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; expand_wildcards?: never; allow_no_indices?: never; ignore_unavailable?: never; }; } export type SearchableSnapshotsClearCacheResponse = any; export interface SearchableSnapshotsMountMountedSnapshot { snapshot: Name; indices: Indices; shards: ShardStatistics; } export interface SearchableSnapshotsMountRequest extends RequestBase { /** The name of the repository containing the snapshot of the index to mount. */ repository: Name; /** The name of the snapshot of the index to mount. */ snapshot: Name; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** If true, the request blocks until the operation is complete. */ wait_for_completion?: boolean; /** The mount option for the searchable snapshot index. */ storage?: string; /** The name of the index contained in the snapshot whose data is to be mounted. * If no `renamed_index` is specified, this name will also be used to create the new index. */ index: IndexName; /** The name of the index that will be created. */ renamed_index?: IndexName; /** The settings that should be added to the index when it is mounted. */ index_settings?: Record; /** The names of settings that should be removed from the index when it is mounted. */ ignore_index_settings?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; wait_for_completion?: never; storage?: never; index?: never; renamed_index?: never; index_settings?: never; ignore_index_settings?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; wait_for_completion?: never; storage?: never; index?: never; renamed_index?: never; index_settings?: never; ignore_index_settings?: never; }; } export interface SearchableSnapshotsMountResponse { snapshot: SearchableSnapshotsMountMountedSnapshot; } export interface SearchableSnapshotsStatsRequest extends RequestBase { /** A comma-separated list of data streams and indices to retrieve statistics for. */ index?: Indices; /** Return stats aggregated at cluster, index or shard level */ level?: SearchableSnapshotsStatsLevel; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { index?: never; level?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; level?: never; }; } export interface SearchableSnapshotsStatsResponse { stats: any; total: any; } export interface SecurityAccess { /** A list of indices permission entries for cross-cluster replication. */ replication?: SecurityReplicationAccess[]; /** A list of indices permission entries for cross-cluster search. */ search?: SecuritySearchAccess[]; } export interface SecurityApiKey { /** Id for the API key */ id: Id; /** Name of the API key. */ name: Name; /** The type of the API key (e.g. `rest` or `cross_cluster`). 
*/ type: SecurityApiKeyType; /** Creation time for the API key in milliseconds. */ creation: EpochTime; /** Expiration time for the API key in milliseconds. */ expiration?: EpochTime; /** Invalidation status for the API key. * If the key has been invalidated, it has a value of `true`. Otherwise, it is `false`. */ invalidated: boolean; /** If the key has been invalidated, invalidation time in milliseconds. */ invalidation?: EpochTime; /** Principal for which this API key was created */ username: Username; /** Realm name of the principal for which this API key was created. */ realm: string; /** Realm type of the principal for which this API key was created */ realm_type?: string; /** Metadata of the API key */ metadata: Metadata; /** The role descriptors assigned to this API key when it was created or last updated. * An empty role descriptor means the API key inherits the owner user’s permissions. */ role_descriptors?: Record; /** The owner user’s permissions associated with the API key. * It is a point-in-time snapshot captured at creation and subsequent updates. * An API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions. */ limited_by?: Record[]; /** The access granted to cross-cluster API keys. * The access is composed of permissions for cross cluster search and cross cluster replication. * At least one of them must be specified. * When specified, the new access assignment fully replaces the previously assigned access. */ access?: SecurityAccess; /** The profile uid for the API key owner principal, if requested and if it exists */ profile_uid?: string; /** Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ _sort?: SortResults; } export type SecurityApiKeyType = 'rest' | 'cross_cluster'; export interface SecurityApplicationGlobalUserPrivileges { manage: SecurityManageUserPrivileges; } export interface SecurityApplicationPrivileges { /** The name of the application to which this entry applies. */ application: string; /** A list of strings, where each element is the name of an application privilege or action. */ privileges: string[]; /** A list of resources to which the privileges are applied. 
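*
* For example (illustrative): `resources: ['*']` applies the entry to every resource of the application.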
*/ resources: string[]; } export interface SecurityBulkError { /** The number of errors */ count: integer; /** Details about the errors, keyed by role name */ details: Record; } export interface SecurityClusterNode { name: Name; } export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string; export interface SecurityCreatedStatus { created: boolean; } export interface SecurityFieldSecurity { except?: Fields; grant?: Fields; } export interface SecurityGlobalPrivilege { application: SecurityApplicationGlobalUserPrivileges; } export type SecurityGrantType = 'password' | 'access_token'; export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string; export interface SecurityIndicesPrivileges { /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity; /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[]; /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[]; /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery; /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. * @remarks This property is not supported on Elastic Cloud Serverless. 
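*
* An illustrative entry (all field values are made up):
* `{ names: ['logs-*'], privileges: ['read'], allow_restricted_indices: false }`.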
*/ allow_restricted_indices?: boolean; } export type SecurityIndicesPrivilegesQuery = string | QueryDslQueryContainer | SecurityRoleTemplateQuery; export interface SecurityManageUserPrivileges { applications: string[]; } export interface SecurityRealmInfo { name: Name; type: string; } export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats'; export interface SecurityRemoteClusterPrivileges { /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names; /** The cluster level privileges that owners of the role have on the remote cluster. */ privileges: SecurityRemoteClusterPrivilege[]; } export interface SecurityRemoteIndicesPrivileges { /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names; /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity; /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[]; /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[]; /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery; /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean; } export interface SecurityRemoteUserIndicesPrivileges { /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity[]; /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[]; /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[]; /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[]; /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean; clusters: string[]; } export interface SecurityReplicationAccess { /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[]; /** This needs to be set to true if the patterns in the names field should cover system indices. */ allow_restricted_indices?: boolean; } export interface SecurityRestriction { /** A list of workflows to which the API key is restricted. * NOTE: In order to use a role restriction, an API key must be created with a single role descriptor. 
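 *
 * A sketch of a role descriptor restricted to a single workflow (a single descriptor,
 * as the note above requires; the index name is an illustrative assumption):
 *
 *   const descriptor: SecurityRoleDescriptor = {
 *     indices: [{ names: ['my-search-app-index'], privileges: ['read'] }],
 *     restriction: { workflows: ['search_application_query'] }
 *   }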
*/ workflows: SecurityRestrictionWorkflow[]; } export type SecurityRestrictionWorkflow = 'search_application_query' | string; export interface SecurityRoleDescriptor { /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ cluster?: SecurityClusterPrivilege[]; /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[]; /** A list of indices permissions entries. * @alias indices */ index?: SecurityIndicesPrivileges[]; /** A list of indices permissions for remote clusters. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[]; /** A list of cluster permissions for remote clusters. * NOTE: This is limited to a subset of the cluster permissions. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[]; /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege; /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[]; /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata; /** A list of users that the API keys can impersonate. * NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. * For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ run_as?: string[]; /** Optional description of the role descriptor */ description?: string; /** Restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction; transient_metadata?: Record<string, any>; } export interface SecurityRoleDescriptorRead { /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ cluster: SecurityClusterPrivilege[]; /** A list of indices permissions entries. */ indices: SecurityIndicesPrivileges[]; /** A list of indices permissions entries. * @alias indices */ index: SecurityIndicesPrivileges[]; /** A list of indices permissions for remote clusters. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[]; /** A list of cluster permissions for remote clusters. * NOTE: This is limited to a subset of the cluster permissions. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[]; /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege; /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[]; /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata; /** A list of users that the API keys can impersonate. */ run_as?: string[]; /** An optional description of the role descriptor.
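 *
 * A sketch of a typical descriptor carrying a description and metadata (all values
 * are illustrative assumptions, not defaults):
 *
 *   const monitoring: SecurityRoleDescriptor = {
 *     cluster: ['monitor'],
 *     indices: [{ names: ['metrics-*'], privileges: ['read', 'view_index_metadata'] }],
 *     metadata: { team: 'sre' },
 *     description: 'Read-only access to metrics indices'
 *   }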
*/ description?: string; /** A restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction; transient_metadata?: Record<string, any>; } export interface SecurityRoleMapping { enabled: boolean; metadata: Metadata; roles?: string[]; role_templates?: SecurityRoleTemplate[]; rules: SecurityRoleMappingRule; } export interface SecurityRoleMappingRule { any?: SecurityRoleMappingRule[]; all?: SecurityRoleMappingRule[]; field?: Partial<Record<Field, FieldValue | FieldValue[]>>; except?: SecurityRoleMappingRule; } export interface SecurityRoleTemplate { format?: SecurityTemplateFormat; template: Script | ScriptSource; } export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer; export interface SecurityRoleTemplateQuery { /** When you create a role, you can specify a query that defines the document level security permissions. You can optionally * use Mustache templates in the role query to insert the username of the current authenticated user into the role. * Like other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based * templates and define custom parameters. You access the details for the current authenticated user through the _user parameter. */ template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery; } export interface SecurityRoleTemplateScript { source?: SecurityRoleTemplateInlineQuery; /** The `id` for a stored script. */ id?: Id; /** Specifies any named parameters that are passed into the script as variables. * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record<string, any>; /** Specifies the language the script is written in. */ lang?: ScriptLanguage; options?: Record<string, string>; } export interface SecuritySearchAccess { /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity; /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[]; /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery; /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean; } export interface SecuritySecuritySettings { index?: IndicesIndexSettings; } export type SecurityTemplateFormat = 'string' | 'json'; export interface SecurityUser { email?: string | null; full_name?: Name | null; metadata: Metadata; roles: string[]; username: Username; enabled: boolean; profile_uid?: SecurityUserProfileId; } export interface SecurityUserIndicesPrivileges { /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity[]; /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[]; /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[]; /** Search queries that define the documents the user has access to.
A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[]; /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean; } export interface SecurityUserProfile { uid: SecurityUserProfileId; user: SecurityUserProfileUser; data: Record<string, any>; labels: Record<string, any>; enabled?: boolean; } export interface SecurityUserProfileHitMetadata { _primary_term: long; _seq_no: SequenceNumber; } export type SecurityUserProfileId = string; export interface SecurityUserProfileUser { email?: string | null; full_name?: Name | null; realm_name: Name; realm_domain?: Name; roles: string[]; username: Username; } export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { last_synchronized: long; _doc: SecurityUserProfileHitMetadata; } export interface SecurityActivateUserProfileRequest extends RequestBase { /** The user's Elasticsearch access token or JWT. * Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. * If you specify the `access_token` grant type, this parameter is required. * It is not valid with other grant types. */ access_token?: string; /** The type of grant. */ grant_type: SecurityGrantType; /** The user's password. * If you specify the `password` grant type, this parameter is required. * It is not valid with other grant types. */ password?: string; /** The username that identifies the user. * If you specify the `password` grant type, this parameter is required. * It is not valid with other grant types. */ username?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { access_token?: never; grant_type?: never; password?: never; username?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { access_token?: never; grant_type?: never; password?: never; username?: never; }; } export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata; export interface SecurityAuthenticateAuthenticateApiKey { id: Id; name?: Name; } export interface SecurityAuthenticateRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface SecurityAuthenticateResponse { api_key?: SecurityAuthenticateAuthenticateApiKey; authentication_realm: SecurityRealmInfo; email?: string | null; full_name?: Name | null; lookup_realm: SecurityRealmInfo; metadata: Metadata; roles: string[]; username: Username; enabled: boolean; authentication_type: string; token?: SecurityAuthenticateToken; } export interface SecurityAuthenticateToken { name: Name; type?: string; } export interface SecurityBulkDeleteRoleRequest extends RequestBase { /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
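 *
 * Usage sketch with the JavaScript client (the client configuration and role names
 * are assumptions for the example, not part of the API definition):
 *
 *   import { Client } from '@elastic/elasticsearch'
 *
 *   const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'base64-credentials' } })
 *   const resp = await client.security.bulkDeleteRole({ names: ['old-role-1', 'old-role-2'] })
 *   // resp.deleted, resp.not_found, and resp.errors mirror SecurityBulkDeleteRoleResponse below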
*/ refresh?: Refresh; /** An array of role names to delete */ names: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { refresh?: never; names?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { refresh?: never; names?: never; }; } export interface SecurityBulkDeleteRoleResponse { /** Array of deleted roles */ deleted?: string[]; /** Array of roles that could not be found */ not_found?: string[]; /** Present if any deletes resulted in errors */ errors?: SecurityBulkError; } export interface SecurityBulkPutRoleRequest extends RequestBase { /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** A dictionary of role name to RoleDescriptor objects to add or update */ roles: Record<string, SecurityRoleDescriptor>; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { refresh?: never; roles?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { refresh?: never; roles?: never; }; } export interface SecurityBulkPutRoleResponse { /** Array of created roles */ created?: string[]; /** Array of updated roles */ updated?: string[]; /** Array of role names without any changes */ noop?: string[]; /** Present if any updates resulted in errors */ errors?: SecurityBulkError; } export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { /** Expiration time for the API keys. * By default, API keys never expire. * This property can be omitted to leave the value unchanged. */ expiration?: Duration; /** The API key identifiers. */ ids: string | string[]; /** Arbitrary nested metadata to associate with the API keys. * Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. * Any information specified with this parameter fully replaces metadata previously associated with the API key. */ metadata?: Metadata; /** The role descriptors to assign to the API keys. * An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. * You can assign new privileges by specifying them in this parameter. * To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. * If an API key has no assigned privileges, it inherits the owner user's full permissions. * The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter. * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record<string, SecurityRoleDescriptor>; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { expiration?: never; ids?: never; metadata?: never; role_descriptors?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { expiration?: never; ids?: never; metadata?: never; role_descriptors?: never; }; } export interface SecurityBulkUpdateApiKeysResponse { errors?: SecurityBulkError; noops: string[]; updated: string[]; } export interface SecurityChangePasswordRequest extends RequestBase { /** The user whose password you want to change.
If you do not specify this * parameter, the password is changed for the current user. */ username?: Username; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** The new password value. Passwords must be at least 6 characters long. */ password?: Password; /** A hash of the new password value. This must be produced using the same * hashing algorithm as has been configured for password storage. For more details, * see the explanation of the `xpack.security.authc.password_hashing.algorithm` * setting. */ password_hash?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { username?: never; refresh?: never; password?: never; password_hash?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { username?: never; refresh?: never; password?: never; password_hash?: never; }; } export interface SecurityChangePasswordResponse { } export interface SecurityClearApiKeyCacheRequest extends RequestBase { /** Comma-separated list of API key IDs to evict from the API key cache. * To evict all API keys, use `*`. * Does not support other wildcard patterns. */ ids: Ids; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ids?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ids?: never; }; } export interface SecurityClearApiKeyCacheResponse { _nodes: NodeStatistics; cluster_name: Name; nodes: Record<string, SecurityClusterNode>; } export interface SecurityClearCachedPrivilegesRequest extends RequestBase { /** A comma-separated list of applications. * To clear all applications, use an asterisk (`*`). * It does not support other wildcard patterns. */ application: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { application?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { application?: never; }; } export interface SecurityClearCachedPrivilegesResponse { _nodes: NodeStatistics; cluster_name: Name; nodes: Record<string, SecurityClusterNode>; } export interface SecurityClearCachedRealmsRequest extends RequestBase { /** A comma-separated list of realms. * To clear all realms, use an asterisk (`*`). * It does not support other wildcard patterns. */ realms: Names; /** A comma-separated list of the users to clear from the cache. * If you do not specify this parameter, the API evicts all users from the user cache. */ usernames?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { realms?: never; usernames?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { realms?: never; usernames?: never; }; } export interface SecurityClearCachedRealmsResponse { _nodes: NodeStatistics; cluster_name: Name; nodes: Record<string, SecurityClusterNode>; } export interface SecurityClearCachedRolesRequest extends RequestBase { /** A comma-separated list of roles to evict from the role cache. * To evict all roles, use an asterisk (`*`). * It does not support other wildcard patterns. */ name: Names; /** All values in `body` will be added to the request body.
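 *
 * Usage sketch (assumes a configured `client` instance of `@elastic/elasticsearch`;
 * the role name is an example):
 *
 *   const resp = await client.security.clearCachedRoles({ name: 'my-role' })
 *   // resp.nodes lists each node on which the cache entry was cleared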
*/ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export interface SecurityClearCachedRolesResponse { _nodes: NodeStatistics; cluster_name: Name; nodes: Record<string, SecurityClusterNode>; } export interface SecurityClearCachedServiceTokensRequest extends RequestBase { /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace; /** The name of the service, which must be unique within its namespace. */ service: Service; /** A comma-separated list of token names to evict from the service account token caches. * Use a wildcard (`*`) to evict all tokens that belong to a service account. * It does not support other wildcard patterns. */ name: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { namespace?: never; service?: never; name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { namespace?: never; service?: never; name?: never; }; } export interface SecurityClearCachedServiceTokensResponse { _nodes: NodeStatistics; cluster_name: Name; nodes: Record<string, SecurityClusterNode>; } export interface SecurityCreateApiKeyRequest extends RequestBase { /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** The expiration time for the API key. * By default, API keys never expire. */ expiration?: Duration; /** A name for the API key. */ name?: Name; /** An array of role descriptors for this API key. * When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. * If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the authenticated user's permissions, thereby limiting the access scope for API keys. * The structure of a role descriptor is the same as the request for the create role API. * For more details, refer to the create or update roles API. * * NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. * In this case, you must explicitly specify a role descriptor with no privileges. * The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ role_descriptors?: Record<string, SecurityRoleDescriptor>; /** Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { refresh?: never; expiration?: never; name?: never; role_descriptors?: never; metadata?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { refresh?: never; expiration?: never; name?: never; role_descriptors?: never; metadata?: never; }; } export interface SecurityCreateApiKeyResponse { /** Generated API key. */ api_key: string; /** Expiration in milliseconds for the API key. */ expiration?: long; /** Unique ID for this API key.
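 *
 * Usage sketch (assumes a configured `client` instance of `@elastic/elasticsearch`;
 * the key name, expiration, and role descriptor are illustrative):
 *
 *   const resp = await client.security.createApiKey({
 *     name: 'ingest-key',
 *     expiration: '7d',
 *     role_descriptors: {
 *       writer: { indices: [{ names: ['logs-*'], privileges: ['create_doc'] }] }
 *     }
 *   })
 *   // `encoded` is base64(`${id}:${api_key}`) and can be used directly:
 *   const authorization = `ApiKey ${resp.encoded}`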
*/ id: Id; /** Specifies the name for this API key. */ name: Name; /** API key credentials, which is the base64-encoding of * the UTF-8 representation of `id` and `api_key` joined * by a colon (`:`). */ encoded: string; } export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { /** The access to be granted to this API key. * The access is composed of permissions for cross-cluster search and cross-cluster replication. * At least one of them must be specified. * * NOTE: No explicit privileges should be specified for either search or replication access. * The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ access: SecurityAccess; /** Expiration time for the API key. * By default, API keys never expire. */ expiration?: Duration; /** Arbitrary metadata that you want to associate with the API key. * It supports a nested data structure. * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata; /** Specifies the name for this API key. */ name: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { access?: never; expiration?: never; metadata?: never; name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { access?: never; expiration?: never; metadata?: never; name?: never; }; } export interface SecurityCreateCrossClusterApiKeyResponse { /** Generated API key. */ api_key: string; /** Expiration in milliseconds for the API key. */ expiration?: DurationValue<UnitMillis>; /** Unique ID for this API key. */ id: Id; /** Specifies the name for this API key. */ name: Name; /** API key credentials, which is the base64-encoding of * the UTF-8 representation of `id` and `api_key` joined * by a colon (`:`). */ encoded: string; } export interface SecurityCreateServiceTokenRequest extends RequestBase { /** The name of the namespace, which is a top-level grouping of service accounts. */ namespace: Namespace; /** The name of the service. */ service: Service; /** The name for the service account token. * If omitted, a random name will be generated. * * Token names must be at least 1 and no more than 256 characters. * They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. * * NOTE: Token names must be unique in the context of the associated service account. * They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `<namespace>/<service>/<token-name>`. */ name?: Name; /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { namespace?: never; service?: never; name?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring.
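 *
 * Usage sketch (assumes a configured `client`; `elastic/fleet-server` is a built-in
 * service account, and the token name is an example):
 *
 *   const resp = await client.security.createServiceToken({
 *     namespace: 'elastic',
 *     service: 'fleet-server',
 *     name: 'my-token'
 *   })
 *   // resp.token.value is a bearer credential: `Authorization: Bearer ${resp.token.value}`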
*/ querystring?: { [key: string]: any; } & { namespace?: never; service?: never; name?: never; refresh?: never; }; } export interface SecurityCreateServiceTokenResponse { created: boolean; token: SecurityCreateServiceTokenToken; } export interface SecurityCreateServiceTokenToken { name: Name; value: string; } export interface SecurityDelegatePkiAuthentication { username: string; roles: string[]; full_name: string | null; email: string | null; token?: Record<string, string>; metadata: Metadata; enabled: boolean; authentication_realm: SecurityDelegatePkiAuthenticationRealm; lookup_realm: SecurityDelegatePkiAuthenticationRealm; authentication_type: string; api_key?: Record<string, string>; } export interface SecurityDelegatePkiAuthenticationRealm { name: string; type: string; domain?: string; } export interface SecurityDelegatePkiRequest extends RequestBase { /** The X509Certificate chain, which is represented as an ordered string array. * Each string in the array is a base64 encoding (Section 4 of RFC 4648, not base64url) of the certificate's DER encoding. * * The first element is the target certificate that contains the subject distinguished name that is requesting access. * This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ x509_certificate_chain: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { x509_certificate_chain?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { x509_certificate_chain?: never; }; } export interface SecurityDelegatePkiResponse { /** An access token associated with the subject distinguished name of the client's certificate. */ access_token: string; /** The amount of time (in seconds) before the token expires. */ expires_in: long; /** The type of token. */ type: string; authentication?: SecurityDelegatePkiAuthentication; } export interface SecurityDeletePrivilegesFoundStatus { found: boolean; } export interface SecurityDeletePrivilegesRequest extends RequestBase { /** The name of the application. * Application privileges are always associated with exactly one application. */ application: Name; /** The name of the privilege. */ name: Names; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { application?: never; name?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { application?: never; name?: never; refresh?: never; }; } export type SecurityDeletePrivilegesResponse = Record<string, Record<string, SecurityDeletePrivilegesFoundStatus>>; export interface SecurityDeleteRoleRequest extends RequestBase { /** The name of the role. */ name: Name; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring.
*/ querystring?: { [key: string]: any; } & { name?: never; refresh?: never; }; } export interface SecurityDeleteRoleResponse { /** If the role is successfully deleted, `found` is `true`. * Otherwise, `found` is `false`. */ found: boolean; } export interface SecurityDeleteRoleMappingRequest extends RequestBase { /** The distinct name that identifies the role mapping. * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; refresh?: never; }; } export interface SecurityDeleteRoleMappingResponse { /** If the mapping is successfully deleted, `found` is `true`. * Otherwise, `found` is `false`. */ found: boolean; } export interface SecurityDeleteServiceTokenRequest extends RequestBase { /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace; /** The service name. */ service: Service; /** The name of the service account token. */ name: Name; /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { namespace?: never; service?: never; name?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { namespace?: never; service?: never; name?: never; refresh?: never; }; } export interface SecurityDeleteServiceTokenResponse { /** If the service account token is successfully deleted, the request returns `{"found": true}`. * Otherwise, the response will have status code 404 and `found` is set to `false`. */ found: boolean; } export interface SecurityDeleteUserRequest extends RequestBase { /** An identifier for the user. */ username: Username; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { username?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { username?: never; refresh?: never; }; } export interface SecurityDeleteUserResponse { /** If the user is successfully deleted, the request returns `{"found": true}`. * Otherwise, `found` is set to `false`. */ found: boolean; } export interface SecurityDisableUserRequest extends RequestBase { /** An identifier for the user. 
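 *
 * Usage sketch (assumes a configured `client`; the username is an example):
 *
 *   await client.security.disableUser({ username: 'jacknich' })
 *   // the user definition is kept, but authentication fails until enableUser is called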
*/ username: Username; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { username?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { username?: never; refresh?: never; }; } export interface SecurityDisableUserResponse { } export interface SecurityDisableUserProfileRequest extends RequestBase { /** Unique identifier for the user profile. */ uid: SecurityUserProfileId; /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. * If 'wait_for', it waits for a refresh to make this operation visible to search. * If 'false', it does nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { uid?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { uid?: never; refresh?: never; }; } export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase; export interface SecurityEnableUserRequest extends RequestBase { /** An identifier for the user. */ username: Username; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { username?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { username?: never; refresh?: never; }; } export interface SecurityEnableUserResponse { } export interface SecurityEnableUserProfileRequest extends RequestBase { /** A unique identifier for the user profile. */ uid: SecurityUserProfileId; /** If 'true', Elasticsearch refreshes the affected shards to make this operation * visible to search. * If 'wait_for', it waits for a refresh to make this operation visible to search. * If 'false', nothing is done with refreshes. */ refresh?: Refresh; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { uid?: never; refresh?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { uid?: never; refresh?: never; }; } export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase; export interface SecurityEnrollKibanaRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface SecurityEnrollKibanaResponse { token: SecurityEnrollKibanaToken; /** The CA certificate used to sign the node certificates that Elasticsearch uses for TLS on the HTTP layer. * The certificate is returned as a Base64 encoded string of the ASN.1 DER encoding of the certificate. 
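 *
 * A sketch of converting the returned base64 DER into a PEM file (assumes a
 * configured `client` and a Node.js runtime; the output path is an example):
 *
 *   import { writeFileSync } from 'node:fs'
 *
 *   const resp = await client.security.enrollKibana()
 *   const body = resp.http_ca.match(/.{1,64}/g)!.join('\n')
 *   writeFileSync('http_ca.crt', `-----BEGIN CERTIFICATE-----\n${body}\n-----END CERTIFICATE-----\n`)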
*/ http_ca: string; } export interface SecurityEnrollKibanaToken { /** The name of the bearer token for the `elastic/kibana` service account. */ name: string; /** The value of the bearer token for the `elastic/kibana` service account. * Use this value to authenticate the service account with Elasticsearch. */ value: string; } export interface SecurityEnrollNodeRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface SecurityEnrollNodeResponse { /** The CA private key that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ http_ca_key: string; /** The CA certificate that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca_cert: string; /** The CA certificate that is used to sign the TLS certificate for the transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_ca_cert: string; /** The private key that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ transport_key: string; /** The certificate that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_cert: string; /** A list of transport addresses in the form of `host:port` for the nodes that are already members of the cluster. */ nodes_addresses: string[]; } export interface SecurityGetApiKeyRequest extends RequestBase { /** An API key ID. * This parameter cannot be used with any of `name`, `realm_name` or `username`. */ id?: Id; /** An API key name. * This parameter cannot be used with any of `id`, `realm_name` or `username`. * It supports prefix search with wildcards. */ name?: Name; /** A boolean flag that can be used to query API keys owned by the currently authenticated user. * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ owner?: boolean; /** The name of an authentication realm. * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ realm_name?: Name; /** The username of a user. * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ username?: Username; /** Return the snapshot of the owner user's role descriptors * associated with the API key. An API key's actual * permissions are the intersection of its assigned role * descriptors and the owner user's role descriptors. */ with_limited_by?: boolean; /** A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. */ active_only?: boolean; /** Determines whether to also retrieve the profile UID for the API key owner principal, if it exists. */ with_profile_uid?: boolean; /** All values in `body` will be added to the request body.
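 *
 * Usage sketch (assumes a configured `client`): list the caller's own keys that are
 * still usable, then inspect them.
 *
 *   const resp = await client.security.getApiKey({ owner: true, active_only: true })
 *   for (const key of resp.api_keys) {
 *     console.log(key.id, key.name, key.invalidated)
 *   }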
*/ body?: string | ({ [key: string]: any; } & { id?: never; name?: never; owner?: never; realm_name?: never; username?: never; with_limited_by?: never; active_only?: never; with_profile_uid?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; name?: never; owner?: never; realm_name?: never; username?: never; with_limited_by?: never; active_only?: never; with_profile_uid?: never; }; } export interface SecurityGetApiKeyResponse { api_keys: SecurityApiKey[]; } export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export interface SecurityGetBuiltinPrivilegesResponse { /** The list of cluster privileges that are understood by this version of Elasticsearch. */ cluster: SecurityClusterPrivilege[]; /** The list of index privileges that are understood by this version of Elasticsearch. */ index: IndexName[]; /** The list of remote_cluster privileges that are understood by this version of Elasticsearch. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster: SecurityRemoteClusterPrivilege[]; } export interface SecurityGetPrivilegesRequest extends RequestBase { /** The name of the application. * Application privileges are always associated with exactly one application. * If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name; /** The name of the privilege. * If you do not specify this parameter, the API returns information about all privileges for the requested application. */ name?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { application?: never; name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { application?: never; name?: never; }; } export type SecurityGetPrivilegesResponse = Record<string, Record<string, SecurityPutPrivilegesActions>>; export interface SecurityGetRoleRequest extends RequestBase { /** The name of the role. * You can specify multiple roles as a comma-separated list. * If you do not specify this parameter, the API returns information about all roles. */ name?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SecurityGetRoleResponse = Record<string, SecurityGetRoleRole>; export interface SecurityGetRoleRole { cluster: SecurityClusterPrivilege[]; indices: SecurityIndicesPrivileges[]; /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[]; /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[]; metadata: Metadata; description?: string; run_as?: string[]; transient_metadata?: Record<string, any>; applications: SecurityApplicationPrivileges[]; role_templates?: SecurityRoleTemplate[]; global?: Record<string, Record<string, Record<string, string[]>>>; } export interface SecurityGetRoleMappingRequest extends RequestBase { /** The distinct name that identifies the role mapping.
The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ name?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; }; } export type SecurityGetRoleMappingResponse = Record<string, SecurityRoleMapping>; export interface SecurityGetServiceAccountsRequest extends RequestBase { /** The name of the namespace. * Omit this parameter to retrieve information about all service accounts. * If you omit this parameter, you must also omit the `service` parameter. */ namespace?: Namespace; /** The service name. * Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ service?: Service; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { namespace?: never; service?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { namespace?: never; service?: never; }; } export type SecurityGetServiceAccountsResponse = Record<string, SecurityGetServiceAccountsRoleDescriptorWrapper>; export interface SecurityGetServiceAccountsRoleDescriptorWrapper { role_descriptor: SecurityRoleDescriptorRead; } export interface SecurityGetServiceCredentialsNodesCredentials { /** General status showing how nodes respond to the above collection request. */ _nodes: NodeStatistics; /** File-backed tokens collected from all nodes */ file_tokens: Record<string, SecurityGetServiceCredentialsNodesCredentialsFileToken>; } export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { nodes: string[]; } export interface SecurityGetServiceCredentialsRequest extends RequestBase { /** The name of the namespace. */ namespace: Namespace; /** The service name. */ service: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { namespace?: never; service?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { namespace?: never; service?: never; }; } export interface SecurityGetServiceCredentialsResponse { service_account: string; count: integer; tokens: Record<string, Metadata>; /** Service account credentials collected from all nodes of the cluster. */ nodes_credentials: SecurityGetServiceCredentialsNodesCredentials; } export interface SecurityGetSettingsRequest extends RequestBase { /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface SecurityGetSettingsResponse { /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security: SecuritySecuritySettings; /** Settings for the index used to store profile information. */ 'security-profile': SecuritySecuritySettings; /** Settings for the index used to store tokens.
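 *
 * Usage sketch (assumes a configured `client` with sufficient security privileges):
 *
 *   const settings = await client.security.getSettings()
 *   // each of the three keys holds an IndicesIndexSettings-shaped object
 *   console.log(settings['security-tokens'].index)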
*/ 'security-tokens': SecuritySecuritySettings; } export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token'; export interface SecurityGetTokenAuthenticatedUser extends SecurityUser { authentication_realm: SecurityGetTokenUserRealm; lookup_realm: SecurityGetTokenUserRealm; authentication_provider?: SecurityGetTokenAuthenticationProvider; authentication_type: string; } export interface SecurityGetTokenAuthenticationProvider { type: string; name: Name; } export interface SecurityGetTokenRequest extends RequestBase { /** The type of grant. * Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ grant_type?: SecurityGetTokenAccessTokenGrantType; /** The scope of the token. * Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ scope?: string; /** The user's password. * If you specify the `password` grant type, this parameter is required. * This parameter is not valid with any other supported grant type. */ password?: Password; /** The base64-encoded Kerberos ticket. * If you specify the `_kerberos` grant type, this parameter is required. * This parameter is not valid with any other supported grant type. */ kerberos_ticket?: string; /** The string that was returned when you created the token, which enables you to extend its life. * If you specify the `refresh_token` grant type, this parameter is required. * This parameter is not valid with any other supported grant type. */ refresh_token?: string; /** The username that identifies the user. * If you specify the `password` grant type, this parameter is required. * This parameter is not valid with any other supported grant type. */ username?: Username; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { grant_type?: never; scope?: never; password?: never; kerberos_ticket?: never; refresh_token?: never; username?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { grant_type?: never; scope?: never; password?: never; kerberos_ticket?: never; refresh_token?: never; username?: never; }; } export interface SecurityGetTokenResponse { access_token: string; expires_in: long; scope?: string; type: string; refresh_token?: string; kerberos_authentication_response_token?: string; authentication: SecurityGetTokenAuthenticatedUser; } export interface SecurityGetTokenUserRealm { name: Name; type: string; } export interface SecurityGetUserRequest extends RequestBase { /** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ username?: Username | Username[]; /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { username?: never; with_profile_uid?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { username?: never; with_profile_uid?: never; }; } export type SecurityGetUserResponse = Record<string, SecurityUser>; export interface SecurityGetUserPrivilegesRequest extends RequestBase { /** The name of the application. Application privileges are always associated with exactly one application.
If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name; /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */ priviledge?: Name; username?: Name | null; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { application?: never; priviledge?: never; username?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { application?: never; priviledge?: never; username?: never; }; } export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[]; cluster: string[]; remote_cluster?: SecurityRemoteClusterPrivileges[]; global: SecurityGlobalPrivilege[]; indices: SecurityUserIndicesPrivileges[]; remote_indices?: SecurityRemoteUserIndicesPrivileges[]; run_as: string[]; } export interface SecurityGetUserProfileGetUserProfileErrors { count: long; details: Record<SecurityUserProfileId, ErrorCause>; } export interface SecurityGetUserProfileRequest extends RequestBase { /** A unique identifier for the user profile. */ uid: SecurityUserProfileId | SecurityUserProfileId[]; /** A comma-separated list of filters for the `data` field of the profile document. * To return all content use `data=*`. * To return a subset of content use `data=<key>` to retrieve content nested under the specified `<key>`. * By default, the request returns no `data` content. */ data?: string | string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { uid?: never; data?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { uid?: never; data?: never; }; } export interface SecurityGetUserProfileResponse { /** A successful call returns the JSON representation of the user profile and its internal versioning numbers. * The API returns an empty object if no profile document is found for the provided `uid`. * The content of the data field is not returned by default to avoid deserializing a potentially large payload. */ profiles: SecurityUserProfileWithMetadata[]; errors?: SecurityGetUserProfileGetUserProfileErrors; } export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password'; export interface SecurityGrantApiKeyGrantApiKey { name: Name; /** Expiration time for the API key. By default, API keys never expire. */ expiration?: DurationLarge; /** The role descriptors for this API key. * When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. * If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the permissions of the user or access token. */ role_descriptors?: Record<string, SecurityRoleDescriptor> | Record<string, SecurityRoleDescriptor>[]; /** Arbitrary metadata that you want to associate with the API key. * It supports a nested data structure. * Within the `metadata` object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata; } export interface SecurityGrantApiKeyRequest extends RequestBase { /** The API key. */ api_key: SecurityGrantApiKeyGrantApiKey; /** The type of grant. Supported grant types are: `access_token`, `password`. */ grant_type: SecurityGrantApiKeyApiKeyGrantType; /** The user's access token. * If you specify the `access_token` grant type, this parameter is required. * It is not valid with other grant types.
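 *
 * Usage sketch (assumes a configured `client` authorized to grant API keys; the
 * target user's credentials are illustrative):
 *
 *   const resp = await client.security.grantApiKey({
 *     grant_type: 'password',
 *     username: 'jacknich',
 *     password: 'l0ng-r4nd0m-pa55word',
 *     api_key: { name: 'on-behalf-key', expiration: '1d' }
 *   })
 *   // the resulting key is owned by the granted user, not by the caller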
*/ access_token?: string; /** The username that identifies the user. * If you specify the `password` grant type, this parameter is required. * It is not valid with other grant types. */ username?: Username; /** The user's password. * If you specify the `password` grant type, this parameter is required. * It is not valid with other grant types. */ password?: Password; /** The name of the user to be impersonated. */ run_as?: Username; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { api_key?: never; grant_type?: never; access_token?: never; username?: never; password?: never; run_as?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { api_key?: never; grant_type?: never; access_token?: never; username?: never; password?: never; run_as?: never; }; } export interface SecurityGrantApiKeyResponse { api_key: string; id: Id; name: Name; expiration?: EpochTime<UnitMillis>; encoded: string; } export interface SecurityHasPrivilegesApplicationPrivilegesCheck { /** The name of the application. */ application: string; /** A list of the privileges that you want to check for the specified resources. * It may be either application privilege names or the names of actions that are granted by those privileges. */ privileges: string[]; /** A list of resource names against which the privileges should be checked. */ resources: string[]; } export type SecurityHasPrivilegesApplicationsPrivileges = Record<Name, SecurityHasPrivilegesResourcePrivileges>; export interface SecurityHasPrivilegesIndexPrivilegesCheck { /** A list of indices. */ names: Indices; /** A list of the privileges that you want to check for the specified indices. */ privileges: SecurityIndexPrivilege[]; /** This needs to be set to `true` (default is `false`) if using wildcards or regexps for patterns that cover restricted indices. * Implicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false. * If restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of `allow_restricted_indices`. */ allow_restricted_indices?: boolean; } export type SecurityHasPrivilegesPrivileges = Record<string, boolean>; export interface SecurityHasPrivilegesRequest extends RequestBase { /** Username */ user?: Name; application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]; /** A list of the cluster privileges that you want to check. */ cluster?: SecurityClusterPrivilege[]; index?: SecurityHasPrivilegesIndexPrivilegesCheck[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { user?: never; application?: never; cluster?: never; index?: never; }); /** All values in `querystring` will be added to the request querystring.
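 *
 * Usage sketch (assumes a configured `client`; the index names and privileges are
 * examples):
 *
 *   const resp = await client.security.hasPrivileges({
 *     cluster: ['monitor'],
 *     index: [{ names: ['logs-*'], privileges: ['read', 'write'] }]
 *   })
 *   // resp.has_all_requested aggregates the result; resp.index breaks it down per index and privilege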
*/ querystring?: { [key: string]: any; } & { user?: never; application?: never; cluster?: never; index?: never; }; } export type SecurityHasPrivilegesResourcePrivileges = Record; export interface SecurityHasPrivilegesResponse { application: SecurityHasPrivilegesApplicationsPrivileges; cluster: Record; has_all_requested: boolean; index: Record; username: Username; } export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors { count: long; details: Record; } export interface SecurityHasPrivilegesUserProfilePrivilegesCheck { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]; /** A list of the cluster privileges that you want to check. */ cluster?: SecurityClusterPrivilege[]; index?: SecurityHasPrivilegesIndexPrivilegesCheck[]; } export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { /** A list of profile IDs. The privileges are checked for associated users of the profiles. */ uids: SecurityUserProfileId[]; /** An object containing all the privileges to be checked. */ privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { uids?: never; privileges?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { uids?: never; privileges?: never; }; } export interface SecurityHasPrivilegesUserProfileResponse { /** The subset of the requested profile IDs of the users that * have all the requested privileges. */ has_privilege_uids: SecurityUserProfileId[]; /** The subset of the requested profile IDs for which an error * was encountered. It does not include the missing profile IDs * or the profile IDs of the users that do not have all the * requested privileges. This field is absent if empty. */ errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors; } export interface SecurityInvalidateApiKeyRequest extends RequestBase { id?: Id; /** A list of API key ids. * This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ ids?: Id[]; /** An API key name. * This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ name?: Name; /** Query API keys owned by the currently authenticated user. * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. * * NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */ owner?: boolean; /** The name of an authentication realm. * This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ realm_name?: string; /** The username of a user. * This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ username?: Username; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; ids?: never; name?: never; owner?: never; realm_name?: never; username?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; ids?: never; name?: never; owner?: never; realm_name?: never; username?: never; }; } export interface SecurityInvalidateApiKeyResponse { /** The number of errors that were encountered when invalidating the API keys. */ error_count: integer; /** Details about the errors. 
* This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[]; /** The IDs of the API keys that were invalidated as part of this request. */ invalidated_api_keys: string[]; /** The IDs of the API keys that were already invalidated. */ previously_invalidated_api_keys: string[]; } export interface SecurityInvalidateTokenRequest extends RequestBase { /** An access token. * This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ token?: string; /** A refresh token. * This parameter cannot be used if any of `token`, `realm_name`, or `username` are used. */ refresh_token?: string; /** The name of an authentication realm. * This parameter cannot be used with either `refresh_token` or `token`. */ realm_name?: Name; /** The username of a user. * This parameter cannot be used with either `refresh_token` or `token`. */ username?: Username; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { token?: never; refresh_token?: never; realm_name?: never; username?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { token?: never; refresh_token?: never; realm_name?: never; username?: never; }; } export interface SecurityInvalidateTokenResponse { /** The number of errors that were encountered when invalidating the tokens. */ error_count: long; /** Details about the errors. * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[]; /** The number of tokens that were invalidated as part of this request. */ invalidated_tokens: long; /** The number of tokens that were already invalidated. */ previously_invalidated_tokens: long; } export interface SecurityOidcAuthenticateRequest extends RequestBase { /** Associate a client session with an ID token and mitigate replay attacks. * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ nonce: string; /** The name of the OpenID Connect realm. * This property is useful in cases where multiple realms are defined. */ realm?: string; /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. * This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ redirect_uri: string; /** Maintain state between the authentication request and the response. * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ state: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { nonce?: never; realm?: never; redirect_uri?: never; state?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { nonce?: never; realm?: never; redirect_uri?: never; state?: never; }; } export interface SecurityOidcAuthenticateResponse { /** The Elasticsearch access token. */ access_token: string; /** The duration (in seconds) of the tokens. */ expires_in: integer; /** The Elasticsearch refresh token. 
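 *
 * For reference, a sketch of how these tokens are obtained (illustrative only;
 * assumes `client` is an instantiated elasticsearch-js `Client`, and that `nonce`,
 * `state`, and `redirectUri` were captured from the earlier `/_security/oidc/prepare` step):
 *
 * ```ts
 * const tokens = await client.security.oidcAuthenticate({
 *   nonce,
 *   state,
 *   redirect_uri: redirectUri,
 *   realm: 'oidc1', // hypothetical realm name
 * })
 * // tokens.access_token, tokens.refresh_token, tokens.expires_in
 * ```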
*/ refresh_token: string; /** The type of token. */ type: string; } export interface SecurityOidcLogoutRequest extends RequestBase { /** The access token to be invalidated. */ token: string; /** The refresh token to be invalidated. */ refresh_token?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { token?: never; refresh_token?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { token?: never; refresh_token?: never; }; } export interface SecurityOidcLogoutResponse { /** A URI that points to the end session endpoint of the OpenID Connect Provider with all the parameters of the logout request as HTTP GET parameters. */ redirect: string; } export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { /** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. * It cannot be specified when *realm* is specified. * One of *realm* or *iss* is required. */ iss?: string; /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. * This parameter is not valid when *realm* is specified. */ login_hint?: string; /** The value used to associate a client session with an ID token and to mitigate replay attacks. * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ nonce?: string; /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. * It cannot be specified when *iss* is specified. * One of *realm* or *iss* is required. */ realm?: string; /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ state?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { iss?: never; login_hint?: never; nonce?: never; realm?: never; state?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { iss?: never; login_hint?: never; nonce?: never; realm?: never; state?: never; }; } export interface SecurityOidcPrepareAuthenticationResponse { nonce: string; realm: string; /** A URI that points to the authorization endpoint of the OpenID Connect Provider with all the parameters of the authentication request as HTTP GET parameters. */ redirect: string; state: string; } export interface SecurityPutPrivilegesActions { actions: string[]; application?: string; name?: Name; metadata?: Metadata; } export interface SecurityPutPrivilegesRequest extends RequestBase { /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; privileges?: Record>; /** All values in `body` will be added to the request body. 
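 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`): registering a `read` privilege for a hypothetical
 * application named `my-app`:
 *
 * ```ts
 * await client.security.putPrivileges({
 *   privileges: {
 *     'my-app': {
 *       read: { actions: ['data:read/*', 'action:login'] },
 *     },
 *   },
 * })
 * ```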
*/ body?: string | ({ [key: string]: any; } & { refresh?: never; privileges?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { refresh?: never; privileges?: never; }; } export type SecurityPutPrivilegesResponse = Record>; export interface SecurityPutRoleRequest extends RequestBase { /** The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */ name: Name; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** A list of application privilege entries. */ applications?: SecurityApplicationPrivileges[]; /** A list of cluster privileges. These privileges define the cluster-level actions for users with this role. */ cluster?: SecurityClusterPrivilege[]; /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: Record; /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[]; /** A list of remote indices permissions entries. * * NOTE: Remote indices are effective for remote clusters configured with the API key based model. * They have no effect for remote clusters configured with the certificate based model. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[]; /** A list of remote cluster permissions entries. * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[]; /** Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. */ metadata?: Metadata; /** A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ run_as?: string[]; /** Optional description of the role descriptor */ description?: string; /** Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. */ transient_metadata?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; refresh?: never; applications?: never; cluster?: never; global?: never; indices?: never; remote_indices?: never; remote_cluster?: never; metadata?: never; run_as?: never; description?: never; transient_metadata?: never; }); /** All values in `querystring` will be added to the request querystring. 
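 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`): creating a role with cluster and index privileges:
 *
 * ```ts
 * await client.security.putRole({
 *   name: 'logs_reader',
 *   cluster: ['monitor'],
 *   indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }],
 *   metadata: { version: 1 },
 * })
 * ```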
*/ querystring?: { [key: string]: any; } & { name?: never; refresh?: never; applications?: never; cluster?: never; global?: never; indices?: never; remote_indices?: never; remote_cluster?: never; metadata?: never; run_as?: never; description?: never; transient_metadata?: never; }; } export interface SecurityPutRoleResponse { /** When an existing role is updated, `created` is set to `false`. */ role: SecurityCreatedStatus; } export interface SecurityPutRoleMappingRequest extends RequestBase { /** The distinct name that identifies the role mapping. * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name; /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh; /** Mappings that have `enabled` set to `false` are ignored when role mapping is performed. */ enabled?: boolean; /** Additional metadata that helps define which roles are assigned to each user. * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata; /** A list of role names that are granted to the users that match the role mapping rules. * Exactly one of `roles` or `role_templates` must be specified. */ roles?: string[]; /** A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. * Exactly one of `roles` or `role_templates` must be specified. */ role_templates?: SecurityRoleTemplate[]; /** The rules that determine which users should be matched by the mapping. * A rule is a logical condition that is expressed by using a JSON DSL. */ rules?: SecurityRoleMappingRule; run_as?: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; refresh?: never; enabled?: never; metadata?: never; roles?: never; role_templates?: never; rules?: never; run_as?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; refresh?: never; enabled?: never; metadata?: never; roles?: never; role_templates?: never; rules?: never; run_as?: never; }; } export interface SecurityPutRoleMappingResponse { created?: boolean; role_mapping: SecurityCreatedStatus; } export interface SecurityPutUserRequest extends RequestBase { /** An identifier for the user. * * NOTE: Usernames must be at least 1 and no more than 507 characters. * They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. * Leading or trailing whitespace is not allowed. */ username: Username; /** Valid values are `true`, `false`, and `wait_for`. * These values have the same meaning as in the index API, but the default value for this API is true. */ refresh?: Refresh; /** The email of the user. */ email?: string | null; /** The full name of the user. */ full_name?: string | null; /** Arbitrary metadata that you want to associate with the user. */ metadata?: Metadata; /** The user's password. * Passwords must be at least 6 characters long. * When adding a user, one of `password` or `password_hash` is required. 
* When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password. */ password?: Password; /** A hash of the user's password. * This must be produced using the same hashing algorithm as has been configured for password storage. * For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. * Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. * The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ password_hash?: string; /** A set of roles the user has. * The roles determine the user's access permissions. * To create a user without any roles, specify an empty list (`[]`). */ roles?: string[]; /** Specifies whether the user is enabled. */ enabled?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { username?: never; refresh?: never; email?: never; full_name?: never; metadata?: never; password?: never; password_hash?: never; roles?: never; enabled?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { username?: never; refresh?: never; email?: never; full_name?: never; metadata?: never; password?: never; password_hash?: never; roles?: never; enabled?: never; }; } export interface SecurityPutUserResponse { /** A successful call returns a JSON structure that shows whether the user has been created or updated. * When an existing user is updated, `created` is set to `false`. */ created: boolean; } export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate; export interface SecurityQueryApiKeysApiKeyAggregationContainer { /** Sub-aggregations for this aggregation. * Only applies to bucket aggregations. */ aggregations?: Record; /** Sub-aggregations for this aggregation. * Only applies to bucket aggregations. * @alias aggregations */ aggs?: Record; meta?: Metadata; /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation; /** A multi-bucket aggregation that creates composite buckets from different sources. * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation; /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation; /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: SecurityQueryApiKeysApiKeyQueryContainer; /** A multi-bucket aggregation where each bucket contains the documents that match a query. 
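 *
 * For example (illustrative only), a `filters` aggregation that buckets API keys
 * by whether they have been invalidated might be expressed as:
 *
 * ```ts
 * const aggs: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> = {
 *   by_validity: {
 *     filters: {
 *       filters: {
 *         valid: { term: { invalidated: false } },
 *         invalidated: { term: { invalidated: true } },
 *       },
 *     },
 *   },
 * }
 * ```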
*/ filters?: SecurityQueryApiKeysApiKeyFiltersAggregation; missing?: AggregationsMissingAggregation; /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation; /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation; /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ value_count?: AggregationsValueCountAggregation; } export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets; /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean; /** The key with which the other bucket is returned. */ other_bucket_key?: string; /** By default, the named filters aggregation returns the buckets as an object. * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean; } export interface SecurityQueryApiKeysApiKeyQueryContainer { /** Matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery; /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery; /** Returns documents based on their IDs. * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery; /** Returns documents that match a provided text, number, date or boolean value. * The provided text is analyzed before matching. */ match?: Partial>; /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery; /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial>; /** Returns documents that contain terms within a provided range. */ range?: Partial>; /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery; /** Returns documents that contain an exact term in a provided field. * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial>; /** Returns documents that contain one or more exact terms in a provided field. * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery; /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial>; } export interface SecurityQueryApiKeysRequest extends RequestBase { /** Return the snapshot of the owner user's role descriptors associated with the API key. * An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). * An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ with_limited_by?: boolean; /** Determines whether to also retrieve the profile UID for the API key owner principal. * If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ with_profile_uid?: boolean; /** Determines whether aggregation names are prefixed by their respective types in the response. 
*/ typed_keys?: boolean; /** Any aggregations to run over the corpus of returned API keys. * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. * Additionally, aggregations only run over the same subset of fields that the query works with. */ aggregations?: Record; /** Any aggregations to run over the corpus of returned API keys. * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. * Additionally, aggregations only run over the same subset of fields that the query works with. * @alias aggregations */ aggs?: Record; /** A query to filter which API keys to return. * If the query parameter is missing, it is equivalent to a `match_all` query. * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. * You can query the following public information associated with an API key: `id`, `type`, `name`, * `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. * * NOTE: The queryable string values associated with API keys are internally mapped as keywords. * Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. * Such a match query is hence equivalent to a `term` query. */ query?: SecurityQueryApiKeysApiKeyQueryContainer; /** The starting document offset. * It must not be negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ from?: integer; /** The sort definition. * Other than `id`, all public fields of an API key are eligible for sorting. * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort; /** The number of hits to return. * It must not be negative. * The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ size?: integer; /** The search after definition. */ search_after?: SortResults; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { with_limited_by?: never; with_profile_uid?: never; typed_keys?: never; aggregations?: never; aggs?: never; query?: never; from?: never; sort?: never; size?: never; search_after?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { with_limited_by?: never; with_profile_uid?: never; typed_keys?: never; aggregations?: never; aggs?: never; query?: never; from?: never; sort?: never; size?: never; search_after?: never; }; } export interface SecurityQueryApiKeysResponse { /** The total number of API keys found. */ total: integer; /** The number of API keys returned in the response. */ count: integer; /** A list of API key information. 
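 *
 * A usage sketch for the query API keys request described above (illustrative only;
 * assumes `client` is an instantiated elasticsearch-js `Client`):
 *
 * ```ts
 * const response = await client.security.queryApiKeys({
 *   query: { bool: { must: [{ term: { invalidated: false } }] } },
 *   sort: [{ creation: 'desc' }],
 *   size: 25,
 *   with_limited_by: true,
 * })
 * console.log(response.total, response.api_keys.length)
 * ```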
*/ api_keys: SecurityApiKey[]; /** The aggregations result, if requested. */ aggregations?: Record; } export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { _sort?: SortResults; /** Name of the role. */ name: string; } export interface SecurityQueryRoleRequest extends RequestBase { /** A query to filter which roles to return. * If the query parameter is missing, it is equivalent to a `match_all` query. * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. * You can query the following information associated with roles: `name`, `description`, `metadata`, * `applications.application`, `applications.privileges`, and `applications.resources`. */ query?: SecurityQueryRoleRoleQueryContainer; /** The starting document offset. * It must not be negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ from?: integer; /** The sort definition. * You can sort on `username`, `roles`, or `enabled`. * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort; /** The number of hits to return. * It must not be negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ size?: integer; /** The search after definition. */ search_after?: SortResults; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { query?: never; from?: never; sort?: never; size?: never; search_after?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { query?: never; from?: never; sort?: never; size?: never; search_after?: never; }; } export interface SecurityQueryRoleResponse { /** The total number of roles found. */ total: integer; /** The number of roles returned in the response. */ count: integer; /** A list of roles that match the query. * The returned role format is an extension of the role definition format. * It adds the `transient_metadata.enabled` and the `_sort` fields. * `transient_metadata.enabled` is set to `false` in case the role is automatically disabled, for example when the role grants privileges that are not allowed by the installed license. * `_sort` is present when the search query sorts on some field. * It contains the array of values that have been used for sorting. */ roles: SecurityQueryRoleQueryRole[]; } export interface SecurityQueryRoleRoleQueryContainer { /** matches roles matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery; /** Returns roles that contain an indexed value for a field. */ exists?: QueryDslExistsQuery; /** Returns roles based on their IDs. * This query uses role document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery; /** Returns roles that match a provided text, number, date or boolean value. * The provided text is analyzed before matching. */ match?: Partial>; /** Matches all roles, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery; /** Returns roles that contain a specific prefix in a provided field. */ prefix?: Partial>; /** Returns roles that contain terms within a provided range. 
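 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`): finding roles whose name starts with `app-`,
 * a hypothetical naming convention:
 *
 * ```ts
 * const response = await client.security.queryRole({
 *   query: { wildcard: { name: 'app-*' } },
 *   size: 100,
 * })
 * for (const role of response.roles) console.log(role.name)
 * ```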
*/ range?: Partial>; /** Returns roles based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery; /** Returns roles that contain an exact term in a provided field. * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial>; /** Returns roles that contain one or more exact terms in a provided field. * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery; /** Returns roles that contain terms matching a wildcard pattern. */ wildcard?: Partial>; } export interface SecurityQueryUserQueryUser extends SecurityUser { _sort?: SortResults; } export interface SecurityQueryUserRequest extends RequestBase { /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean; /** A query to filter which users to return. * If the query parameter is missing, it is equivalent to a `match_all` query. * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. * You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. */ query?: SecurityQueryUserUserQueryContainer; /** The starting document offset. * It must not be negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ from?: integer; /** The sort definition. * Fields eligible for sorting are: `username`, `roles`, `enabled`. * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort; /** The number of hits to return. * It must not be negative. * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. * To page through more hits, use the `search_after` parameter. */ size?: integer; /** The search after definition */ search_after?: SortResults; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { with_profile_uid?: never; query?: never; from?: never; sort?: never; size?: never; search_after?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { with_profile_uid?: never; query?: never; from?: never; sort?: never; size?: never; search_after?: never; }; } export interface SecurityQueryUserResponse { /** The total number of users found. */ total: integer; /** The number of users returned in the response. */ count: integer; /** A list of users that match the query. */ users: SecurityQueryUserQueryUser[]; } export interface SecurityQueryUserUserQueryContainer { /** Returns users based on their IDs. * This query uses the user document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery; /** matches users matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery; /** Returns users that contain an indexed value for a field. */ exists?: QueryDslExistsQuery; /** Returns users that match a provided text, number, date or boolean value. * The provided text is analyzed before matching. */ match?: Partial>; /** Matches all users, giving them all a `_score` of 1.0. 
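 *
 * For example (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`), listing enabled users sorted by username:
 *
 * ```ts
 * const response = await client.security.queryUser({
 *   query: { term: { enabled: true } },
 *   sort: ['username'],
 *   size: 50,
 *   with_profile_uid: true,
 * })
 * ```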
*/ match_all?: QueryDslMatchAllQuery; /** Returns users that contain a specific prefix in a provided field. */ prefix?: Partial>; /** Returns users that contain terms within a provided range. */ range?: Partial>; /** Returns users based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery; /** Returns users that contain an exact term in a provided field. * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial>; /** Returns users that contain one or more exact terms in a provided field. * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery; /** Returns users that contain terms matching a wildcard pattern. */ wildcard?: Partial>; } export interface SecuritySamlAuthenticateRequest extends RequestBase { /** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ content: string; /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids; /** The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. */ realm?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { content?: never; ids?: never; realm?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { content?: never; ids?: never; realm?: never; }; } export interface SecuritySamlAuthenticateResponse { /** The access token that was generated by Elasticsearch. */ access_token: string; /** The authenticated user's name. */ username: string; /** The amount of time (in seconds) left until the token expires. */ expires_in: integer; /** The refresh token that was generated by Elasticsearch. */ refresh_token: string; /** The name of the realm where the user was authenticated. */ realm: string; } export interface SecuritySamlCompleteLogoutRequest extends RequestBase { /** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ realm: string; /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids; /** If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. */ query_string?: string; /** If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. */ content?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { realm?: never; ids?: never; query_string?: never; content?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { realm?: never; ids?: never; query_string?: never; content?: never; }; } export type SecuritySamlCompleteLogoutResponse = boolean; export interface SecuritySamlInvalidateRequest extends RequestBase { /** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. 
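 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`, and `logoutQueryString` is a hypothetical variable
 * holding the query part of the IdP-initiated logout URL, passed through verbatim):
 *
 * ```ts
 * const response = await client.security.samlInvalidate({
 *   query_string: logoutQueryString,
 *   realm: 'saml1', // hypothetical realm name
 * })
 * console.log(response.invalidated, response.redirect)
 * ```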
*/ acs?: string; /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. * This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. * If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. * In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. * The client application must not attempt to parse or process the string in any way. */ query_string: string; /** The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the `acs` parameter. */ realm?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { acs?: never; query_string?: never; realm?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { acs?: never; query_string?: never; realm?: never; }; } export interface SecuritySamlInvalidateResponse { /** The number of tokens that were invalidated as part of this logout. */ invalidated: integer; /** The realm name of the SAML realm in Elasticsearch that authenticated the user. */ realm: string; /** A SAML logout response as a parameter so that the user can be redirected back to the SAML IdP. */ redirect: string; } export interface SecuritySamlLogoutRequest extends RequestBase { /** The access token that was returned as a response to calling the SAML authenticate API. * Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ token: string; /** The refresh token that was returned as a response to calling the SAML authenticate API. * Alternatively, the most recent refresh token that was received after refreshing the original access token. */ refresh_token?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { token?: never; refresh_token?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { token?: never; refresh_token?: never; }; } export interface SecuritySamlLogoutResponse { /** A URL that contains a SAML logout request as a parameter. * You can use this URL to be redirected back to the SAML IdP and to initiate Single Logout. */ redirect: string; } export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { /** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. * The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ acs?: string; /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. * You must specify either this parameter or the `acs` parameter. */ realm?: string; /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. * If the Authentication Request is signed, this value is used as part of the signature computation. */ relay_state?: string; /** All values in `body` will be added to the request body. 
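 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`): preparing an authentication request for a
 * hypothetical realm named `saml1` and redirecting the user's browser:
 *
 * ```ts
 * const { id, redirect } = await client.security.samlPrepareAuthentication({
 *   realm: 'saml1',
 * })
 * // Store `id` server-side; it must be presented later to the SAML authenticate API.
 * // Then redirect the user agent to `redirect`.
 * ```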
*/ body?: string | ({ [key: string]: any; } & { acs?: never; realm?: never; relay_state?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { acs?: never; realm?: never; relay_state?: never; }; } export interface SecuritySamlPrepareAuthenticationResponse { /** A unique identifier for the SAML Request to be stored by the caller of the API. */ id: Id; /** The name of the Elasticsearch realm that was used to construct the authentication request. */ realm: string; /** The URL to redirect the user to. */ redirect: string; } export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { /** The name of the SAML realm in Elasticsearch. */ realm_name: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { realm_name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { realm_name?: never; }; } export interface SecuritySamlServiceProviderMetadataResponse { /** An XML string that contains a SAML Service Provider's metadata for the realm. */ metadata: string; } export interface SecuritySuggestUserProfilesHint { /** A list of profile UIDs to match against. */ uids?: SecurityUserProfileId[]; /** A single key-value pair to match against the labels section * of a profile. A profile is considered matching if it matches * at least one of the strings. */ labels?: Record; } export interface SecuritySuggestUserProfilesRequest extends RequestBase { /** A query string used to match name-related fields in user profile documents. * Name-related fields are the user's `username`, `full_name`, and `email`. */ name?: string; /** The number of profiles to return. */ size?: long; /** A comma-separated list of filters for the `data` field of the profile document. * To return all content use `data=*`. * To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. * By default, the API returns no `data` content. * It is an error to specify `data` as both the query parameter and the request body field. */ data?: string | string[]; /** Extra search criteria to improve relevance of the suggestion result. * Profiles matching the specified hint are ranked higher in the response. * Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ hint?: SecuritySuggestUserProfilesHint; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; size?: never; data?: never; hint?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; size?: never; data?: never; hint?: never; }; } export interface SecuritySuggestUserProfilesResponse { /** Metadata about the number of matching profiles. */ total: SecuritySuggestUserProfilesTotalUserProfiles; /** The number of milliseconds it took Elasticsearch to run the request. */ took: long; /** A list of profile documents, ordered by relevance, that match the search criteria. */ profiles: SecurityUserProfile[]; } export interface SecuritySuggestUserProfilesTotalUserProfiles { value: long; relation: RelationName; } export interface SecurityUpdateApiKeyRequest extends RequestBase { /** The ID of the API key to update. */ id: Id; /** The role descriptors to assign to this API key. 
* The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. * You can assign new privileges by specifying them in this parameter. * To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. * If an API key has no assigned privileges, it inherits the owner user's full permissions. * The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record; /** Arbitrary metadata that you want to associate with the API key. * It supports a nested data structure. * Within the metadata object, keys beginning with `_` are reserved for system usage. * When specified, this value fully replaces the metadata previously associated with the API key. */ metadata?: Metadata; /** The expiration time for the API key. * By default, API keys never expire. * This property can be omitted to leave the expiration unchanged. */ expiration?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; role_descriptors?: never; metadata?: never; expiration?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; role_descriptors?: never; metadata?: never; expiration?: never; }; } export interface SecurityUpdateApiKeyResponse { /** If `true`, the API key was updated. * If `false`, the API key didn't change because no change was detected. */ updated: boolean; } export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { /** The ID of the cross-cluster API key to update. */ id: Id; /** The access to be granted to this API key. * The access is composed of permissions for cross cluster search and cross cluster replication. * At least one of them must be specified. * When specified, the new access assignment fully replaces the previously assigned access. */ access: SecurityAccess; /** The expiration time for the API key. * By default, API keys never expire. This property can be omitted to leave the value unchanged. */ expiration?: Duration; /** Arbitrary metadata that you want to associate with the API key. * It supports a nested data structure. * Within the metadata object, keys beginning with `_` are reserved for system usage. * When specified, this information fully replaces metadata previously associated with the API key. */ metadata?: Metadata; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; access?: never; expiration?: never; metadata?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; access?: never; expiration?: never; metadata?: never; }; } export interface SecurityUpdateCrossClusterApiKeyResponse { /** If `true`, the API key was updated. * If `false`, the API key didn’t change because no change was detected. */ updated: boolean; } export interface SecurityUpdateSettingsRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. 
* If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security?: SecuritySecuritySettings; /** Settings for the index used to store profile information. */ 'security-profile'?: SecuritySecuritySettings; /** Settings for the index used to store tokens. */ 'security-tokens'?: SecuritySecuritySettings; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; security?: never; 'security-profile'?: never; 'security-tokens'?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; security?: never; 'security-profile'?: never; 'security-tokens'?: never; }; } export interface SecurityUpdateSettingsResponse { acknowledged: boolean; } export interface SecurityUpdateUserProfileDataRequest extends RequestBase { /** A unique identifier for the user profile. */ uid: SecurityUserProfileId; /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber; /** Only perform the operation if the document has this primary term. */ if_primary_term?: long; /** If 'true', Elasticsearch refreshes the affected shards to make this operation * visible to search. * If 'wait_for', it waits for a refresh to make this operation visible to search. * If 'false', nothing is done with refreshes. */ refresh?: Refresh; /** Searchable data that you want to associate with the user profile. * This field supports a nested data structure. * Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ labels?: Record; /** Non-searchable data that you want to associate with the user profile. * This field supports a nested data structure. * Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). * The data object is not searchable, but can be retrieved with the get user profile API. */ data?: Record; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { uid?: never; if_seq_no?: never; if_primary_term?: never; refresh?: never; labels?: never; data?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { uid?: never; if_seq_no?: never; if_primary_term?: never; refresh?: never; labels?: never; data?: never; }; } export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase; export type ShutdownType = 'restart' | 'remove' | 'replace'; export interface ShutdownDeleteNodeRequest extends RequestBase { /** The node ID of the node to be removed from the shutdown state. */ node_id: NodeId; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
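 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`): clearing the shutdown state of a node after
 * maintenance is complete:
 *
 * ```ts
 * await client.shutdown.deleteNode({ node_id: 'node-1' }) // hypothetical node ID
 * ```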
*/ querystring?: { [key: string]: any; } & { node_id?: never; master_timeout?: never; timeout?: never; }; } export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase; export interface ShutdownGetNodeNodeShutdownStatus { node_id: NodeId; type: ShutdownGetNodeShutdownType; reason: string; shutdown_startedmillis: EpochTime; status: ShutdownGetNodeShutdownStatus; shard_migration: ShutdownGetNodeShardMigrationStatus; persistent_tasks: ShutdownGetNodePersistentTaskStatus; plugins: ShutdownGetNodePluginsStatus; } export interface ShutdownGetNodePersistentTaskStatus { status: ShutdownGetNodeShutdownStatus; } export interface ShutdownGetNodePluginsStatus { status: ShutdownGetNodeShutdownStatus; } export interface ShutdownGetNodeRequest extends RequestBase { /** The node or nodes for which to retrieve the shutdown status. */ node_id?: NodeIds; /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; master_timeout?: never; }; } export interface ShutdownGetNodeResponse { nodes: ShutdownGetNodeNodeShutdownStatus[]; } export interface ShutdownGetNodeShardMigrationStatus { status: ShutdownGetNodeShutdownStatus; } export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'stalled' | 'complete'; export type ShutdownGetNodeShutdownType = 'remove' | 'restart'; export interface ShutdownPutNodeRequest extends RequestBase { /** The node identifier. * This parameter is not validated against the cluster's active nodes. * This enables you to register a node for shut down while it is offline. * No error is thrown if you specify an invalid node ID. */ node_id: NodeId; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit; /** Valid values are restart, remove, or replace. * Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. * Because the node is expected to rejoin the cluster, data is not migrated off of the node. * Use remove when you need to permanently remove a node from the cluster. * The node is not marked ready for shutdown until data is migrated off of the node. * Use replace to do a 1:1 replacement of a node with another node. * Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. * During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ type: ShutdownType; /** A human-readable reason that the node is being shut down. * This field provides information for other cluster operators; it does not affect the shut down process. */ reason: string; /** Only valid if type is restart. * Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. 
* This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. * If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ allocation_delay?: string; /** Only valid if type is replace. * Specifies the name of the node that is replacing the node being shut down. * Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. * During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ target_node_name?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { node_id?: never; master_timeout?: never; timeout?: never; type?: never; reason?: never; allocation_delay?: never; target_node_name?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { node_id?: never; master_timeout?: never; timeout?: never; type?: never; reason?: never; allocation_delay?: never; target_node_name?: never; }; } export type ShutdownPutNodeResponse = AcknowledgedResponseBase; export interface SimulateIngestIngestDocumentSimulationKeys { /** Identifier for the document. */ _id: Id; /** Name of the index that the document would be indexed into if this were not a simulation. */ _index: IndexName; /** JSON body for the document. */ _source: Record; /** */ _version: SpecUtilsStringified; /** A list of the names of the pipelines executed on this document. */ executed_pipelines: string[]; /** A list of the fields that would be ignored at the indexing step. For example, a field whose * value is larger than the allowed limit would make it through all of the pipelines, but * would not be indexed into Elasticsearch. */ ignored_fields?: Record[]; /** Any error resulting from simulating ingest on this doc. This can be an error generated by * executing a processor, or a mapping validation error when simulating indexing the resulting * doc. */ error?: ErrorCause; } export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause; }; export interface SimulateIngestRequest extends RequestBase { /** The index to simulate ingesting into. * This value can be overridden by specifying an index on each document. * If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ index?: IndexName; /** The pipeline to use as the default pipeline. * This value can be used to override the default pipeline of the index. */ pipeline?: PipelineName; /** Sample documents to test in the pipeline. */ docs: IngestDocument[]; /** A map of component template names to substitute component template definition objects. */ component_template_substitutions?: Record; /** A map of index template names to substitute index template definition objects. */ index_template_substitutions?: Record; mapping_addition?: MappingTypeMapping; /** Pipelines to test. * If you don’t specify the `pipeline` request path parameter, this parameter is required. * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record; /** All values in `body` will be added to the request body. 
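 *
 * A usage sketch (illustrative only; assumes `client` is an instantiated
 * elasticsearch-js `Client`): simulating ingest of one document through a
 * substituted pipeline definition without writing anything:
 *
 * ```ts
 * const response = await client.simulate.ingest({
 *   index: 'my-index',              // hypothetical index name
 *   pipeline: 'my-pipeline',        // use the substituted pipeline as the default
 *   docs: [{ _source: { message: 'HELLO' } }],
 *   pipeline_substitutions: {
 *     'my-pipeline': {
 *       processors: [{ lowercase: { field: 'message' } }],
 *     },
 *   },
 * })
 * console.log(response.docs[0].doc?._source) // { message: 'hello' }
 * ```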
*/ body?: string | ({ [key: string]: any; } & { index?: never; pipeline?: never; docs?: never; component_template_substitutions?: never; index_template_substitutions?: never; mapping_addition?: never; pipeline_substitutions?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { index?: never; pipeline?: never; docs?: never; component_template_substitutions?: never; index_template_substitutions?: never; mapping_addition?: never; pipeline_substitutions?: never; }; } export interface SimulateIngestResponse { docs: SimulateIngestSimulateIngestDocumentResult[]; } export interface SimulateIngestSimulateIngestDocumentResult { doc?: SimulateIngestIngestDocumentSimulation; } export interface SlmConfiguration { /** If false, the snapshot fails if any data stream or index in indices is missing or closed. If true, the snapshot ignores missing or closed data streams and indices. */ ignore_unavailable?: boolean; /** A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported. * By default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and indices. */ indices?: Indices; /** If true, the current global state is included in the snapshot. */ include_global_state?: boolean; /** A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions can be retrieved using the get features API. * Each feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false. */ feature_states?: string[]; /** Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. Metadata must be less than 1024 bytes. */ metadata?: Metadata; /** If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean; } export interface SlmInProgress { name: Name; start_time_millis: EpochTime; state: string; uuid: Uuid; } export interface SlmInvocation { snapshot_name: Name; time: DateTime; } export interface SlmPolicy { config?: SlmConfiguration; name: Name; repository: string; retention?: SlmRetention; schedule: WatcherCronExpression; } export interface SlmRetention { /** Time period after which a snapshot is considered expired and eligible for deletion. SLM deletes expired snapshots based on the slm.retention_schedule. */ expire_after: Duration; /** Maximum number of snapshots to retain, even if the snapshots have not yet expired. If the number of snapshots in the repository exceeds this limit, the policy retains the most recent snapshots and deletes older snapshots. */ max_count: integer; /** Minimum number of snapshots to retain, even if the snapshots have expired. */ min_count: integer; } export interface SlmSnapshotLifecycle { in_progress?: SlmInProgress; last_failure?: SlmInvocation; last_success?: SlmInvocation; /** The last time the policy was modified. */ modified_date?: DateTime; modified_date_millis: EpochTime; /** The next time the policy will run.
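* For example, a policy registered with a request like the following (a sketch; `client` and all names and values are illustrative) runs daily at 1:30 a.m., so `next_execution` reports the next such time:
*
* ```ts
* await client.slm.putLifecycle({
*   policy_id: 'nightly-snapshots',
*   name: '<nightly-snap-{now/d}>',
*   repository: 'my_repository',
*   schedule: '0 30 1 * * ?',
*   retention: { expire_after: '30d', max_count: 50, min_count: 5 },
* })
* ```
*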
*/ next_execution?: DateTime; next_execution_millis: EpochTime; policy: SlmPolicy; /** The version of the snapshot policy. * Only the latest version is stored and incremented when the policy is updated. */ version: VersionNumber; stats: SlmStatistics; } export interface SlmSnapshotPolicyStats { policy: string; snapshots_taken: long; snapshots_failed: long; snapshots_deleted: long; snapshot_deletion_failures: long; } export interface SlmStatistics { retention_deletion_time?: Duration; retention_deletion_time_millis?: DurationValue; retention_failed?: long; retention_runs?: long; retention_timed_out?: long; policy?: Id; total_snapshots_deleted?: long; /** @alias total_snapshots_deleted */ snapshots_deleted?: long; total_snapshot_deletion_failures?: long; /** @alias total_snapshot_deletion_failures */ snapshot_deletion_failures?: long; total_snapshots_failed?: long; /** @alias total_snapshots_failed */ snapshots_failed?: long; total_snapshots_taken?: long; /** @alias total_snapshots_taken */ snapshots_taken?: long; } export interface SlmDeleteLifecycleRequest extends RequestBase { /** The id of the snapshot lifecycle policy to remove */ policy_id: Name; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; }; } export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase; export interface SlmExecuteLifecycleRequest extends RequestBase { /** The id of the snapshot lifecycle policy to be executed */ policy_id: Name; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; }; } export interface SlmExecuteLifecycleResponse { snapshot_name: Name; } export interface SlmExecuteRetentionRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
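* As a usage sketch, triggering an immediate retention run takes no required parameters (`client` is an assumed client instance):
*
* ```ts
* await client.slm.executeRetention()
* ```
*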
*/ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export type SlmExecuteRetentionResponse = AcknowledgedResponseBase; export interface SlmGetLifecycleRequest extends RequestBase { /** Comma-separated list of snapshot lifecycle policies to retrieve */ policy_id?: Names; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; }; } export type SlmGetLifecycleResponse = Record; export interface SlmGetStatsRequest extends RequestBase { /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export interface SlmGetStatsResponse { retention_deletion_time: Duration; retention_deletion_time_millis: DurationValue; retention_failed: long; retention_runs: long; retention_timed_out: long; total_snapshots_deleted: long; total_snapshot_deletion_failures: long; total_snapshots_failed: long; total_snapshots_taken: long; policy_stats: SlmSnapshotPolicyStats[]; } export interface SlmGetStatusRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export interface SlmGetStatusResponse { operation_mode: LifecycleOperationMode; } export interface SlmPutLifecycleRequest extends RequestBase { /** The identifier for the snapshot lifecycle policy you want to create or update. */ policy_id: Name; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response. 
* If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** Configuration for each snapshot created by the policy. */ config?: SlmConfiguration; /** Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. */ name?: Name; /** Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. */ repository?: string; /** Retention rules used to retain and delete snapshots created by the policy. */ retention?: SlmRetention; /** Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. */ schedule?: WatcherCronExpression; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; config?: never; name?: never; repository?: never; retention?: never; schedule?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { policy_id?: never; master_timeout?: never; timeout?: never; config?: never; name?: never; repository?: never; retention?: never; schedule?: never; }; } export type SlmPutLifecycleResponse = AcknowledgedResponseBase; export interface SlmStartRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export type SlmStartResponse = AcknowledgedResponseBase; export interface SlmStopRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; }; } export type SlmStopResponse = AcknowledgedResponseBase; export interface SnapshotAzureRepository extends SnapshotRepositoryBase { /** The Azure repository type. */ type: 'azure'; /** The repository settings. 
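* For example, registering an Azure repository (a minimal sketch; `client`, the repository name, and the container are all illustrative):
*
* ```ts
* await client.snapshot.createRepository({
*   name: 'my_azure_repository',
*   repository: { type: 'azure', settings: { container: 'es-snapshots', client: 'default' } },
* })
* ```
*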
*/ settings?: SnapshotAzureRepositorySettings; } export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { /** The path to the repository data within the container. * It defaults to the root directory. * * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ base_path?: string; /** The name of the Azure repository client to use. */ client?: string; /** The Azure container. */ container?: string; /** The maximum batch size, between 1 and 256, used for `BlobBatch` requests. * Defaults to 256, which is the maximum number supported by the Azure blob batch API. */ delete_objects_max_size?: integer; /** Either `primary_only` or `secondary_only`. * Note that if you set it to `secondary_only`, it will force `readonly` to `true`. */ location_mode?: string; /** The maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`. * Note that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits. * Defaults to 10, minimum is 1, maximum is 100. */ max_concurrent_batch_deletes?: integer; /** If `true`, the repository is read-only. * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. * * Only a cluster with write access can create snapshots in the repository. * All other clusters connected to the repository should have the `readonly` parameter set to `true`. * If `false`, the cluster can write to the repository and create snapshots in it. * * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean; } export interface SnapshotFileCountSnapshotStats { file_count: integer; size_in_bytes: long; } export interface SnapshotGcsRepository extends SnapshotRepositoryBase { /** The Google Cloud Storage repository type. */ type: 'gcs'; /** The repository settings. */ settings: SnapshotGcsRepositorySettings; } export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { /** The name of the bucket to be used for snapshots. */ bucket: string; /** The name used by the client when it uses the Google Cloud Storage service. */ application_name?: string; /** The path to the repository data within the bucket. * It defaults to the root of the bucket. * * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ base_path?: string; /** The name of the client to use to connect to Google Cloud Storage. */ client?: string; /** If `true`, the repository is read-only. * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. * * Only a cluster with write access can create snapshots in the repository. * All other clusters connected to the repository should have the `readonly` parameter set to `true`. * * If `false`, the cluster can write to the repository and create snapshots in it.
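* For example, a second cluster consuming a shared bucket would register it read-only (a sketch; names are illustrative):
*
* ```ts
* await client.snapshot.createRepository({
*   name: 'shared_gcs_repository',
*   repository: { type: 'gcs', settings: { bucket: 'es-snapshots', readonly: true } },
* })
* ```
*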
* * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean; } export interface SnapshotIndexDetails { shard_count: integer; size?: ByteSize; size_in_bytes: long; max_segments_per_shard: long; } export interface SnapshotInfoFeatureState { feature_name: string; indices: Indices; } export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase { /** The read-only URL repository type. */ type: 'url'; /** The repository settings. */ settings: SnapshotReadOnlyUrlRepositorySettings; } export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase { /** The maximum number of retries for HTTP and HTTPS URLs. */ http_max_retries?: integer; /** The maximum wait time for data transfers over a connection. */ http_socket_timeout?: Duration; /** The maximum number of snapshots the repository can contain. * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer; /** The URL location of the root of the shared filesystem repository. * The following protocols are supported: * * * `file` * * `ftp` * * `http` * * `https` * * `jar` * * URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with the `repositories.url.allowed_urls` cluster setting. * This setting supports wildcards in the place of a host, path, query, or fragment in the URL. * * URLs using the file protocol must point to the location of a shared filesystem accessible to all master and data nodes in the cluster. * This location must be registered in the `path.repo` setting. * You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols in the `path.repo` setting. */ url: string; } export type SnapshotRepository = SnapshotAzureRepository | SnapshotGcsRepository | SnapshotS3Repository | SnapshotSharedFileSystemRepository | SnapshotReadOnlyUrlRepository | SnapshotSourceOnlyRepository; export interface SnapshotRepositoryBase { uuid?: Uuid; } export interface SnapshotRepositorySettingsBase { /** Big files can be broken down into multiple smaller blobs in the blob store during snapshotting. * It is not recommended to change this value from its default unless there is an explicit reason for limiting the size of blobs in the repository. * Setting a value lower than the default can result in an increased number of API calls to the blob store during snapshot create and restore operations compared to using the default value and thus make both operations slower and more costly. * Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. * The default varies by repository type. */ chunk_size?: ByteSize; /** When set to `true`, metadata files are stored in compressed format. * This setting doesn't affect index files that are already compressed by default. */ compress?: boolean; /** The maximum snapshot restore rate per node. * It defaults to unlimited. * Note that restores are also throttled through recovery settings. */ max_restore_bytes_per_sec?: ByteSize; /** The maximum snapshot creation rate per node. * It defaults to 40mb per second. * Note that if the recovery settings for managed services are set, then it defaults to unlimited, and the rate is additionally throttled through recovery settings. 
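* As a sketch, these shared base settings can be combined on any repository type (illustrative values):
*
* ```ts
* const settings = {
*   chunk_size: '1gb', // split large files into blobs of at most 1 GB
*   compress: true, // compress metadata files
*   max_snapshot_bytes_per_sec: '80mb',
*   max_restore_bytes_per_sec: '120mb',
* }
* ```
*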
*/ max_snapshot_bytes_per_sec?: ByteSize; } export interface SnapshotS3Repository extends SnapshotRepositoryBase { /** The S3 repository type. */ type: 's3'; /** The repository settings. * * NOTE: In addition to the specified settings, you can also use all non-secure client settings in the repository settings. * In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. * Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. */ settings: SnapshotS3RepositorySettings; } export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { /** The name of the S3 bucket to use for snapshots. * The bucket name must adhere to Amazon's S3 bucket naming rules. */ bucket: string; /** The path to the repository data within its bucket. * It defaults to an empty string, meaning that the repository is at the root of the bucket. * The value of this setting should not start or end with a forward slash (`/`). * * NOTE: Don't set base_path when configuring a snapshot repository for Elastic Cloud Enterprise. * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments may share the same bucket. */ base_path?: string; /** The minimum threshold below which the chunk is uploaded using a single request. * Beyond this threshold, the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of `buffer_size` length, and to upload each part in its own request. * Note that setting a buffer size lower than 5mb is not allowed since it will prevent the use of the Multipart API and may result in upload errors. * It is also not possible to set a buffer size greater than 5gb as it is the maximum upload size allowed by S3. * Defaults to `100mb` or 5% of JVM heap, whichever is smaller. */ buffer_size?: ByteSize; /** The S3 repository supports all S3 canned ACLs: `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, `bucket-owner-full-control`. * You can specify a canned ACL using the `canned_acl` setting. * When the S3 repository creates buckets and objects, it adds the canned ACL into the buckets and objects. */ canned_acl?: string; /** The name of the S3 client to use to connect to S3. */ client?: string; /** The maximum batch size, between 1 and 1000, used for `DeleteObjects` requests. * Defaults to 1000, which is the maximum number supported by the AWS DeleteObjects API. */ delete_objects_max_size?: integer; /** The time to wait before trying again if an attempt to read a linearizable register fails. */ get_register_retry_delay?: Duration; /** The maximum number of parts that Elasticsearch will write during a multipart upload of a single object. * Files which are larger than `buffer_size × max_multipart_parts` will be chunked into several smaller objects. * Elasticsearch may also split a file across multiple objects to satisfy other constraints such as the `chunk_size` limit. * Defaults to `10000`, which is the maximum number of parts in a multipart upload in AWS S3. */ max_multipart_parts?: integer; /** The maximum number of possibly-dangling multipart uploads to clean up in each batch of snapshot deletions. * Defaults to 1000, which is the maximum number supported by the AWS ListMultipartUploads API. * If set to `0`, Elasticsearch will not attempt to clean up dangling multipart uploads.
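* For example, registering an S3 repository that uses a few of these settings (a sketch; the bucket and path are illustrative):
*
* ```ts
* await client.snapshot.createRepository({
*   name: 'my_s3_repository',
*   repository: { type: 's3', settings: { bucket: 'es-snapshots', base_path: 'prod/cluster-1', storage_class: 'standard_ia' } },
* })
* ```
*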
*/ max_multipart_upload_cleanup_size?: integer; /** If `true`, the repository is read-only. * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. * * Only a cluster with write access can create snapshots in the repository. * All other clusters connected to the repository should have the `readonly` parameter set to `true`. * * If `false`, the cluster can write to the repository and create snapshots in it. * * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean; /** When set to `true`, files are encrypted on the server side using an AES256 algorithm. */ server_side_encryption?: boolean; /** The S3 storage class for objects written to the repository. * Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, and `intelligent_tiering`. */ storage_class?: string; /** The delay before the first retry and the amount the delay is incremented by on each subsequent retry. * The default is 50ms and the minimum is 0ms. */ 'throttled_delete_retry.delay_increment'?: Duration; /** The upper bound on how long the delays between retries will grow to. * The default is 5s and the minimum is 0ms. */ 'throttled_delete_retry.maximum_delay'?: Duration; /** The number of times to retry a throttled snapshot deletion. * The default is 10 and the minimum value is 0, which disables retries altogether. * Note that if retries are enabled in the S3 client, each of these retries comprises that many client-level retries. */ 'throttled_delete_retry.maximum_number_of_retries'?: integer; } export interface SnapshotShardsStats { /** The number of shards that initialized, started, and finalized successfully. */ done: long; /** The number of shards that failed to be included in the snapshot. */ failed: long; /** The number of shards that are finalizing but are not done. */ finalizing: long; /** The number of shards that are still initializing. */ initializing: long; /** The number of shards that have started but are not finalized. */ started: long; /** The total number of shards included in the snapshot. */ total: long; } export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' | 'STARTED'; export interface SnapshotShardsStatsSummary { incremental: SnapshotShardsStatsSummaryItem; total: SnapshotShardsStatsSummaryItem; start_time_in_millis: EpochTime; time?: Duration; time_in_millis: DurationValue; } export interface SnapshotShardsStatsSummaryItem { file_count: long; size_in_bytes: long; } export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase { /** The shared file system repository type. */ type: 'fs'; /** The repository settings. */ settings: SnapshotSharedFileSystemRepositorySettings; } export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase { /** The location of the shared filesystem used to store and retrieve snapshots. * This location must be registered in the `path.repo` setting on all master and data nodes in the cluster. * Unlike `path.repo`, this setting supports only a single file path. */ location: string; /** The maximum number of snapshots the repository can contain. * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`.
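* For example, registering a shared file system repository (a sketch; the path is illustrative and must be listed in `path.repo` on every master and data node):
*
* ```ts
* await client.snapshot.createRepository({
*   name: 'my_fs_repository',
*   repository: { type: 'fs', settings: { location: '/mnt/es-backups' } },
* })
* ```
*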
*/ max_number_of_snapshots?: integer; /** If `true`, the repository is read-only. * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. * * Only a cluster with write access can create snapshots in the repository. * All other clusters connected to the repository should have the `readonly` parameter set to `true`. * * If `false`, the cluster can write to the repository and create snapshots in it. * * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean; } export interface SnapshotSnapshotIndexStats { shards: Record; shards_stats: SnapshotShardsStats; stats: SnapshotSnapshotStats; } export interface SnapshotSnapshotInfo { data_streams: string[]; duration?: Duration; duration_in_millis?: DurationValue; end_time?: DateTime; end_time_in_millis?: EpochTime; failures?: SnapshotSnapshotShardFailure[]; include_global_state?: boolean; indices?: IndexName[]; index_details?: Record; metadata?: Metadata; reason?: string; repository?: Name; snapshot: Name; shards?: ShardStatistics; start_time?: DateTime; start_time_in_millis?: EpochTime; state?: string; uuid: Uuid; version?: VersionString; version_id?: VersionNumber; feature_states?: SnapshotInfoFeatureState[]; } export interface SnapshotSnapshotShardFailure { index: IndexName; node_id?: Id; reason: string; shard_id: integer; index_uuid: Id; status: string; } export interface SnapshotSnapshotShardsStatus { stage: SnapshotShardsStatsStage; stats: SnapshotShardsStatsSummary; } export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count'; export interface SnapshotSnapshotStats { /** The number and size of files that still need to be copied as part of the incremental snapshot. * For completed snapshots, this property indicates the number and size of files that were not already in the repository and were copied as part of the incremental snapshot. */ incremental: SnapshotFileCountSnapshotStats; /** The time, in milliseconds, when the snapshot creation process started. */ start_time_in_millis: EpochTime; time?: Duration; /** The total time, in milliseconds, that it took for the snapshot process to complete. */ time_in_millis: DurationValue; /** The total number and size of files that are referenced by the snapshot. */ total: SnapshotFileCountSnapshotStats; } export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase { /** The source-only repository type. */ type: 'source'; /** The repository settings. */ settings: SnapshotSourceOnlyRepositorySettings; } export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase { /** The delegated repository type. For valid values, refer to the `type` parameter. * Source repositories can use the `settings` properties of their delegated repository type. */ delegate_type?: string; /** The maximum number of snapshots the repository can contain. * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer; /** If `true`, the repository is read-only. * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. * * Only a cluster with write access can create snapshots in the repository.
* All other clusters connected to the repository should have the `readonly` parameter set to `true`. * * If `false`, the cluster can write to the repository and create snapshots in it. * * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ read_only?: boolean; /** If `true`, the repository is read-only. * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. * * Only a cluster with write access can create snapshots in the repository. * All other clusters connected to the repository should have the `readonly` parameter set to `true`. * * If `false`, the cluster can write to the repository and create snapshots in it. * * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. * @alias read_only */ readonly?: boolean; } export interface SnapshotStatus { /** Indicates whether the current cluster state is included in the snapshot. */ include_global_state: boolean; indices: Record; /** The name of the repository that includes the snapshot. */ repository: string; /** Statistics for the shards in the snapshot. */ shards_stats: SnapshotShardsStats; /** The name of the snapshot. */ snapshot: string; /** The current snapshot state: * * * `FAILED`: The snapshot finished with an error and failed to store any data. * * `STARTED`: The snapshot is currently running. * * `SUCCESS`: The snapshot completed. */ state: string; /** Details about the number (`file_count`) and size (`size_in_bytes`) of files included in the snapshot. */ stats: SnapshotSnapshotStats; /** The universally unique identifier (UUID) for the snapshot. */ uuid: Uuid; } export interface SnapshotCleanupRepositoryCleanupRepositoryResults { /** The number of binary large objects (blobs) removed from the snapshot repository during cleanup operations. * A non-zero value indicates that unreferenced blobs were found and subsequently cleaned up. */ deleted_blobs: long; /** The number of bytes freed by cleanup operations. */ deleted_bytes: long; } export interface SnapshotCleanupRepositoryRequest extends RequestBase { /** The name of the snapshot repository to clean up. */ name: Name; /** The period to wait for a connection to the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1` */ master_timeout?: Duration; /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. 
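* As a usage sketch for this request (`client` is an assumed client instance):
*
* ```ts
* const resp = await client.snapshot.cleanupRepository({ name: 'my_fs_repository' })
* console.log(resp.results.deleted_blobs, resp.results.deleted_bytes)
* ```
*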
*/ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export interface SnapshotCleanupRepositoryResponse { /** Statistics for cleanup operations. */ results: SnapshotCleanupRepositoryCleanupRepositoryResults; } export interface SnapshotCloneRequest extends RequestBase { /** The name of the snapshot repository that both the source and target snapshots belong to. */ repository: Name; /** The source snapshot name. */ snapshot: Name; /** The target snapshot name. */ target_snapshot: Name; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** A comma-separated list of indices to include in the snapshot. * Multi-target syntax is supported. */ indices: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; target_snapshot?: never; master_timeout?: never; indices?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; target_snapshot?: never; master_timeout?: never; indices?: never; }; } export type SnapshotCloneResponse = AcknowledgedResponseBase; export interface SnapshotCreateRequest extends RequestBase { /** The name of the repository for the snapshot. */ repository: Name; /** The name of the snapshot. * It supports date math. * It must be unique in the repository. */ snapshot: Name; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** If `true`, the request returns a response when the snapshot is complete. * If `false`, the request returns a response when the snapshot initializes. */ wait_for_completion?: boolean; /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. * It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards; /** The feature states to include in the snapshot. * Each feature state includes one or more system indices containing related data. * You can view a list of eligible features using the get features API. * * If `include_global_state` is `true`, all current feature states are included by default. * If `include_global_state` is `false`, no feature states are included by default. * * Note that specifying an empty array will result in the default behavior. * To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ feature_states?: string[]; /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. * If `false`, the request returns an error for any data stream or index that is missing or closed. */ ignore_unavailable?: boolean; /** If `true`, the current cluster state is included in the snapshot. * The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. * It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ include_global_state?: boolean; /** A comma-separated list of data streams and indices to include in the snapshot.
* It supports a multi-target syntax. * The default is an empty array (`[]`), which includes all regular data streams and regular indices. * To exclude all data streams and indices, use `-*`. * * You can't use this parameter to include or exclude system indices or system data streams from a snapshot. * Use `feature_states` instead. */ indices?: Indices; /** Arbitrary metadata to attach to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. * It can have any contents but it must be less than 1024 bytes. * This information is not automatically generated by Elasticsearch. */ metadata?: Metadata; /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. * Only shards that were successfully included in the snapshot will be restored. * All missing shards will be recreated as empty. * * If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; wait_for_completion?: never; expand_wildcards?: never; feature_states?: never; ignore_unavailable?: never; include_global_state?: never; indices?: never; metadata?: never; partial?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; wait_for_completion?: never; expand_wildcards?: never; feature_states?: never; ignore_unavailable?: never; include_global_state?: never; indices?: never; metadata?: never; partial?: never; }; } export interface SnapshotCreateResponse { /** Equals `true` if the snapshot was accepted. Present when the request had `wait_for_completion` set to `false`. */ accepted?: boolean; /** Snapshot information. Present when the request had `wait_for_completion` set to `true`. */ snapshot?: SnapshotSnapshotInfo; } export interface SnapshotCreateRepositoryRequest extends RequestBase { /** The name of the snapshot repository to register or update. */ name: Name; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. * If `false`, this verification is skipped. * You can also perform this verification with the verify snapshot repository API. */ verify?: boolean; repository?: SnapshotRepository; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; verify?: never; repository?: never; }); /** All values in `querystring` will be added to the request querystring.
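* Once a repository is registered, taking a snapshot is a single call (a sketch; names are illustrative):
*
* ```ts
* await client.snapshot.create({
*   repository: 'my_fs_repository',
*   snapshot: 'snapshot-1',
*   indices: 'data-*',
*   include_global_state: false,
*   wait_for_completion: true,
* })
* ```
*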
*/ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; verify?: never; repository?: never; }; } export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase; export interface SnapshotDeleteRequest extends RequestBase { /** The name of the repository to delete a snapshot from. */ repository: Name; /** A comma-separated list of snapshot names to delete. * It also accepts wildcards (`*`). */ snapshot: Name; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; }; } export type SnapshotDeleteResponse = AcknowledgedResponseBase; export interface SnapshotDeleteRepositoryRequest extends RequestBase { /** The name of the snapshot repositories to unregister. * Wildcard (`*`) patterns are supported. */ name: Names; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase; export interface SnapshotGetRequest extends RequestBase { /** A comma-separated list of snapshot repository names used to limit the request. * Wildcard (`*`) expressions are supported. */ repository: Name; /** A comma-separated list of snapshot names to retrieve. * Wildcards (`*`) are supported. * * * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * * To get information about any snapshots that are currently running, use `_current`. */ snapshot: Names; /** An offset identifier to start pagination from as returned by the next field in the response body. */ after?: string; /** The value of the current sort column at which to start retrieval. * It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. * It can be a millisecond time value or a number when sorting by `index-` or shard count. */ from_sort_value?: string; /** If `false`, the request returns an error for any snapshots that are unavailable.
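* As a usage sketch for this request, listing the most recent snapshots a page at a time (`client` and names are assumed):
*
* ```ts
* const page = await client.snapshot.get({
*   repository: 'my_fs_repository',
*   snapshot: '*',
*   size: 100,
*   sort: 'start_time',
*   order: 'desc',
* })
* // if `page.next` is present, pass it as `after` in the next request
* ```
*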
*/ ignore_unavailable?: boolean; /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. * The default is `false`, meaning that this information is omitted. */ index_details?: boolean; /** If `true`, the response includes the name of each index in each snapshot. */ index_names?: boolean; /** If `true`, the response includes the repository name in each snapshot. */ include_repository?: boolean; /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The sort order. * Valid values are `asc` for ascending and `desc` for descending order. * The default behavior is ascending order. */ order?: SortOrder; /** Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. */ offset?: integer; /** The maximum number of snapshots to return. * The default is 0, which means to return all that match the request without limit. */ size?: integer; /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. * * You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. * For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. * Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. * To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ slm_policy_filter?: Name; /** The sort order for the result. * The default behavior is sorting by snapshot start time stamp. */ sort?: SnapshotSnapshotSort; /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. * * NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ verbose?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; after?: never; from_sort_value?: never; ignore_unavailable?: never; index_details?: never; index_names?: never; include_repository?: never; master_timeout?: never; order?: never; offset?: never; size?: never; slm_policy_filter?: never; sort?: never; verbose?: never; }); /** All values in `querystring` will be added to the request querystring. 
*/ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; after?: never; from_sort_value?: never; ignore_unavailable?: never; index_details?: never; index_names?: never; include_repository?: never; master_timeout?: never; order?: never; offset?: never; size?: never; slm_policy_filter?: never; sort?: never; verbose?: never; }; } export interface SnapshotGetResponse { /** The number of remaining snapshots that were not returned due to size limits and that can be fetched by additional requests using the `next` field value. */ remaining: integer; /** The total number of snapshots that match the request when ignoring the size limit or `after` query parameter. */ total: integer; /** If the request contained a size limit and there might be more results, a `next` field will be added to the response. * It can be used as the `after` query parameter to fetch additional results. */ next?: string; responses?: SnapshotGetSnapshotResponseItem[]; snapshots?: SnapshotSnapshotInfo[]; } export interface SnapshotGetSnapshotResponseItem { repository: Name; snapshots?: SnapshotSnapshotInfo[]; error?: ErrorCause; } export interface SnapshotGetRepositoryRequest extends RequestBase { /** A comma-separated list of snapshot repository names used to limit the request. * Wildcard (`*`) expressions are supported, including combining wildcards with exclude patterns starting with `-`. * * To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. */ name?: Names; /** If `true`, the request gets information from the local node only. * If `false`, the request gets information from the master node. */ local?: boolean; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; local?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; local?: never; master_timeout?: never; }; } export type SnapshotGetRepositoryResponse = Record; export interface SnapshotRepositoryAnalyzeBlobDetails { /** The name of the blob. */ name: string; /** Indicates whether the blob was overwritten while the read operations were ongoing. */ overwritten: boolean; read_early: boolean; /** The position, in bytes, at which read operations completed. */ read_end: long; /** The position, in bytes, at which read operations started. */ read_start: long; /** A description of every read operation performed on the blob. */ reads: SnapshotRepositoryAnalyzeReadBlobDetails[]; /** The size of the blob. */ size: ByteSize; /** The size of the blob in bytes. */ size_bytes: long; } export interface SnapshotRepositoryAnalyzeDetailsInfo { /** A description of the blob that was written and read. */ blob: SnapshotRepositoryAnalyzeBlobDetails; /** The elapsed time spent overwriting the blob. * If the blob was not overwritten, this information is omitted. */ overwrite_elapsed?: Duration; /** The elapsed time spent overwriting the blob, in nanoseconds. * If the blob was not overwritten, this information is omitted. */ overwrite_elapsed_nanos?: DurationValue; /** The elapsed time spent writing the blob.
*/ write_elapsed: Duration; /** The elapsed time spent writing the blob, in nanoseconds. */ write_elapsed_nanos: DurationValue; /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob. */ write_throttled: Duration; /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob, in nanoseconds. */ write_throttled_nanos: DurationValue; /** The node which wrote the blob and coordinated the read operations. */ writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo; } export interface SnapshotRepositoryAnalyzeReadBlobDetails { /** Indicates whether the read operation may have started before the write operation was complete. */ before_write_complete?: boolean; /** The length of time spent reading the blob. * If the blob was not found, this detail is omitted. */ elapsed?: Duration; /** The length of time spent reading the blob, in nanoseconds. * If the blob was not found, this detail is omitted. */ elapsed_nanos?: DurationValue; /** The length of time waiting for the first byte of the read operation to be received. * If the blob was not found, this detail is omitted. */ first_byte_time?: Duration; /** The length of time waiting for the first byte of the read operation to be received, in nanoseconds. * If the blob was not found, this detail is omitted. */ first_byte_time_nanos: DurationValue; /** Indicates whether the blob was found by the read operation. * If the read was started before the write completed or the write was ended before completion, it might be false. */ found: boolean; /** The node that performed the read operation. */ node: SnapshotRepositoryAnalyzeSnapshotNodeInfo; /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. * If the blob was not found, this detail is omitted. */ throttled?: Duration; /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, in nanoseconds. * If the blob was not found, this detail is omitted. */ throttled_nanos?: DurationValue; } export interface SnapshotRepositoryAnalyzeReadSummaryInfo { /** The number of read operations performed in the test. */ count: integer; /** The maximum time spent waiting for the first byte of any read request to be received. */ max_wait: Duration; /** The maximum time spent waiting for the first byte of any read request to be received, in nanoseconds. */ max_wait_nanos: DurationValue; /** The total elapsed time spent on reading blobs in the test. */ total_elapsed: Duration; /** The total elapsed time spent on reading blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue; /** The total size of all the blobs or partial blobs read in the test. */ total_size: ByteSize; /** The total size of all the blobs or partial blobs read in the test, in bytes. */ total_size_bytes: long; /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles. */ total_throttled: Duration; /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. 
*/ total_throttled_nanos: DurationValue; /** The total time spent waiting for the first byte of each read request to be received. */ total_wait: Duration; /** The total time spent waiting for the first byte of each read request to be received, in nanoseconds. */ total_wait_nanos: DurationValue; } export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { /** The name of the repository. */ name: Name; /** The total number of blobs to write to the repository during the test. * For realistic experiments, you should set it to at least `2000`. */ blob_count?: integer; /** The number of operations to run concurrently during the test. */ concurrency?: integer; /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. * If false, it returns only a summary of the analysis. */ detailed?: boolean; /** The number of nodes on which to perform an early read operation while writing each blob. * Early read operations are only rarely performed. */ early_read_node_count?: integer; /** The maximum size of a blob to be written during the test. * For realistic experiments, you should set it to at least `2gb`. */ max_blob_size?: ByteSize; /** An upper limit on the total size of all the blobs written during the test. * For realistic experiments, you should set it to at least `1tb`. */ max_total_data_size?: ByteSize; /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. */ rare_action_probability?: double; /** Indicates whether to rarely cancel writes before they complete. */ rarely_abort_writes?: boolean; /** The number of nodes on which to read a blob after writing. */ read_node_count?: integer; /** The minimum number of linearizable register operations to perform in total. * For realistic experiments, you should set it to at least `100`. */ register_operation_count?: integer; /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. * To repeat the same set of operations in multiple experiments, use the same seed in each experiment. * Note that the operations are performed concurrently so might not always happen in the same order on each run. */ seed?: integer; /** The period of time to wait for the test to complete. * If no response is received before the timeout expires, the test is cancelled and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; blob_count?: never; concurrency?: never; detailed?: never; early_read_node_count?: never; max_blob_size?: never; max_total_data_size?: never; rare_action_probability?: never; rarely_abort_writes?: never; read_node_count?: never; register_operation_count?: never; seed?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; blob_count?: never; concurrency?: never; detailed?: never; early_read_node_count?: never; max_blob_size?: never; max_total_data_size?: never; rare_action_probability?: never; rarely_abort_writes?: never; read_node_count?: never; register_operation_count?: never; seed?: never; timeout?: never; }; } export interface SnapshotRepositoryAnalyzeResponse { /** The number of blobs written to the repository during the test. */ blob_count: integer; /** The path in the repository under which all the blobs were written during the test. 
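* An analysis producing this response might be requested as follows (a sketch; `client` and the repository name are assumed, and the values follow the minimums suggested above for realistic experiments):
*
* ```ts
* const analysis = await client.snapshot.repositoryAnalyze({
*   name: 'my_s3_repository',
*   blob_count: 2000,
*   max_blob_size: '2gb',
*   timeout: '10m',
* })
* // a healthy repository reports an empty `issues_detected` list
* ```
*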
*/ blob_path: string; /** The number of write operations performed concurrently during the test. */ concurrency: integer; /** The node that coordinated the analysis and performed the final cleanup. */ coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo; /** The time it took to delete all the blobs in the container. */ delete_elapsed: Duration; /** The time it took to delete all the blobs in the container, in nanoseconds. */ delete_elapsed_nanos: DurationValue; /** A description of every read and write operation performed during the test. */ details: SnapshotRepositoryAnalyzeDetailsInfo; /** The limit on the number of nodes on which early read operations were performed after writing each blob. */ early_read_node_count: integer; /** A list of correctness issues detected, which is empty if the API succeeded. * It is included to emphasize that a successful response does not guarantee correct behavior in the future. */ issues_detected: string[]; /** The time it took to retrieve a list of all the blobs in the container. */ listing_elapsed: Duration; /** The time it took to retrieve a list of all the blobs in the container, in nanoseconds. */ listing_elapsed_nanos: DurationValue; /** The limit on the size of a blob written during the test. */ max_blob_size: ByteSize; /** The limit, in bytes, on the size of a blob written during the test. */ max_blob_size_bytes: long; /** The limit on the total size of all blobs written during the test. */ max_total_data_size: ByteSize; /** The limit, in bytes, on the total size of all blobs written during the test. */ max_total_data_size_bytes: long; /** The probability of performing rare actions during the test. */ rare_action_probability: double; /** The limit on the number of nodes on which read operations were performed after writing each blob. */ read_node_count: integer; /** The name of the repository that was the subject of the analysis. */ repository: string; /** The seed for the pseudo-random number generator used to generate the operations used during the test. */ seed: long; /** A collection of statistics that summarize the results of the test. */ summary: SnapshotRepositoryAnalyzeSummaryInfo; } export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { id: Id; name: Name; } export interface SnapshotRepositoryAnalyzeSummaryInfo { /** A collection of statistics that summarize the results of the read operations in the test. */ read: SnapshotRepositoryAnalyzeReadSummaryInfo; /** A collection of statistics that summarize the results of the write operations in the test. */ write: SnapshotRepositoryAnalyzeWriteSummaryInfo; } export interface SnapshotRepositoryAnalyzeWriteSummaryInfo { /** The number of write operations performed in the test. */ count: integer; /** The total elapsed time spent on writing blobs in the test. */ total_elapsed: Duration; /** The total elapsed time spent on writing blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue; /** The total size of all the blobs written in the test. */ total_size: ByteSize; /** The total size of all the blobs written in the test, in bytes. */ total_size_bytes: long; /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle. */ total_throttled: Duration; /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle, in nanoseconds. */ total_throttled_nanos: long; } export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { /** The name of the snapshot repository.
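*
* @example
* // A hedged sketch: `client` is an assumed `@elastic/elasticsearch` Client
* // instance, `repositoryVerifyIntegrity` an assumed generated method name, and
* // `my_repo` an illustrative repository name. Full blob verification reads the
* // entire repository contents, so it is left disabled here.
* const report = await client.snapshot.repositoryVerifyIntegrity({
*   name: 'my_repo',
*   verify_blob_contents: false,
* });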
*/ name: Names; /** If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. */ blob_thread_pool_concurrency?: integer; /** The maximum number of index snapshots to verify concurrently within each index verification. */ index_snapshot_verification_concurrency?: integer; /** The number of indices to verify concurrently. * The default behavior is to use the entire `snapshot_meta` thread pool. */ index_verification_concurrency?: integer; /** If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. */ max_bytes_per_sec?: string; /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. * If your repository contains more than this number of shard snapshot failures, the verification will fail. */ max_failed_shard_snapshots?: integer; /** The maximum number of snapshot metadata operations to run concurrently. * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ meta_thread_pool_concurrency?: integer; /** The number of snapshots to verify concurrently. * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ snapshot_verification_concurrency?: integer; /** Indicates whether to verify the checksum of every data blob in the repository. * If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ verify_blob_contents?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; blob_thread_pool_concurrency?: never; index_snapshot_verification_concurrency?: never; index_verification_concurrency?: never; max_bytes_per_sec?: never; max_failed_shard_snapshots?: never; meta_thread_pool_concurrency?: never; snapshot_verification_concurrency?: never; verify_blob_contents?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; blob_thread_pool_concurrency?: never; index_snapshot_verification_concurrency?: never; index_verification_concurrency?: never; max_bytes_per_sec?: never; max_failed_shard_snapshots?: never; meta_thread_pool_concurrency?: never; snapshot_verification_concurrency?: never; verify_blob_contents?: never; }; } export type SnapshotRepositoryVerifyIntegrityResponse = any; export interface SnapshotRestoreRequest extends RequestBase { /** The name of the repository to restore a snapshot from. */ repository: Name; /** The name of the snapshot to restore. */ snapshot: Name; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** If `true`, the request returns a response when the restore operation completes. * The operation is complete when it finishes all attempts to recover primary shards for restored indices. * This applies even if one or more of the recovery attempts fail. * * If `false`, the request returns a response when the restore operation initializes. */ wait_for_completion?: boolean; /** The feature states to restore. * If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. 
* If `include_global_state` is `false`, the request restores no feature states by default. * Note that specifying an empty array will result in the default behavior. * To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ feature_states?: string[]; /** The index settings to not restore from the snapshot. * You can't use this option to ignore `index.number_of_shards`. * * For data streams, this option applies only to restored backing indices. * New backing indices are configured using the data stream's matching index template. */ ignore_index_settings?: string[]; /** If `true`, the request ignores any index or data stream in `indices` that's missing from the snapshot. * If `false`, the request returns an error for any missing index or data stream. */ ignore_unavailable?: boolean; /** If `true`, the request restores aliases for any restored data streams and indices. * If `false`, the request doesn't restore aliases. */ include_aliases?: boolean; /** If `true`, restore the cluster state. The cluster state includes: * * * Persistent cluster settings * * Index templates * * Legacy index templates * * Ingest pipelines * * Index lifecycle management (ILM) policies * * Stored scripts * * For snapshots taken after 7.12.0, feature states * * If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. * It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. * * Use the `feature_states` parameter to configure how feature states are restored. * * If `include_global_state` is `true` and a snapshot was created without a global state, then the restore request will fail. */ include_global_state?: boolean; /** Index settings to add or change in restored indices, including backing indices. * You can't use this option to change `index.number_of_shards`. * * For data streams, this option applies only to restored backing indices. * New backing indices are configured using the data stream's matching index template. */ index_settings?: IndicesIndexSettings; /** A comma-separated list of indices and data streams to restore. * It supports a multi-target syntax. * The default behavior is all regular indices and regular data streams in the snapshot. * * You can't use this parameter to restore system indices or system data streams. * Use `feature_states` instead. */ indices?: Indices; /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. * * If `true`, it allows restoring a partial snapshot of indices with unavailable shards. * Only shards that were successfully included in the snapshot will be restored. * All missing shards will be recreated as empty. */ partial?: boolean; /** A rename pattern to apply to restored data streams and indices. * Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. * * The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ rename_pattern?: string; /** The rename replacement string that is used with the `rename_pattern`.
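*
* @example
* // A hedged sketch of a restore that recreates an index under a new name,
* // assuming an `@elastic/elasticsearch` Client named `client`; the repository,
* // snapshot, and index names are illustrative. `rename_replacement` can
* // reference capture groups from `rename_pattern`, per the
* // `appendReplacement` semantics described above.
* await client.snapshot.restore({
*   repository: 'my_repo',
*   snapshot: 'snapshot_1',
*   indices: 'index_1',
*   rename_pattern: '(.+)',
*   rename_replacement: 'restored_$1',
*   wait_for_completion: true,
* });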
*/ rename_replacement?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; wait_for_completion?: never; feature_states?: never; ignore_index_settings?: never; ignore_unavailable?: never; include_aliases?: never; include_global_state?: never; index_settings?: never; indices?: never; partial?: never; rename_pattern?: never; rename_replacement?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; master_timeout?: never; wait_for_completion?: never; feature_states?: never; ignore_index_settings?: never; ignore_unavailable?: never; include_aliases?: never; include_global_state?: never; index_settings?: never; indices?: never; partial?: never; rename_pattern?: never; rename_replacement?: never; }; } export interface SnapshotRestoreResponse { accepted?: boolean; snapshot?: SnapshotRestoreSnapshotRestore; } export interface SnapshotRestoreSnapshotRestore { indices: IndexName[]; snapshot: string; shards: ShardStatistics; } export interface SnapshotStatusRequest extends RequestBase { /** The snapshot repository name used to limit the request. * It supports wildcards (`*`) if `<snapshot>` isn't specified. */ repository?: Name; /** A comma-separated list of snapshots to retrieve status for. * The default is currently running snapshots. * Wildcards (`*`) are not supported. */ snapshot?: Names; /** If `false`, the request returns an error for any snapshots that are unavailable. * If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ ignore_unavailable?: boolean; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { repository?: never; snapshot?: never; ignore_unavailable?: never; master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { repository?: never; snapshot?: never; ignore_unavailable?: never; master_timeout?: never; }; } export interface SnapshotStatusResponse { snapshots: SnapshotStatus[]; } export interface SnapshotVerifyRepositoryCompactNodeInfo { /** A human-readable name for the node. * You can set this name using the `node.name` property in `elasticsearch.yml`. * The default value is the machine's hostname. */ name: Name; } export interface SnapshotVerifyRepositoryRequest extends RequestBase { /** The name of the snapshot repository to verify. */ name: Name; /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. * To indicate that the request should never timeout, set it to `-1`.
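*
* @example
* // A hedged sketch: `client` is an assumed `@elastic/elasticsearch` Client
* // instance and `my_repo` an illustrative repository name. The response maps
* // node IDs to the nodes that successfully verified the repository.
* const { nodes } = await client.snapshot.verifyRepository({ name: 'my_repo' });
* console.log(Object.keys(nodes));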
*/ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { name?: never; master_timeout?: never; timeout?: never; }; } export interface SnapshotVerifyRepositoryResponse { /** Information about the nodes connected to the snapshot repository. * The key is the ID of the node. */ nodes: Record; } export interface SqlColumn { name: Name; type: string; } export type SqlRow = any[]; export interface SqlClearCursorRequest extends RequestBase { /** Cursor to clear. */ cursor: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { cursor?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { cursor?: never; }; } export interface SqlClearCursorResponse { succeeded: boolean; } export interface SqlDeleteAsyncRequest extends RequestBase { /** The identifier for the search. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type SqlDeleteAsyncResponse = AcknowledgedResponseBase; export interface SqlGetAsyncRequest extends RequestBase { /** The identifier for the search. */ id: Id; /** The separator for CSV results. * The API supports this parameter only for CSV responses. */ delimiter?: string; /** The format for the response. * You must specify a format using this parameter or the `Accept` HTTP header. * If you specify both, the API uses this parameter. */ format?: string; /** The retention period for the search and its results. * It defaults to the `keep_alive` period for the original SQL search. */ keep_alive?: Duration; /** The period to wait for complete results. * It defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; delimiter?: never; format?: never; keep_alive?: never; wait_for_completion_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; delimiter?: never; format?: never; keep_alive?: never; wait_for_completion_timeout?: never; }; } export interface SqlGetAsyncResponse { /** Identifier for the search. * This value is returned only for async and saved synchronous searches. * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ id: Id; /** If `true`, the search is still running. * If `false`, the search has finished. * This value is returned only for async and saved synchronous searches. * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running: boolean; /** If `true`, the response does not contain complete search results. * If `is_partial` is `true` and `is_running` is `true`, the search is still running. * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. * This value is returned only for async and saved synchronous searches. 
* For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial: boolean; /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[]; /** The cursor for the next set of paginated results. * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string; /** The values for the search results. */ rows: SqlRow[]; } export interface SqlGetAsyncStatusRequest extends RequestBase { /** The identifier for the search. */ id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface SqlGetAsyncStatusResponse { /** The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete the search and its results, even if the search is still running. */ expiration_time_in_millis: EpochTime; /** The identifier for the search. */ id: string; /** If `true`, the search is still running. * If `false`, the search has finished. */ is_running: boolean; /** If `true`, the response does not contain complete search results. * If `is_partial` is `true` and `is_running` is `true`, the search is still running. * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. */ is_partial: boolean; /** The timestamp, in milliseconds since the Unix epoch, when the search started. * The API returns this property only for running searches. */ start_time_in_millis: EpochTime; /** The HTTP status code for the search. * The API returns this property only for completed searches. */ completion_status?: uint; } export interface SqlQueryRequest extends RequestBase { /** The format for the response. * You can also specify a format using the `Accept` HTTP header. * If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ format?: SqlQuerySqlFormat; /** If `true`, the response has partial results when there are shard request timeouts or shard failures. * If `false`, the API returns an error with no partial results. */ allow_partial_search_results?: boolean; /** The default catalog (cluster) for queries. * If unspecified, the queries execute on the data in the local cluster only. */ catalog?: string; /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. * The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ columnar?: boolean; /** The cursor used to retrieve a set of paginated results. * If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. * It ignores other request body parameters. */ cursor?: string; /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer; /** If `false`, the API returns an exception when encountering multiple values for a field. * If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ field_multi_value_leniency?: boolean; /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer; /** If `true`, the search can run on frozen indices. */ index_using_frozen?: boolean; /** The retention period for an async or saved synchronous search. 
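*
* @example
* // A hedged sketch of an SQL search that becomes async (and is saved) when it
* // outlives `wait_for_completion_timeout`. It assumes an
* // `@elastic/elasticsearch` Client named `client`; the index and column names
* // are illustrative.
* const response = await client.sql.query({
*   query: 'SELECT author, page_count FROM library ORDER BY page_count DESC',
*   wait_for_completion_timeout: '2s',
*   keep_alive: '2d',
*   keep_on_completion: true,
* });
* if (response.id) console.log('async search id:', response.id);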
*/ keep_alive?: Duration; /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. * If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ keep_on_completion?: boolean; /** The minimum retention period for the scroll cursor. * After this time period, a pagination request might fail because the scroll cursor is no longer available. * Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ page_timeout?: Duration; /** The values for parameters in the query. */ params?: Record; /** The SQL query to run. */ query?: string; /** The timeout before the request fails. */ request_timeout?: Duration; /** One or more runtime fields for the search request. * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields; /** The ISO-8601 time zone ID for the search. */ time_zone?: TimeZone; /** The period to wait for complete results. * It defaults to no timeout, meaning the request waits for complete search results. * If the search doesn't finish within this period, the search becomes async. * * To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ wait_for_completion_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { format?: never; allow_partial_search_results?: never; catalog?: never; columnar?: never; cursor?: never; fetch_size?: never; field_multi_value_leniency?: never; filter?: never; index_using_frozen?: never; keep_alive?: never; keep_on_completion?: never; page_timeout?: never; params?: never; query?: never; request_timeout?: never; runtime_mappings?: never; time_zone?: never; wait_for_completion_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { format?: never; allow_partial_search_results?: never; catalog?: never; columnar?: never; cursor?: never; fetch_size?: never; field_multi_value_leniency?: never; filter?: never; index_using_frozen?: never; keep_alive?: never; keep_on_completion?: never; page_timeout?: never; params?: never; query?: never; request_timeout?: never; runtime_mappings?: never; time_zone?: never; wait_for_completion_timeout?: never; }; } export interface SqlQueryResponse { /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[]; /** The cursor for the next set of paginated results. * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string; /** The identifier for the search. * This value is returned only for async and saved synchronous searches. * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ id?: Id; /** If `true`, the search is still running. * If `false`, the search has finished. * This value is returned only for async and saved synchronous searches. * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running?: boolean; /** If `true`, the response does not contain complete search results. * If `is_partial` is `true` and `is_running` is `true`, the search is still running. * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. 
* This value is returned only for async and saved synchronous searches. * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial?: boolean; /** The values for the search results. */ rows: SqlRow[]; } export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile'; export interface SqlTranslateRequest extends RequestBase { /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer; /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer; /** The SQL query to run. */ query: string; /** The ISO-8601 time zone ID for the search. */ time_zone?: TimeZone; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { fetch_size?: never; filter?: never; query?: never; time_zone?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { fetch_size?: never; filter?: never; query?: never; time_zone?: never; }; } export interface SqlTranslateResponse { aggregations?: Record; size?: long; _source?: SearchSourceConfig; fields?: (QueryDslFieldAndFormat | Field)[]; query?: QueryDslQueryContainer; sort?: Sort; } export interface SslCertificatesCertificateInformation { /** If the path refers to a container file (a JKS keystore or a PKCS#12 file), it is the alias of the certificate. * Otherwise, it is null. */ alias: string | null; /** The ISO formatted date of the certificate's expiry (not-after) date. */ expiry: DateTime; /** The format of the file. * Valid values include `jks`, `PKCS12`, and `PEM`. */ format: string; /** Indicates whether Elasticsearch has access to the private key for this certificate. */ has_private_key: boolean; /** The Distinguished Name of the certificate's issuer. */ issuer?: string; /** The path to the certificate, as configured in the `elasticsearch.yml` file. */ path: string; /** The hexadecimal representation of the certificate's serial number. */ serial_number: string; /** The Distinguished Name of the certificate's subject. */ subject_dn: string; } export interface SslCertificatesRequest extends RequestBase { /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any; }; /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; }; } export type SslCertificatesResponse = SslCertificatesCertificateInformation[]; export interface SynonymsSynonymRule { /** The identifier for the synonym rule. * If you do not specify a synonym rule ID when you create a rule, an identifier is created automatically by Elasticsearch. */ id?: Id; /** The synonyms, in Solr format, that make up the synonym rule. */ synonyms: SynonymsSynonymString; } export interface SynonymsSynonymRuleRead { /** The synonym rule identifier. */ id: Id; /** The synonyms, in Solr format, that make up the synonym rule. */ synonyms: SynonymsSynonymString; } export type SynonymsSynonymString = string; export interface SynonymsSynonymsUpdateResult { /** The update operation result. */ result: Result; /** Updating synonyms in a synonym set reloads the associated analyzers. * This information is the result of reloading those analyzers. */ reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult; } export interface SynonymsDeleteSynonymRequest extends RequestBase { /** The synonyms set identifier to delete. */ id: Id; /** All values in `body` will be added to the request body.
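*
* @example
* // A hedged sketch (assumed `@elastic/elasticsearch` Client instance named
* // `client`; the set identifier is illustrative). Deleting a synonyms set
* // returns a standard acknowledged response.
* const res = await client.synonyms.deleteSynonym({ id: 'my-synonyms-set' });
* console.log(res.acknowledged);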
*/ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase; export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { /** The ID of the synonym set to update. */ set_id: Id; /** The ID of the synonym rule to delete. */ rule_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { set_id?: never; rule_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { set_id?: never; rule_id?: never; }; } export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult; export interface SynonymsGetSynonymRequest extends RequestBase { /** The synonyms set identifier to retrieve. */ id: Id; /** The starting offset for synonym rules to retrieve. */ from?: integer; /** The maximum number of synonym rules to retrieve. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; from?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; from?: never; size?: never; }; } export interface SynonymsGetSynonymResponse { /** The total number of synonym rules that the synonyms set contains. */ count: integer; /** Synonym rule details. */ synonyms_set: SynonymsSynonymRuleRead[]; } export interface SynonymsGetSynonymRuleRequest extends RequestBase { /** The ID of the synonym set to retrieve the synonym rule from. */ set_id: Id; /** The ID of the synonym rule to retrieve. */ rule_id: Id; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { set_id?: never; rule_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { set_id?: never; rule_id?: never; }; } export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead; export interface SynonymsGetSynonymsSetsRequest extends RequestBase { /** The starting offset for synonyms sets to retrieve. */ from?: integer; /** The maximum number of synonyms sets to retrieve. */ size?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { from?: never; size?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { from?: never; size?: never; }; } export interface SynonymsGetSynonymsSetsResponse { /** The total number of synonyms sets defined. */ count: integer; /** The identifier and total number of defined synonym rules for each synonyms set. */ results: SynonymsGetSynonymsSetsSynonymsSetItem[]; } export interface SynonymsGetSynonymsSetsSynonymsSetItem { /** The synonyms set identifier. */ synonyms_set: Id; /** The number of synonym rules that the synonyms set contains. */ count: integer; } export interface SynonymsPutSynonymRequest extends RequestBase { /** The ID of the synonyms set to be created or updated. */ id: Id; /** The synonym rule definitions for the synonyms set. */ synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]; /** All values in `body` will be added to the request body.
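*
* @example
* // A hedged sketch (assumed Client instance `client`; identifiers and rules
* // are illustrative). Rules use Solr synonym syntax; updating the set reloads
* // the analyzers that use it, reported in `reload_analyzers_details`.
* const res = await client.synonyms.putSynonym({
*   id: 'my-synonyms-set',
*   synonyms_set: [
*     { id: 'rule-1', synonyms: 'hello, hi' },
*     { synonyms: 'bye => goodbye' }, // rule ID auto-generated by Elasticsearch
*   ],
* });
* console.log(res.result);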
*/ body?: string | ({ [key: string]: any; } & { id?: never; synonyms_set?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; synonyms_set?: never; }; } export interface SynonymsPutSynonymResponse { result: Result; reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult; } export interface SynonymsPutSynonymRuleRequest extends RequestBase { /** The ID of the synonym set. */ set_id: Id; /** The ID of the synonym rule to be updated or created. */ rule_id: Id; /** The synonym rule definition, which must be in Solr format. */ synonyms: SynonymsSynonymString; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { set_id?: never; rule_id?: never; synonyms?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { set_id?: never; rule_id?: never; synonyms?: never; }; } export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult; export type TasksGroupBy = 'nodes' | 'parents' | 'none'; export interface TasksNodeTasks { name?: NodeId; transport_address?: TransportAddress; host?: Host; ip?: Ip; roles?: string[]; attributes?: Record; tasks: Record; } export interface TasksParentTaskInfo extends TasksTaskInfo { children?: TasksTaskInfo[]; } export interface TasksTaskInfo { action: string; cancelled?: boolean; cancellable: boolean; /** Human-readable text that identifies the particular request that the task is performing. * For example, it might identify the search request being performed by a search task. * Other kinds of tasks have different descriptions, like `_reindex` which has the source and the destination, or `_bulk` which just has the number of requests and the destination indices. * Many requests will have only an empty description because more detailed information about the request is not easily available or particularly helpful in identifying the request. */ description?: string; headers: Record; id: long; node: NodeId; running_time?: Duration; running_time_in_nanos: DurationValue; start_time_in_millis: EpochTime; /** The internal status of the task, which varies from task to task. * The format also varies. * While the goal is to keep the status for a particular task consistent from version to version, this is not always possible because sometimes the implementation changes. * Fields might be removed from the status for a particular request, so any parsing you do of the status might break in minor releases. */ status?: any; type: string; parent_task_id?: TaskId; } export type TasksTaskInfos = TasksTaskInfo[] | Record; export interface TasksTaskListResponseBase { node_failures?: ErrorCause[]; task_failures?: TaskFailure[]; /** Task information grouped by node, if `group_by` was set to `nodes` (the default). */ nodes?: Record; /** Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if * `group_by` was set to `parents`. */ tasks?: TasksTaskInfos; } export interface TasksCancelRequest extends RequestBase { /** The task identifier. */ task_id?: TaskId; /** A comma-separated list or wildcard expression of actions that is used to limit the request. */ actions?: string | string[]; /** A comma-separated list of node IDs or names that is used to limit the request. */ nodes?: string[]; /** A parent task ID that is used to limit the tasks.
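*
* @example
* // A hedged sketch (assumed Client instance `client`; the parent task ID is
* // illustrative). Passing `parent_task_id` limits cancellation to tasks
* // spawned by that parent.
* await client.tasks.cancel({
*   parent_task_id: 'oTUltX4IQMOUUVeiohTt8A:12345',
*   wait_for_completion: true,
* });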
*/ parent_task_id?: string; /** If `true`, the request blocks until all found tasks are complete. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_id?: never; actions?: never; nodes?: never; parent_task_id?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_id?: never; actions?: never; nodes?: never; parent_task_id?: never; wait_for_completion?: never; }; } export type TasksCancelResponse = TasksTaskListResponseBase; export interface TasksGetRequest extends RequestBase { /** The task identifier. */ task_id: Id; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { task_id?: never; timeout?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { task_id?: never; timeout?: never; wait_for_completion?: never; }; } export interface TasksGetResponse { completed: boolean; task: TasksTaskInfo; response?: any; error?: ErrorCause; } export interface TasksListRequest extends RequestBase { /** A comma-separated list or wildcard expression of actions used to limit the request. * For example, you can use `cluster:*` to retrieve all cluster-related tasks. */ actions?: string | string[]; /** If `true`, the response includes detailed information about the running tasks. * This information is useful to distinguish tasks from each other but is more costly to run. */ detailed?: boolean; /** A key that is used to group tasks in the response. * The task lists can be grouped either by nodes or by parent tasks. */ group_by?: TasksGroupBy; /** A comma-separated list of node IDs or names that is used to limit the returned information. */ nodes?: NodeIds; /** A parent task identifier that is used to limit returned information. * To return all tasks, omit this parameter or use a value of `-1`. * If the parent task is not found, the API does not return a 404 response code. */ parent_task_id?: Id; /** The period to wait for each node to respond. * If a node does not respond before its timeout expires, the response does not include its information. * However, timed-out nodes are included in the `node_failures` property. */ timeout?: Duration; /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { actions?: never; detailed?: never; group_by?: never; nodes?: never; parent_task_id?: never; timeout?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring.
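*
* @example
* // A hedged sketch (assumed Client instance `client`). Lists detailed
* // information about running cluster-level tasks, grouped by parent task.
* const res = await client.tasks.list({
*   actions: 'cluster:*',
*   detailed: true,
*   group_by: 'parents',
* });
* console.log(res.tasks);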
*/ querystring?: { [key: string]: any; } & { actions?: never; detailed?: never; group_by?: never; nodes?: never; parent_task_id?: never; timeout?: never; wait_for_completion?: never; }; } export type TasksListResponse = TasksTaskListResponseBase; export type TextStructureEcsCompatibilityType = 'disabled' | 'v1'; export interface TextStructureFieldStat { count: integer; cardinality: integer; top_hits: TextStructureTopHit[]; mean_value?: integer; median_value?: integer; max_value?: integer; min_value?: integer; earliest?: string; latest?: string; } export type TextStructureFormatType = 'delimited' | 'ndjson' | 'semi_structured_text' | 'xml'; export interface TextStructureTopHit { count: long; value: any; } export interface TextStructureFindFieldStructureRequest extends RequestBase { /** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. * If this parameter is not specified, the structure finder uses the column names from the header row of the text. * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string; /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. * Only a single character is supported; the delimiter cannot have multiple characters. * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string; /** The number of documents to include in the structural analysis. * The minimum value is 2. */ documents_to_sample?: uint; /** The mode of compatibility with ECS compliant Grok patterns. * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. * The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ ecs_compatibility?: TextStructureEcsCompatibilityType; /** If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean; /** The field that should be analyzed. */ field: Field; /** The high level structure of the text. * By default, the API chooses the format. * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: TextStructureFormatType; /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
* If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern; /** The name of the index that contains the analyzed field. */ index: IndexName; /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. * Only a single character is supported. * If this parameter is not specified, the default value is a double quote (`"`). * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string; /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. * Otherwise, the default value is `false`. */ should_trim_fields?: boolean; /** The maximum amount of time that the structure analysis can take. * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration; /** The name of the field that contains the primary timestamp of each record in the text. * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. * * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. * * For structured text, if you specify this parameter, the field must exist within the text. * * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field; /** The Java time format of the timestamp field in the text. * Only a subset of Java time format letter groups are supported: * * * `a` * * `d` * * `dd` * * `EEE` * * `EEEE` * * `H` * * `HH` * * `h` * * `M` * * `MM` * * `MMM` * * `MMMM` * * `mm` * * `ss` * * `XX` * * `XXX` * * `yy` * * `yyyy` * * `zzz` * * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). * Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. * * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. * Another is when the timestamp format is one that the structure finder does not consider by default. * * If this parameter is not specified, the structure finder chooses the best format from a built-in set. * * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string; /** All values in `body` will be added to the request body.
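*
* @example
* // A hedged sketch (assumed Client instance `client` and generated method name
* // `findFieldStructure`; the index and field names are illustrative). Analyzes
* // an already-ingested text field and suggests mappings, an ingest pipeline,
* // and, for semi-structured text, a Grok pattern.
* const res = await client.textStructure.findFieldStructure({
*   index: 'my-logs',
*   field: 'message',
*   documents_to_sample: 1000,
* });
* console.log(res.grok_pattern, res.mappings);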
*/ body?: string | ({ [key: string]: any; } & { column_names?: never; delimiter?: never; documents_to_sample?: never; ecs_compatibility?: never; explain?: never; field?: never; format?: never; grok_pattern?: never; index?: never; quote?: never; should_trim_fields?: never; timeout?: never; timestamp_field?: never; timestamp_format?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { column_names?: never; delimiter?: never; documents_to_sample?: never; ecs_compatibility?: never; explain?: never; field?: never; format?: never; grok_pattern?: never; index?: never; quote?: never; should_trim_fields?: never; timeout?: never; timestamp_field?: never; timestamp_format?: never; }; } export interface TextStructureFindFieldStructureResponse { charset: string; ecs_compatibility?: TextStructureEcsCompatibilityType; field_stats: Record; format: TextStructureFormatType; grok_pattern?: GrokPattern; java_timestamp_formats?: string[]; joda_timestamp_formats?: string[]; ingest_pipeline: IngestPipelineConfig; mappings: MappingTypeMapping; multiline_start_pattern?: string; need_client_timezone: boolean; num_lines_analyzed: integer; num_messages_analyzed: integer; sample_start: string; timestamp_field?: Field; } export interface TextStructureFindMessageStructureRequest extends RequestBase { /** If the format is `delimited`, you can specify the column names in a comma-separated list. * If this parameter is not specified, the structure finder uses the column names from the header row of the text. * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string; /** If the format is `delimited`, you can specify the character used to delimit the values in each row. * Only a single character is supported; the delimiter cannot have multiple characters. * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string; /** The mode of compatibility with ECS compliant Grok patterns. * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them. */ ecs_compatibility?: TextStructureEcsCompatibilityType; /** If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean; /** The high level structure of the text. * By default, the API chooses the format. * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. * If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
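*
* @example
* // A hedged sketch (assumed Client instance `client` and generated method name
* // `findMessageStructure`; the messages are illustrative). Each array element
* // is analyzed as one log message.
* const res = await client.textStructure.findMessageStructure({
*   messages: [
*     '[2024-03-05T10:52:36,256][INFO ][o.e.n.Node] version[8.12.2]',
*     '[2024-03-05T10:52:41,038][INFO ][o.e.n.Node] initialized',
*   ],
* });
* console.log(res.grok_pattern, res.java_timestamp_formats);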
*/ format?: TextStructureFormatType; /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern; /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. * Only a single character is supported. * If this parameter is not specified, the default value is a double quote (`"`). * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string; /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. * Otherwise, the default value is `false`. */ should_trim_fields?: boolean; /** The maximum amount of time that the structure analysis can take. * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration; /** The name of the field that contains the primary timestamp of each record in the text. * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. * * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. * * For structured text, if you specify this parameter, the field must exist within the text. * * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field; /** The Java time format of the timestamp field in the text. * Only a subset of Java time format letter groups are supported: * * * `a` * * `d` * * `dd` * * `EEE` * * `EEEE` * * `H` * * `HH` * * `h` * * `M` * * `MM` * * `MMM` * * `MMMM` * * `mm` * * `ss` * * `XX` * * `XXX` * * `yy` * * `yyyy` * * `zzz` * * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). * Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. * * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. * Another is when the timestamp format is one that the structure finder does not consider by default. * * If this parameter is not specified, the structure finder chooses the best format from a built-in set.
* * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string; /** The list of messages you want to analyze. */ messages: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { column_names?: never; delimiter?: never; ecs_compatibility?: never; explain?: never; format?: never; grok_pattern?: never; quote?: never; should_trim_fields?: never; timeout?: never; timestamp_field?: never; timestamp_format?: never; messages?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { column_names?: never; delimiter?: never; ecs_compatibility?: never; explain?: never; format?: never; grok_pattern?: never; quote?: never; should_trim_fields?: never; timeout?: never; timestamp_field?: never; timestamp_format?: never; messages?: never; }; } export interface TextStructureFindMessageStructureResponse { charset: string; ecs_compatibility?: TextStructureEcsCompatibilityType; field_stats: Record; format: TextStructureFormatType; grok_pattern?: GrokPattern; java_timestamp_formats?: string[]; joda_timestamp_formats?: string[]; ingest_pipeline: IngestPipelineConfig; mappings: MappingTypeMapping; multiline_start_pattern?: string; need_client_timezone: boolean; num_lines_analyzed: integer; num_messages_analyzed: integer; sample_start: string; timestamp_field?: Field; } export interface TextStructureFindStructureRequest { /** The text's character set. * It must be a character set that is supported by the JVM that Elasticsearch uses. * For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. * If this parameter is not specified, the structure finder chooses an appropriate character set. */ charset?: string; /** If you have set `format` to `delimited`, you can specify the column names in a comma-separated list. * If this parameter is not specified, the structure finder uses the column names from the header row of the text. * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string; /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. * Only a single character is supported; the delimiter cannot have multiple characters. * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string; /** The mode of compatibility with ECS compliant Grok patterns. * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. * Valid values are `disabled` and `v1`. * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
*/ ecs_compatibility?: string; /** If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. * If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */ explain?: boolean; /** The high level structure of the text. * Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. * By default, the API chooses the format. * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: string; /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern; /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. * If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. */ has_header_row?: boolean; /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. * If you have extremely long messages, you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */ line_merge_size_limit?: uint; /** The number of lines to include in the structural analysis, starting from the beginning of the text. * The minimum is 2. * If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. * * NOTE: The number of lines and the variation of the lines affects the speed of the analysis. * For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. * If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */ lines_to_sample?: uint; /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. * Only a single character is supported. * If this parameter is not specified, the default value is a double quote (`"`). * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string; /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. * Otherwise, the default value is `false`.
*/ should_trim_fields?: boolean; /** The maximum amount of time that the structure analysis can take. * If the analysis is still running when the timeout expires, then it will be stopped. */ timeout?: Duration; /** The name of the field that contains the primary timestamp of each record in the text. * In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. * * If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. * * For structured text, if you specify this parameter, the field must exist within the text. * * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field; /** The Java time format of the timestamp field in the text. * * Only a subset of Java time format letter groups are supported: * * * `a` * * `d` * * `dd` * * `EEE` * * `EEEE` * * `H` * * `HH` * * `h` * * `M` * * `MM` * * `MMM` * * `MMMM` * * `mm` * * `ss` * * `XX` * * `XXX` * * `yy` * * `yyyy` * * `zzz` * * Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a `.`, `,`, or `:`. * Spacing and punctuation are also permitted, with the exception of `?`, newline, and carriage return, together with literal text enclosed in single quotes. * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. * * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. * Another is when the timestamp format is one that the structure finder does not consider by default. * * If this parameter is not specified, the structure finder chooses the best format from a built-in set. * * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string; text_files?: TJsonDocument[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { charset?: never; column_names?: never; delimiter?: never; ecs_compatibility?: never; explain?: never; format?: never; grok_pattern?: never; has_header_row?: never; line_merge_size_limit?: never; lines_to_sample?: never; quote?: never; should_trim_fields?: never; timeout?: never; timestamp_field?: never; timestamp_format?: never; text_files?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { charset?: never; column_names?: never; delimiter?: never; ecs_compatibility?: never; explain?: never; format?: never; grok_pattern?: never; has_header_row?: never; line_merge_size_limit?: never; lines_to_sample?: never; quote?: never; should_trim_fields?: never; timeout?: never; timestamp_field?: never; timestamp_format?: never; text_files?: never; }; } export interface TextStructureFindStructureResponse { /** The character encoding used to parse the text.
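*
* A hedged sketch of consuming the response (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client):
* @example
* const found = await client.textStructure.findStructure({ body: 'a,b\n1,2\n3,4' })
* console.log(found.charset, found.format)
* // `found.mappings` can seed an index and `found.ingest_pipeline` an ingest pipeline.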
*/ charset: string; has_header_row?: boolean; /** For UTF character encodings, it indicates whether the text begins with a byte order marker. */ has_byte_order_marker: boolean; /** Valid values include `ndjson`, `xml`, `delimited`, and `semi_structured_text`. */ format: string; /** The most common values of each field, plus basic numeric statistics for any numeric fields. * This information may provide clues that the data needs to be cleaned or transformed prior to use by other Elastic Stack functionality. */ field_stats: Record; /** The first two messages in the text verbatim. * This may help diagnose parse errors or accidental uploads of the wrong text. */ sample_start: string; /** The number of distinct messages the lines contained. * For NDJSON, this value is the same as `num_lines_analyzed`. * For other text formats, messages can span several lines. */ num_messages_analyzed: integer; /** Some suitable mappings for an index into which the data could be ingested. */ mappings: MappingTypeMapping; quote?: string; delimiter?: string; /** If a timestamp format is detected that does not include a timezone, `need_client_timezone` is `true`. * The server that parses the text must therefore be told the correct timezone by the client. */ need_client_timezone: boolean; /** The number of lines of the text that were analyzed. */ num_lines_analyzed: integer; /** If `format` is `delimited`, the `column_names` field lists the column names in the order they appear in the sample. */ column_names?: string[]; explanation?: string[]; grok_pattern?: GrokPattern; multiline_start_pattern?: string; exclude_lines_pattern?: string; /** The Java time formats recognized in the time fields. * Elasticsearch mappings and ingest pipelines use this format. */ java_timestamp_formats?: string[]; /** Information that is used to tell Logstash how to parse timestamps. */ joda_timestamp_formats?: string[]; /** The field considered most likely to be the primary timestamp of each document. */ timestamp_field?: Field; should_trim_fields?: boolean; ingest_pipeline: IngestPipelineConfig; } export interface TextStructureTestGrokPatternMatchedField { match: string; offset: integer; length: integer; } export interface TextStructureTestGrokPatternMatchedText { matched: boolean; fields?: Record; } export interface TextStructureTestGrokPatternRequest extends RequestBase { /** The mode of compatibility with ECS compliant Grok patterns. * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. * Valid values are `disabled` and `v1`. */ ecs_compatibility?: string; /** The Grok pattern to run on the text. */ grok_pattern: GrokPattern; /** The lines of text to run the Grok pattern on. */ text: string[]; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { ecs_compatibility?: never; grok_pattern?: never; text?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { ecs_compatibility?: never; grok_pattern?: never; text?: never; }; } export interface TextStructureTestGrokPatternResponse { matches: TextStructureTestGrokPatternMatchedText[]; } export interface TransformDestination { /** The destination index for the transform. The mappings of the destination index are deduced based on the source * fields when possible. If alternate mappings are required, use the create index API prior to starting the * transform.
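*
* A hedged sketch (the index and pipeline names are hypothetical):
* @example
* const dest: TransformDestination = {
*   index: 'ecommerce-customer-summary',
*   pipeline: 'add-ingest-timestamp',
* }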
*/ index?: IndexName; /** The unique identifier for an ingest pipeline. */ pipeline?: string; } export interface TransformLatest { /** Specifies the date field that is used to identify the latest documents. */ sort: Field; /** Specifies an array of one or more fields that are used to group the data. */ unique_key: Field[]; } export interface TransformPivot { /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted * average. */ aggregations?: Record; /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted * average. * @alias aggregations */ aggs?: Record; /** Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are * currently supported: date histogram, geotile grid, histogram, terms. */ group_by?: Record; } export interface TransformPivotGroupByContainer { date_histogram?: AggregationsDateHistogramAggregation; geotile_grid?: AggregationsGeoTileGridAggregation; histogram?: AggregationsHistogramAggregation; terms?: AggregationsTermsAggregation; } export interface TransformRetentionPolicy { /** The date field that is used to calculate the age of the document. */ field: Field; /** Specifies the maximum age of a document in the destination index. Documents that are older than the configured * value are removed from the destination index. */ max_age: Duration; } export interface TransformRetentionPolicyContainer { /** Specifies that the transform uses a time field to set the retention policy. */ time?: TransformRetentionPolicy; } export interface TransformSettings { /** Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align * checkpoint ranges with the date histogram interval when date histogram is specified as a group source in the * transform config. As a result, fewer document updates in the destination index will be performed, thus improving * overall performance. */ align_checkpoints?: boolean; /** Defines if dates in the output should be written as ISO formatted strings or as millis since epoch. epoch_millis was * the default for transforms created before version 7.11. For compatible output, set this value to `true`. */ dates_as_epoch_millis?: boolean; /** Specifies whether the transform should deduce the destination index mappings from the transform configuration. */ deduce_mappings?: boolean; /** Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a * wait time between search requests. The default value is null, which disables throttling. */ docs_per_second?: float; /** Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker * exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the * maximum is `65,536`. */ max_page_search_size?: integer; /** If `true`, the transform runs in unattended mode.
In unattended mode, the transform retries indefinitely in case * of an error, which means the transform never fails. Setting the number of retries to anything other than infinite fails * validation. */ unattended?: boolean; } export interface TransformSource { /** The source indices for the transform. It can be a single index, an index pattern (for example, `"my-index-*"`), an * array of indices (for example, `["my-index-000001", "my-index-000002"]`), or an array of index patterns (for * example, `["my-index-*", "my-other-index-*"]`). For remote indices, use the syntax `"remote_name:index_name"`. If * any indices are in remote clusters, then the master node and at least one transform node must have the `remote_cluster_client` node role. */ index: Indices; /** A query clause that retrieves a subset of data from the source index. */ query?: QueryDslQueryContainer; /** Definitions of search-time runtime fields that can be used by the transform. For search runtime fields, all data * nodes, including remote nodes, must be 7.12 or later. */ runtime_mappings?: MappingRuntimeFields; } export interface TransformSyncContainer { /** Specifies that the transform uses a time field to synchronize the source and destination indices. */ time?: TransformTimeSync; } export interface TransformTimeSync { /** The time delay between the current time and the latest input data time. */ delay?: Duration; /** The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field * that contains the ingest timestamp. If you use a different field, you might need to set the delay such that it * accounts for data transmission delays. */ field: Field; } export interface TransformDeleteTransformRequest extends RequestBase { /** Identifier for the transform. */ transform_id: Id; /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is * deleted regardless of its current state. */ force?: boolean; /** If this value is true, the destination index is deleted together with the transform. If false, the destination * index will not be deleted. */ delete_dest_index?: boolean; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; force?: never; delete_dest_index?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; force?: never; delete_dest_index?: never; timeout?: never; }; } export type TransformDeleteTransformResponse = AcknowledgedResponseBase; export interface TransformGetTransformRequest extends RequestBase { /** Identifier for the transform. It can be a transform identifier or a * wildcard expression. You can get information for all transforms by using * `_all`, by specifying `*` as the `<transform_id>`, or by omitting the * `<transform_id>`. */ transform_id?: Names; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no transforms that match. * 2. Contains the _all string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * If this parameter is false, the request returns a 404 status code when * there are no matches or only partial matches. */ allow_no_match?: boolean; /** Skips the specified number of transforms.
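*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client):
* @example
* // Fetch the second page of ten transforms:
* const page = await client.transform.getTransform({ transform_id: '_all', from: 10, size: 10 })
* console.log(page.count, page.transforms.map(t => t.id))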
*/ from?: integer; /** Specifies the maximum number of transforms to obtain. */ size?: integer; /** Excludes fields that were automatically added when creating the * transform. This allows the configuration to be in an acceptable format to * be retrieved and then added to another cluster. */ exclude_generated?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; allow_no_match?: never; from?: never; size?: never; exclude_generated?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; allow_no_match?: never; from?: never; size?: never; exclude_generated?: never; }; } export interface TransformGetTransformResponse { count: long; transforms: TransformGetTransformTransformSummary[]; } export interface TransformGetTransformTransformSummary { /** The security privileges that the transform uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the transform, this property is omitted. */ authorization?: MlTransformAuthorization; /** The time the transform was created. */ create_time?: EpochTime; create_time_string?: DateTime; /** Free text description of the transform. */ description?: string; /** The destination for the transform. */ dest: ReindexDestination; frequency?: Duration; id: Id; latest?: TransformLatest; /** The pivot method transforms the data by aggregating and grouping it. */ pivot?: TransformPivot; retention_policy?: TransformRetentionPolicyContainer; /** Defines optional transform settings. */ settings?: TransformSettings; /** The source of the data for the transform. */ source: TransformSource; /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer; /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString; _meta?: Metadata; } export interface TransformGetTransformStatsCheckpointStats { checkpoint: long; checkpoint_progress?: TransformGetTransformStatsTransformProgress; timestamp?: DateTime; timestamp_millis?: EpochTime; time_upper_bound?: DateTime; time_upper_bound_millis?: EpochTime; } export interface TransformGetTransformStatsCheckpointing { changes_last_detected_at?: long; changes_last_detected_at_string?: DateTime; last: TransformGetTransformStatsCheckpointStats; next?: TransformGetTransformStatsCheckpointStats; operations_behind?: long; last_search_time?: long; last_search_time_string?: DateTime; } export interface TransformGetTransformStatsRequest extends RequestBase { /** Identifier for the transform. It can be a transform identifier or a * wildcard expression. You can get information for all transforms by using * `_all`, by specifying `*` as the `<transform_id>`, or by omitting the * `<transform_id>`. */ transform_id: Names; /** Specifies what to do when the request: * * 1. Contains wildcard expressions and there are no transforms that match. * 2. Contains the _all string or no identifiers and there are no matches. * 3. Contains wildcard expressions and there are only partial matches. * * If this parameter is false, the request returns a 404 status code when * there are no matches or only partial matches. */ allow_no_match?: boolean; /** Skips the specified number of transforms. */ from?: long; /** Specifies the maximum number of transforms to obtain.
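*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client):
* @example
* const resp = await client.transform.getTransformStats({ transform_id: '_all', size: 100 })
* resp.transforms.forEach(t => console.log(t.id, t.state, t.stats.documents_processed))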
*/ size?: long; /** Controls the time to wait for the stats. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; allow_no_match?: never; from?: never; size?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; allow_no_match?: never; from?: never; size?: never; timeout?: never; }; } export interface TransformGetTransformStatsResponse { count: long; transforms: TransformGetTransformStatsTransformStats[]; } export interface TransformGetTransformStatsTransformHealthIssue { /** The type of the issue. */ type: string; /** A description of the issue. */ issue: string; /** Details about the issue. */ details?: string; /** Number of times this issue has occurred since it started. */ count: integer; /** The timestamp when this issue occurred for the first time. */ first_occurrence?: EpochTime; first_occurence_string?: DateTime; } export interface TransformGetTransformStatsTransformIndexerStats { delete_time_in_ms?: EpochTime; documents_indexed: long; documents_deleted?: long; documents_processed: long; exponential_avg_checkpoint_duration_ms: DurationValue; exponential_avg_documents_indexed: double; exponential_avg_documents_processed: double; index_failures: long; index_time_in_ms: DurationValue; index_total: long; pages_processed: long; processing_time_in_ms: DurationValue; processing_total: long; search_failures: long; search_time_in_ms: DurationValue; search_total: long; trigger_count: long; } export interface TransformGetTransformStatsTransformProgress { docs_indexed: long; docs_processed: long; docs_remaining?: long; percent_complete?: double; total_docs?: long; } export interface TransformGetTransformStatsTransformStats { checkpointing: TransformGetTransformStatsCheckpointing; health?: TransformGetTransformStatsTransformStatsHealth; id: Id; /** @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes; reason?: string; state: string; stats: TransformGetTransformStatsTransformIndexerStats; } export interface TransformGetTransformStatsTransformStatsHealth { status: HealthStatus; /** If a non-healthy status is returned, contains a list of issues of the transform. */ issues?: TransformGetTransformStatsTransformHealthIssue[]; } export interface TransformPreviewTransformRequest extends RequestBase { /** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform * configuration details in the request body. */ transform_id?: Id; /** Period to wait for a response. If no response is received before the * timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The destination for the transform. */ dest?: TransformDestination; /** Free text description of the transform. */ description?: string; /** The interval between checks for changes in the source indices when the * transform is running continuously. Also determines the retry interval in * the event of transient failures while the transform is searching or * indexing. The minimum value is 1s and the maximum is 1h. */ frequency?: Duration; /** The pivot method transforms the data by aggregating and grouping it. * These objects define the group by fields and the aggregation to reduce * the data. */ pivot?: TransformPivot; /** The source of the data for the transform. */ source?: TransformSource; /** Defines optional transform settings.
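*
* A hedged sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client; index and field names are hypothetical):
* @example
* const preview = await client.transform.previewTransform({
*   source: { index: 'kibana_sample_data_ecommerce' },
*   pivot: {
*     group_by: { customer: { terms: { field: 'customer_id' } } },
*     aggregations: { total: { sum: { field: 'taxful_total_price' } } },
*   },
*   settings: { max_page_search_size: 500 },
* })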
*/ settings?: TransformSettings; /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer; /** Defines a retention policy for the transform. Data that meets the defined * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer; /** The latest method transforms the data by finding the latest document for * each unique key. */ latest?: TransformLatest; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; timeout?: never; dest?: never; description?: never; frequency?: never; pivot?: never; source?: never; settings?: never; sync?: never; retention_policy?: never; latest?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; timeout?: never; dest?: never; description?: never; frequency?: never; pivot?: never; source?: never; settings?: never; sync?: never; retention_policy?: never; latest?: never; }; } export interface TransformPreviewTransformResponse { generated_dest_index: IndicesIndexState; preview: TTransform[]; } export interface TransformPutTransformRequest extends RequestBase { /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), * hyphens, and underscores. It has a 64-character limit and must start and end with alphanumeric characters. */ transform_id: Id; /** When the transform is created, a series of validations occur to ensure its success. For example, there is a * check for the existence of the source indices and a check that the destination index is not part of the source * index pattern. You can use this parameter to skip the checks, for example when the source index does not exist * until after the transform is created. However, the validations (except for privilege checks) are always run when * you start the transform. */ defer_validation?: boolean; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The destination for the transform. */ dest: TransformDestination; /** Free text description of the transform. */ description?: string; /** The interval between checks for changes in the source indices when the transform is running continuously. Also * determines the retry interval in the event of transient failures while the transform is searching or indexing. * The minimum value is `1s` and the maximum is `1h`. */ frequency?: Duration; /** The latest method transforms the data by finding the latest document for each unique key. */ latest?: TransformLatest; /** Defines optional transform metadata. */ _meta?: Metadata; /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields * and the aggregation to reduce the data. */ pivot?: TransformPivot; /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the * destination index. */ retention_policy?: TransformRetentionPolicyContainer; /** Defines optional transform settings. */ settings?: TransformSettings; /** The source of the data for the transform. */ source: TransformSource; /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer; /** All values in `body` will be added to the request body.
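*
* A hedged end-to-end sketch of creating a continuous pivot transform (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client; identifiers are hypothetical):
* @example
* await client.transform.putTransform({
*   transform_id: 'ecommerce-customer-summary',
*   source: { index: 'kibana_sample_data_ecommerce' },
*   dest: { index: 'ecommerce-customer-summary-dest' },
*   pivot: {
*     group_by: { customer: { terms: { field: 'customer_id' } } },
*     aggregations: { total: { sum: { field: 'taxful_total_price' } } },
*   },
*   sync: { time: { field: 'order_date', delay: '60s' } },
*   frequency: '5m',
* })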
*/ body?: string | ({ [key: string]: any; } & { transform_id?: never; defer_validation?: never; timeout?: never; dest?: never; description?: never; frequency?: never; latest?: never; _meta?: never; pivot?: never; retention_policy?: never; settings?: never; source?: never; sync?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; defer_validation?: never; timeout?: never; dest?: never; description?: never; frequency?: never; latest?: never; _meta?: never; pivot?: never; retention_policy?: never; settings?: never; source?: never; sync?: never; }; } export type TransformPutTransformResponse = AcknowledgedResponseBase; export interface TransformResetTransformRequest extends RequestBase { /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), * hyphens, and underscores. It has a 64-character limit and must start and end with alphanumeric characters. */ transform_id: Id; /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform * must be stopped before it can be reset. */ force?: boolean; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; force?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; force?: never; timeout?: never; }; } export type TransformResetTransformResponse = AcknowledgedResponseBase; export interface TransformScheduleNowTransformRequest extends RequestBase { /** Identifier for the transform. */ transform_id: Id; /** Controls the time to wait for the scheduling to take place. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; timeout?: never; }; } export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase; export interface TransformStartTransformRequest extends RequestBase { /** Identifier for the transform. */ transform_id: Id; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; /** Restricts the set of transformed entities to those changed after this time. Relative times like `now-30d` are supported. Only applicable for continuous transforms. */ from?: string; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; timeout?: never; from?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; timeout?: never; from?: never; }; } export type TransformStartTransformResponse = AcknowledgedResponseBase; export interface TransformStopTransformRequest extends RequestBase { /** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. * To stop all transforms, use `_all` or `*` as the identifier.
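*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client):
* @example
* // Stop every transform and block until each one reaches the STOPPED state:
* await client.transform.stopTransform({ transform_id: '_all', wait_for_completion: true })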
*/ transform_id: Name; /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; * contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there * are only partial matches. * * If it is true, the API returns a successful acknowledgement message when there are no matches. When there are * only partial matches, the API stops the appropriate transforms. * * If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean; /** If it is true, the API forcefully stops the transforms. */ force?: boolean; /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the * timeout expires, the request returns a timeout exception. However, the request continues processing and * eventually moves the transform to a STOPPED state. */ timeout?: Duration; /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, * the transform stops as soon as possible. */ wait_for_checkpoint?: boolean; /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns * immediately and the indexer is stopped asynchronously in the background. */ wait_for_completion?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { transform_id?: never; allow_no_match?: never; force?: never; timeout?: never; wait_for_checkpoint?: never; wait_for_completion?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; allow_no_match?: never; force?: never; timeout?: never; wait_for_checkpoint?: never; wait_for_completion?: never; }; } export type TransformStopTransformResponse = AcknowledgedResponseBase; export interface TransformUpdateTransformRequest extends RequestBase { /** Identifier for the transform. */ transform_id: Id; /** When true, deferrable validations are not run. This behavior may be * desired if the source index does not exist until after the transform is * created. */ defer_validation?: boolean; /** Period to wait for a response. If no response is received before the * timeout expires, the request fails and returns an error. */ timeout?: Duration; /** The destination for the transform. */ dest?: TransformDestination; /** Free text description of the transform. */ description?: string; /** The interval between checks for changes in the source indices when the * transform is running continuously. Also determines the retry interval in * the event of transient failures while the transform is searching or * indexing. The minimum value is 1s and the maximum is 1h. */ frequency?: Duration; /** Defines optional transform metadata. */ _meta?: Metadata; /** The source of the data for the transform. */ source?: TransformSource; /** Defines optional transform settings. */ settings?: TransformSettings; /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer; /** Defines a retention policy for the transform. Data that meets the defined * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer | null; /** All values in `body` will be added to the request body. 
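*
* A hedged sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client; the transform id is hypothetical; `retention_policy: null` clears the policy, as the type above allows):
* @example
* await client.transform.updateTransform({
*   transform_id: 'ecommerce-customer-summary',
*   description: 'Daily rollup of customer spend',
*   retention_policy: null,
* })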
*/ body?: string | ({ [key: string]: any; } & { transform_id?: never; defer_validation?: never; timeout?: never; dest?: never; description?: never; frequency?: never; _meta?: never; source?: never; settings?: never; sync?: never; retention_policy?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { transform_id?: never; defer_validation?: never; timeout?: never; dest?: never; description?: never; frequency?: never; _meta?: never; source?: never; settings?: never; sync?: never; retention_policy?: never; }; } export interface TransformUpdateTransformResponse { authorization?: MlTransformAuthorization; create_time: long; description: string; dest: ReindexDestination; frequency?: Duration; id: Id; latest?: TransformLatest; pivot?: TransformPivot; retention_policy?: TransformRetentionPolicyContainer; settings: TransformSettings; source: ReindexSource; sync?: TransformSyncContainer; version: VersionString; _meta?: Metadata; } export interface TransformUpgradeTransformsRequest extends RequestBase { /** When true, the request checks for updates but does not run them. */ dry_run?: boolean; /** Period to wait for a response. If no response is received before the timeout expires, the request fails and * returns an error. */ timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { dry_run?: never; timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { dry_run?: never; timeout?: never; }; } export interface TransformUpgradeTransformsResponse { /** The number of transforms that need to be upgraded. */ needs_update: integer; /** The number of transforms that don’t require upgrading. */ no_action: integer; /** The number of transforms that have been upgraded. 
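*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client):
* @example
* // Check what would be upgraded without changing anything:
* const check = await client.transform.upgradeTransforms({ dry_run: true })
* console.log(check.needs_update, check.no_action, check.updated)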
*/ updated: integer; } export interface WatcherAcknowledgeState { state: WatcherAcknowledgementOptions; timestamp: DateTime; } export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked'; export interface WatcherAction { action_type?: WatcherActionType; condition?: WatcherConditionContainer; foreach?: string; max_iterations?: integer; name?: Name; throttle_period?: Duration; throttle_period_in_millis?: DurationValue; transform?: TransformContainer; index?: WatcherIndexAction; logging?: WatcherLoggingAction; email?: WatcherEmailAction; pagerduty?: WatcherPagerDutyAction; slack?: WatcherSlackAction; webhook?: WatcherWebhookAction; } export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip'; export interface WatcherActionStatus { ack: WatcherAcknowledgeState; last_execution?: WatcherExecutionState; last_successful_execution?: WatcherExecutionState; last_throttle?: WatcherThrottleState; } export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled'; export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty'; export type WatcherActions = Record; export interface WatcherActivationState { active: boolean; timestamp: DateTime; } export interface WatcherActivationStatus { actions: WatcherActions; state: WatcherActivationState; version: VersionNumber; } export interface WatcherAlwaysCondition { } export interface WatcherArrayCompareConditionKeys { path: string; } export type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys & { [property: string]: WatcherArrayCompareOpParams | string; }; export interface WatcherArrayCompareOpParams { quantifier: WatcherQuantifier; value: FieldValue; } export interface WatcherChainInput { inputs: Partial>[]; } export interface WatcherConditionContainer { always?: WatcherAlwaysCondition; array_compare?: Partial>; compare?: Partial>>>; never?: WatcherNeverCondition; script?: WatcherScriptCondition; } export type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte'; export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'; export type WatcherConnectionScheme = 'http' | 'https'; export type WatcherCronExpression = string; export interface WatcherDailySchedule { at: WatcherScheduleTimeOfDay[]; } export type WatcherDataAttachmentFormat = 'json' | 'yaml'; export interface WatcherDataEmailAttachment { format?: WatcherDataAttachmentFormat; } export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday'; export interface WatcherEmail { id?: Id; bcc?: string | string[]; body?: WatcherEmailBody; cc?: string | string[]; from?: string; priority?: WatcherEmailPriority; reply_to?: string | string[]; sent_date?: DateTime; subject: string; to: string | string[]; attachments?: Record; } export interface WatcherEmailAction extends WatcherEmail { } export interface WatcherEmailAttachmentContainer { http?: WatcherHttpEmailAttachment; reporting?: WatcherReportingEmailAttachment; data?: WatcherDataEmailAttachment; } export interface WatcherEmailBody { html?: string; text?: string; } export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest'; export interface WatcherEmailResult { account?: string; message: WatcherEmail; reason?: string; } export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished'; export interface 
WatcherExecutionResult { actions: WatcherExecutionResultAction[]; condition: WatcherExecutionResultCondition; execution_duration: DurationValue; execution_time: DateTime; input: WatcherExecutionResultInput; } export interface WatcherExecutionResultAction { email?: WatcherEmailResult; id: Id; index?: WatcherIndexResult; logging?: WatcherLoggingResult; pagerduty?: WatcherPagerDutyResult; reason?: string; slack?: WatcherSlackResult; status: WatcherActionStatusOptions; type: WatcherActionType; webhook?: WatcherWebhookResult; error?: ErrorCause; } export interface WatcherExecutionResultCondition { met: boolean; status: WatcherActionStatusOptions; type: WatcherConditionType; } export interface WatcherExecutionResultInput { payload: Record; status: WatcherActionStatusOptions; type: WatcherInputType; } export interface WatcherExecutionState { successful: boolean; timestamp: DateTime; reason?: string; } export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued'; export interface WatcherExecutionThreadPool { /** The largest size of the execution thread pool, which indicates the largest number of concurrent running watches. */ max_size: long; /** The number of watches that were triggered and are currently queued. */ queue_size: long; } export interface WatcherHourAndMinute { hour: integer[]; minute: integer[]; } export interface WatcherHourlySchedule { minute: integer[]; } export interface WatcherHttpEmailAttachment { content_type?: string; inline?: boolean; request?: WatcherHttpInputRequestDefinition; } export interface WatcherHttpInput { extract?: string[]; request?: WatcherHttpInputRequestDefinition; response_content_type?: WatcherResponseContentType; } export interface WatcherHttpInputAuthentication { basic: WatcherHttpInputBasicAuthentication; } export interface WatcherHttpInputBasicAuthentication { password: Password; username: Username; } export type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete'; export interface WatcherHttpInputProxy { host: Host; port: uint; } export interface WatcherHttpInputRequestDefinition { auth?: WatcherHttpInputAuthentication; body?: string; connection_timeout?: Duration; headers?: Record; host?: Host; method?: WatcherHttpInputMethod; params?: Record; path?: string; port?: uint; proxy?: WatcherHttpInputProxy; read_timeout?: Duration; scheme?: WatcherConnectionScheme; url?: string; } export interface WatcherHttpInputRequestResult extends WatcherHttpInputRequestDefinition { } export interface WatcherHttpInputResponseResult { body: string; headers: HttpHeaders; status: integer; } export interface WatcherIndexAction { index: IndexName; doc_id?: Id; refresh?: Refresh; op_type?: OpType; timeout?: Duration; execution_time_field?: Field; } export interface WatcherIndexResult { response: WatcherIndexResultSummary; } export interface WatcherIndexResultSummary { created: boolean; id: Id; index: IndexName; result: Result; version: VersionNumber; } export interface WatcherInputContainer { chain?: WatcherChainInput; http?: WatcherHttpInput; search?: WatcherSearchInput; simple?: Record; } export type WatcherInputType = 'http' | 'search' | 'simple'; export interface WatcherLoggingAction { level?: string; text: string; category?: string; } export interface WatcherLoggingResult { logged_text: string; } export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 
'november' | 'december'; export interface WatcherNeverCondition { } export interface WatcherPagerDutyAction extends WatcherPagerDutyEvent { } export interface WatcherPagerDutyContext { href?: string; src?: string; type: WatcherPagerDutyContextType; } export type WatcherPagerDutyContextType = 'link' | 'image'; export interface WatcherPagerDutyEvent { account?: string; attach_payload: boolean; client?: string; client_url?: string; contexts?: WatcherPagerDutyContext[]; /** @alias contexts */ context?: WatcherPagerDutyContext[]; description: string; event_type?: WatcherPagerDutyEventType; incident_key: string; proxy?: WatcherPagerDutyEventProxy; } export interface WatcherPagerDutyEventProxy { host?: Host; port?: integer; } export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge'; export interface WatcherPagerDutyResult { event: WatcherPagerDutyEvent; reason?: string; request?: WatcherHttpInputRequestResult; response?: WatcherHttpInputResponseResult; } export type WatcherQuantifier = 'some' | 'all'; export interface WatcherQueryWatch { _id: Id; status?: WatcherWatchStatus; watch?: WatcherWatch; _primary_term?: integer; _seq_no?: SequenceNumber; } export interface WatcherReportingEmailAttachment { url: string; inline?: boolean; retries?: integer; interval?: Duration; request?: WatcherHttpInputRequestDefinition; } export type WatcherResponseContentType = 'json' | 'yaml' | 'text'; export interface WatcherScheduleContainer { timezone?: string; cron?: WatcherCronExpression; daily?: WatcherDailySchedule; hourly?: WatcherHourlySchedule; interval?: Duration; monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[]; weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[]; yearly?: WatcherTimeOfYear | WatcherTimeOfYear[]; } export type WatcherScheduleTimeOfDay = string | WatcherHourAndMinute; export interface WatcherScheduleTriggerEvent { scheduled_time: DateTime; triggered_time?: DateTime; } export interface WatcherScriptCondition { lang?: ScriptLanguage; params?: Record; source?: ScriptSource; id?: string; } export interface WatcherSearchInput { extract?: string[]; request: WatcherSearchInputRequestDefinition; timeout?: Duration; } export interface WatcherSearchInputRequestBody { query: QueryDslQueryContainer; } export interface WatcherSearchInputRequestDefinition { body?: WatcherSearchInputRequestBody; indices?: IndexName[]; indices_options?: IndicesOptions; search_type?: SearchType; template?: WatcherSearchTemplateRequestBody; rest_total_hits_as_int?: boolean; } export interface WatcherSearchTemplateRequestBody { explain?: boolean; /** ID of the search template to use. If no source is specified, * this parameter is required. */ id?: Id; params?: Record; profile?: boolean; /** An inline search template. Supports the same parameters as the search API's * request body. Also supports Mustache variables. If no id is specified, this * parameter is required. 
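*
* A hedged sketch of an inline Mustache template (field and parameter names are illustrative):
* @example
* const template: WatcherSearchTemplateRequestBody = {
*   source: '{"query":{"match":{"message":"{{search_term}}"}}}',
*   params: { search_term: 'error' },
* }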
*/ source?: string; } export interface WatcherSimulatedActions { actions: string[]; all: WatcherSimulatedActions; use_all: boolean; } export interface WatcherSlackAction { account?: string; message: WatcherSlackMessage; } export interface WatcherSlackAttachment { author_icon?: string; author_link?: string; author_name: string; color?: string; fallback?: string; fields?: WatcherSlackAttachmentField[]; footer?: string; footer_icon?: string; image_url?: string; pretext?: string; text?: string; thumb_url?: string; title: string; title_link?: string; ts?: EpochTime; } export interface WatcherSlackAttachmentField { short: boolean; title: string; value: string; } export interface WatcherSlackDynamicAttachment { attachment_template: WatcherSlackAttachment; list_path: string; } export interface WatcherSlackMessage { attachments: WatcherSlackAttachment[]; dynamic_attachments?: WatcherSlackDynamicAttachment; from: string; icon?: string; text: string; to: string[]; } export interface WatcherSlackResult { account?: string; message: WatcherSlackMessage; } export interface WatcherThrottleState { reason: string; timestamp: DateTime; } export interface WatcherTimeOfMonth { at: string[]; on: integer[]; } export interface WatcherTimeOfWeek { at: string[]; on: WatcherDay[]; } export interface WatcherTimeOfYear { at: string[]; int: WatcherMonth[]; on: integer[]; } export interface WatcherTriggerContainer { schedule?: WatcherScheduleContainer; } export interface WatcherTriggerEventContainer { schedule?: WatcherScheduleTriggerEvent; } export interface WatcherTriggerEventResult { manual: WatcherTriggerEventContainer; triggered_time: DateTime; type: string; } export interface WatcherWatch { actions: Record; condition: WatcherConditionContainer; input: WatcherInputContainer; metadata?: Metadata; status?: WatcherWatchStatus; throttle_period?: Duration; throttle_period_in_millis?: DurationValue; transform?: TransformContainer; trigger: WatcherTriggerContainer; } export interface WatcherWatchStatus { actions: WatcherActions; last_checked?: DateTime; last_met_condition?: DateTime; state: WatcherActivationState; version: VersionNumber; execution_state?: string; } export interface WatcherWebhookAction extends WatcherHttpInputRequestDefinition { } export interface WatcherWebhookResult { request: WatcherHttpInputRequestResult; response?: WatcherHttpInputResponseResult; } export interface WatcherAckWatchRequest extends RequestBase { /** The watch identifier. */ watch_id: Name; /** A comma-separated list of the action identifiers to acknowledge. * If you omit this parameter, all of the actions of the watch are acknowledged. */ action_id?: Names; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { watch_id?: never; action_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { watch_id?: never; action_id?: never; }; } export interface WatcherAckWatchResponse { status: WatcherWatchStatus; } export interface WatcherActivateWatchRequest extends RequestBase { /** The watch identifier. */ watch_id: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { watch_id?: never; }); /** All values in `querystring` will be added to the request querystring. 
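*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client; the watch id is hypothetical):
* @example
* const resp = await client.watcher.activateWatch({ watch_id: 'error-spike' })
* console.log(resp.status.state.active) // true once the watch is active again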
*/ querystring?: { [key: string]: any; } & { watch_id?: never; }; } export interface WatcherActivateWatchResponse { status: WatcherActivationStatus; } export interface WatcherDeactivateWatchRequest extends RequestBase { /** The watch identifier. */ watch_id: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { watch_id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { watch_id?: never; }; } export interface WatcherDeactivateWatchResponse { status: WatcherActivationStatus; } export interface WatcherDeleteWatchRequest extends RequestBase { /** The watch identifier. */ id: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface WatcherDeleteWatchResponse { found: boolean; _id: Id; _version: VersionNumber; } export interface WatcherExecuteWatchRequest extends RequestBase { /** The watch identifier. */ id?: Id; /** Defines whether the watch runs in debug mode. */ debug?: boolean; /** Determines how to handle the watch actions as part of the watch execution. */ action_modes?: Record; /** When present, the watch uses this object as a payload instead of executing its own input. */ alternative_input?: Record; /** When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. */ ignore_condition?: boolean; /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. * In addition, the status of the watch is updated, possibly throttling subsequent runs. * This can also be specified as an HTTP parameter. */ record_execution?: boolean; simulated_actions?: WatcherSimulatedActions; /** This structure is parsed as the data of the trigger event that will be used during the watch execution. */ trigger_data?: WatcherScheduleTriggerEvent; /** When present, this watch is used instead of the one specified in the request. * This watch is not persisted to the index and `record_execution` cannot be set. */ watch?: WatcherWatch; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; debug?: never; action_modes?: never; alternative_input?: never; ignore_condition?: never; record_execution?: never; simulated_actions?: never; trigger_data?: never; watch?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; debug?: never; action_modes?: never; alternative_input?: never; ignore_condition?: never; record_execution?: never; simulated_actions?: never; trigger_data?: never; watch?: never; }; } export interface WatcherExecuteWatchResponse { /** The watch record identifier as it would be stored in the `.watcher-history` index. */ _id: Id; /** The watch record document as it would be stored in the `.watcher-history` index. 
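*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client; the watch id is hypothetical):
* @example
* const resp = await client.watcher.executeWatch({
*   id: 'error-spike',
*   ignore_condition: true,  // force the condition to count as met
*   record_execution: false, // do not persist this run to .watcher-history
* })
* console.log(resp.watch_record.result.condition.met)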
*/ watch_record: WatcherExecuteWatchWatchRecord; } export interface WatcherExecuteWatchWatchRecord { condition: WatcherConditionContainer; input: WatcherInputContainer; messages: string[]; metadata?: Metadata; node: string; result: WatcherExecutionResult; state: WatcherExecutionStatus; trigger_event: WatcherTriggerEventResult; user: Username; watch_id: Id; status?: WatcherWatchStatus; } export interface WatcherGetSettingsRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface WatcherGetSettingsResponse { index: IndicesIndexSettings; } export interface WatcherGetWatchRequest extends RequestBase { /** The watch identifier. */ id: Name; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { id?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; }; } export interface WatcherGetWatchResponse { found: boolean; _id: Id; status?: WatcherWatchStatus; watch?: WatcherWatch; _primary_term?: integer; _seq_no?: SequenceNumber; _version?: VersionNumber; } export interface WatcherPutWatchRequest extends RequestBase { /** The identifier for the watch. */ id: Id; /** The initial state of the watch. * The default value is `true`, which means the watch is active by default. */ active?: boolean; /** Only update the watch if the last operation that changed the watch has the specified primary term. */ if_primary_term?: long; /** Only update the watch if the last operation that changed the watch has the specified sequence number. */ if_seq_no?: SequenceNumber; /** Explicit version number for concurrency control. */ version?: VersionNumber; /** The list of actions that will be run if the condition matches. */ actions?: Record; /** The condition that defines if the actions should be run. */ condition?: WatcherConditionContainer; /** The input that loads the data for the watch. */ input?: WatcherInputContainer; /** Metadata JSON that will be copied into the history entries. */ metadata?: Metadata; /** The minimum time between actions being run. * The default is 5 seconds. * This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. * If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period?: Duration; /** Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the `throttle_period` parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period_in_millis?: DurationValue; /** The transform that processes the watch payload to prepare it for the watch actions. */ transform?: TransformContainer; /** The trigger that defines when the watch should run. */ trigger?: WatcherTriggerContainer; /** All values in `body` will be added to the request body.
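*
* A hedged end-to-end sketch of a scheduled log-error watch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client; the id, index pattern, and threshold are illustrative):
* @example
* await client.watcher.putWatch({
*   id: 'error-spike',
*   trigger: { schedule: { interval: '10m' } },
*   input: { search: { request: { indices: ['logs-*'], body: { query: { match: { level: 'error' } } } } } },
*   condition: { compare: { 'ctx.payload.hits.total': { gt: 100 } } },
*   actions: { log_it: { logging: { text: 'More than 100 errors in the last 10 minutes' } } },
* })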
*/ body?: string | ({ [key: string]: any; } & { id?: never; active?: never; if_primary_term?: never; if_seq_no?: never; version?: never; actions?: never; condition?: never; input?: never; metadata?: never; throttle_period?: never; throttle_period_in_millis?: never; transform?: never; trigger?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { id?: never; active?: never; if_primary_term?: never; if_seq_no?: never; version?: never; actions?: never; condition?: never; input?: never; metadata?: never; throttle_period?: never; throttle_period_in_millis?: never; transform?: never; trigger?: never; }; } export interface WatcherPutWatchResponse { created: boolean; _id: Id; _primary_term: long; _seq_no: SequenceNumber; _version: VersionNumber; } export interface WatcherQueryWatchesRequest extends RequestBase { /** The offset from the first result to fetch. * It must be non-negative. */ from?: integer; /** The number of hits to return. * It must be non-negative. */ size?: integer; /** A query that filters the watches to be returned. */ query?: QueryDslQueryContainer; /** One or more fields used to sort the search results. */ sort?: Sort; /** Retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { from?: never; size?: never; query?: never; sort?: never; search_after?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { from?: never; size?: never; query?: never; sort?: never; search_after?: never; }; } export interface WatcherQueryWatchesResponse { /** The total number of watches found. */ count: integer; /** A list of watches based on the `from`, `size`, or `search_after` request body parameters. */ watches: WatcherQueryWatch[]; } export interface WatcherStartRequest extends RequestBase { /** Period to wait for a connection to the master node. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export type WatcherStartResponse = AcknowledgedResponseBase; export interface WatcherStatsRequest extends RequestBase { /** Defines which additional metrics are included in the response. */ metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[]; /** Defines whether stack traces are generated for each watch that is running. */ emit_stacktraces?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { metric?: never; emit_stacktraces?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { metric?: never; emit_stacktraces?: never; }; } export interface WatcherStatsResponse { _nodes: NodeStatistics; cluster_name: Name; manually_stopped: boolean; stats: WatcherStatsWatcherNodeStats[]; } export interface WatcherStatsWatchRecordQueuedStats { /** The time the watch was run. * This is just before the input is being run. */ execution_time: DateTime; } export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { /** The current watch execution phase. 
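*
* A hedged usage sketch (`client` is assumed to be an instantiated `@elastic/elasticsearch` Client):
* @example
* const resp = await client.watcher.stats({ metric: 'current_watches' })
* for (const node of resp.stats) {
*   node.current_watches?.forEach(w => console.log(w.watch_id, w.execution_phase))
* }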
*/ execution_phase: WatcherExecutionPhase; /** The time the watch was triggered by the trigger engine. */ triggered_time: DateTime; executed_actions?: string[]; watch_id: Id; /** The watch record identifier. */ watch_record_id: Id; } export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches'; export interface WatcherStatsWatcherNodeStats { /** The current watches metric gives insight into the watches that are currently being executed by Watcher. * Additional information is shared per watch that is currently executing. * This information includes the `watch_id`, the time its execution started, and its current execution phase. * To include this metric, the `metric` option should be set to `current_watches` or `_all`. * In addition, you can specify the `emit_stacktraces=true` parameter, which adds stack traces for each watch that is being run. * These stack traces can give you more insight into the execution of a watch. */ current_watches?: WatcherStatsWatchRecordStats[]; execution_thread_pool: WatcherExecutionThreadPool; /** Watcher moderates the execution of watches such that their execution won't put too much pressure on the node and its resources. * If too many watches trigger concurrently and there isn't enough capacity to run them all, some of the watches are queued, waiting for the currently running watches to finish. * The queued watches metric gives insight into these queued watches. * * To include this metric, the `metric` option should include `queued_watches` or `_all`. */ queued_watches?: WatcherStatsWatchRecordQueuedStats[]; /** The number of watches currently registered. */ watch_count: long; /** The current state of Watcher. */ watcher_state: WatcherStatsWatcherState; node_id: Id; } export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping'; export interface WatcherStopRequest extends RequestBase { /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export type WatcherStopResponse = AcknowledgedResponseBase; export interface WatcherUpdateSettingsRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration; /** The period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration; 'index.auto_expand_replicas'?: string; 'index.number_of_replicas'?: integer; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; timeout?: never; 'index.auto_expand_replicas'?: never; 'index.number_of_replicas'?: never; }); /** All values in `querystring` will be added to the request querystring. 
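 *
 * As a hedged sketch (all values below are placeholders), an update-settings request that adjusts the replica count of the watches index might look like this:
 *
 *     const settingsRequest: WatcherUpdateSettingsRequest = {
 *       'index.number_of_replicas': 1,   // placeholder replica count for the watches index
 *       timeout: '30s',                  // fail if no response is received within 30 seconds
 *     };
 *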
*/ querystring?: { [key: string]: any; } & { master_timeout?: never; timeout?: never; 'index.auto_expand_replicas'?: never; 'index.number_of_replicas'?: never; }; } export interface WatcherUpdateSettingsResponse { acknowledged: boolean; } export interface XpackInfoBuildInformation { date: DateTime; hash: string; } export interface XpackInfoFeature { available: boolean; description?: string; enabled: boolean; native_code_info?: XpackInfoNativeCodeInformation; } export interface XpackInfoFeatures { aggregate_metric: XpackInfoFeature; analytics: XpackInfoFeature; ccr: XpackInfoFeature; data_streams: XpackInfoFeature; data_tiers: XpackInfoFeature; enrich: XpackInfoFeature; /** @remarks This property is not supported on Elastic Cloud Serverless. */ enterprise_search: XpackInfoFeature; eql: XpackInfoFeature; /** @remarks This property is not supported on Elastic Cloud Serverless. */ esql: XpackInfoFeature; graph: XpackInfoFeature; ilm: XpackInfoFeature; logstash: XpackInfoFeature; logsdb: XpackInfoFeature; ml: XpackInfoFeature; monitoring: XpackInfoFeature; rollup: XpackInfoFeature; runtime_fields?: XpackInfoFeature; searchable_snapshots: XpackInfoFeature; security: XpackInfoFeature; slm: XpackInfoFeature; spatial: XpackInfoFeature; sql: XpackInfoFeature; transform: XpackInfoFeature; /** @remarks This property is not supported on Elastic Cloud Serverless. */ universal_profiling: XpackInfoFeature; voting_only: XpackInfoFeature; watcher: XpackInfoFeature; /** @remarks This property is not supported on Elastic Cloud Serverless. */ archive: XpackInfoFeature; } export interface XpackInfoMinimalLicenseInformation { expiry_date_in_millis: EpochTime; mode: LicenseLicenseType; status: LicenseLicenseStatus; type: LicenseLicenseType; uid: string; } export interface XpackInfoNativeCodeInformation { build_hash: string; version: VersionString; } export interface XpackInfoRequest extends RequestBase { /** A comma-separated list of the information categories to include in the response. * For example, `build,license,features`. */ categories?: XpackInfoXPackCategory[]; /** If this parameter is used, it must be set to `true`. */ accept_enterprise?: boolean; /** Defines whether additional human-readable information is included in the response. * In particular, it adds descriptions and a tag line. */ human?: boolean; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { categories?: never; accept_enterprise?: never; human?: never; }); /** All values in `querystring` will be added to the request querystring. 
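 *
 * As a hedged illustration (the category and flag values are placeholders), an info request limited to build and license details with human-readable output might look like this:
 *
 *     const infoRequest: XpackInfoRequest = {
 *       categories: ['build', 'license'],   // restrict the response to these categories
 *       accept_enterprise: true,            // must be true whenever it is supplied
 *       human: true,                        // include descriptions and the tag line
 *     };
 *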
*/ querystring?: { [key: string]: any; } & { categories?: never; accept_enterprise?: never; human?: never; }; } export interface XpackInfoResponse { build: XpackInfoBuildInformation; features: XpackInfoFeatures; license: XpackInfoMinimalLicenseInformation; tagline: string; } export type XpackInfoXPackCategory = 'build' | 'features' | 'license'; export interface XpackUsageAnalytics extends XpackUsageBase { stats: XpackUsageAnalyticsStatistics; } export interface XpackUsageAnalyticsStatistics { boxplot_usage: long; cumulative_cardinality_usage: long; string_stats_usage: long; top_metrics_usage: long; t_test_usage: long; moving_percentiles_usage: long; normalize_usage: long; rate_usage: long; multi_terms_usage?: long; } export interface XpackUsageArchive extends XpackUsageBase { indices_count: long; } export interface XpackUsageAudit extends XpackUsageFeatureToggle { outputs?: string[]; } export interface XpackUsageBase { available: boolean; enabled: boolean; } export interface XpackUsageCcr extends XpackUsageBase { auto_follow_patterns_count: integer; follower_indices_count: integer; } export interface XpackUsageCounter { active: long; total: long; } export interface XpackUsageDataStreams extends XpackUsageBase { data_streams: long; indices_count: long; } export interface XpackUsageDataTierPhaseStatistics { node_count: long; index_count: long; total_shard_count: long; primary_shard_count: long; doc_count: long; total_size_bytes: long; primary_size_bytes: long; primary_shard_size_avg_bytes: long; primary_shard_size_median_bytes: long; primary_shard_size_mad_bytes: long; } export interface XpackUsageDataTiers extends XpackUsageBase { data_warm: XpackUsageDataTierPhaseStatistics; data_frozen?: XpackUsageDataTierPhaseStatistics; data_cold: XpackUsageDataTierPhaseStatistics; data_content: XpackUsageDataTierPhaseStatistics; data_hot: XpackUsageDataTierPhaseStatistics; } export interface XpackUsageDatafeed { count: long; } export interface XpackUsageEql extends XpackUsageBase { features: XpackUsageEqlFeatures; queries: Record; } export interface XpackUsageEqlFeatures { join: uint; joins: XpackUsageEqlFeaturesJoin; keys: XpackUsageEqlFeaturesKeys; event: uint; pipes: XpackUsageEqlFeaturesPipes; sequence: uint; sequences: XpackUsageEqlFeaturesSequences; } export interface XpackUsageEqlFeaturesJoin { join_queries_two: uint; join_queries_three: uint; join_until: uint; join_queries_five_or_more: uint; join_queries_four: uint; } export interface XpackUsageEqlFeaturesKeys { join_keys_two: uint; join_keys_one: uint; join_keys_three: uint; join_keys_five_or_more: uint; join_keys_four: uint; } export interface XpackUsageEqlFeaturesPipes { pipe_tail: uint; pipe_head: uint; } export interface XpackUsageEqlFeaturesSequences { sequence_queries_three: uint; sequence_queries_four: uint; sequence_queries_two: uint; sequence_until: uint; sequence_queries_five_or_more: uint; sequence_maxspan: uint; } export interface XpackUsageFeatureToggle { enabled: boolean; } export interface XpackUsageFlattened extends XpackUsageBase { field_count: integer; } export interface XpackUsageHealthStatistics extends XpackUsageBase { invocations: XpackUsageInvocations; } export interface XpackUsageIlm { policy_count: integer; policy_stats: XpackUsageIlmPolicyStatistics[]; } export interface XpackUsageIlmPolicyStatistics { indices_managed: integer; phases: XpackUsagePhases; } export interface XpackUsageInvocations { total: long; } export interface XpackUsageIpFilter { http: boolean; transport: boolean; } export interface 
XpackUsageJobUsage { count: integer; created_by: Record; detectors: MlJobStatistics; forecasts: XpackUsageMlJobForecasts; model_size: MlJobStatistics; } export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record; /** Job usage statistics. The `_all` entry is always present and gathers statistics for all jobs. */ jobs: Record; node_count: integer; data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs; inference: XpackUsageMlInference; } export interface XpackUsageMlCounter { count: long; } export interface XpackUsageMlDataFrameAnalyticsJobs { memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory; _all: XpackUsageMlDataFrameAnalyticsJobsCount; analysis_counts?: XpackUsageMlDataFrameAnalyticsJobsAnalysis; stopped?: XpackUsageMlDataFrameAnalyticsJobsCount; } export interface XpackUsageMlDataFrameAnalyticsJobsAnalysis { classification?: integer; outlier_detection?: integer; regression?: integer; } export interface XpackUsageMlDataFrameAnalyticsJobsCount { count: long; } export interface XpackUsageMlDataFrameAnalyticsJobsMemory { peak_usage_bytes: MlJobStatistics; } export interface XpackUsageMlInference { ingest_processors: Record; trained_models: XpackUsageMlInferenceTrainedModels; deployments?: XpackUsageMlInferenceDeployments; } export interface XpackUsageMlInferenceDeployments { count: integer; inference_counts: MlJobStatistics; model_sizes_bytes: MlJobStatistics; time_ms: XpackUsageMlInferenceDeploymentsTimeMs; } export interface XpackUsageMlInferenceDeploymentsTimeMs { avg: double; } export interface XpackUsageMlInferenceIngestProcessor { num_docs_processed: XpackUsageMlInferenceIngestProcessorCount; pipelines: XpackUsageMlCounter; num_failures: XpackUsageMlInferenceIngestProcessorCount; time_ms: XpackUsageMlInferenceIngestProcessorCount; } export interface XpackUsageMlInferenceIngestProcessorCount { max: long; sum: long; min: long; } export interface XpackUsageMlInferenceTrainedModels { estimated_operations?: MlJobStatistics; estimated_heap_memory_usage_bytes?: MlJobStatistics; count?: XpackUsageMlInferenceTrainedModelsCount; _all: XpackUsageMlCounter; model_size_bytes?: MlJobStatistics; } export interface XpackUsageMlInferenceTrainedModelsCount { total: long; prepackaged: long; other: long; pass_through?: long; regression?: long; classification?: long; ner?: long; text_embedding?: long; } export interface XpackUsageMlJobForecasts { total: long; forecasted_jobs: long; } export interface XpackUsageMonitoring extends XpackUsageBase { collection_enabled: boolean; enabled_exporters: Record; } export interface XpackUsagePhase { actions: string[]; min_age: DurationValue; } export interface XpackUsagePhases { cold?: XpackUsagePhase; delete?: XpackUsagePhase; frozen?: XpackUsagePhase; hot?: XpackUsagePhase; warm?: XpackUsagePhase; } export interface XpackUsageQuery { count?: integer; failed?: integer; paging?: integer; total?: integer; } export interface XpackUsageRealm extends XpackUsageBase { name?: string[]; order?: long[]; size?: long[]; cache?: XpackUsageRealmCache[]; has_authorization_realms?: boolean[]; has_default_username_pattern?: boolean[]; has_truststore?: boolean[]; is_authentication_delegated?: boolean[]; } export interface XpackUsageRealmCache { size: long; } export interface XpackUsageRequest extends RequestBase { /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. 
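 *
 * A hedged usage sketch (the timeout value is a placeholder): a usage request that waits up to ten seconds for the master node might look like this:
 *
 *     const usageRequest: XpackUsageRequest = {
 *       master_timeout: '10s',   // give up if the master node does not respond in time
 *     };
 *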
*/ master_timeout?: Duration; /** All values in `body` will be added to the request body. */ body?: string | ({ [key: string]: any; } & { master_timeout?: never; }); /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any; } & { master_timeout?: never; }; } export interface XpackUsageResponse { aggregate_metric: XpackUsageBase; analytics: XpackUsageAnalytics; archive: XpackUsageArchive; watcher: XpackUsageWatcher; ccr: XpackUsageCcr; data_frame?: XpackUsageBase; data_science?: XpackUsageBase; data_streams?: XpackUsageDataStreams; data_tiers: XpackUsageDataTiers; enrich?: XpackUsageBase; eql: XpackUsageEql; flattened?: XpackUsageFlattened; graph: XpackUsageBase; health_api?: XpackUsageHealthStatistics; ilm: XpackUsageIlm; logstash: XpackUsageBase; ml: XpackUsageMachineLearning; monitoring: XpackUsageMonitoring; rollup: XpackUsageBase; runtime_fields?: XpackUsageRuntimeFieldTypes; spatial: XpackUsageBase; searchable_snapshots: XpackUsageSearchableSnapshots; security: XpackUsageSecurity; slm: XpackUsageSlm; sql: XpackUsageSql; transform: XpackUsageBase; vectors?: XpackUsageVector; voting_only: XpackUsageBase; } export interface XpackUsageRoleMapping { enabled: integer; size: integer; } export interface XpackUsageRuntimeFieldTypes extends XpackUsageBase { field_types: XpackUsageRuntimeFieldsType[]; } export interface XpackUsageRuntimeFieldsType { chars_max: long; chars_total: long; count: long; doc_max: long; doc_total: long; index_count: long; lang: string[]; lines_max: long; lines_total: long; name: Field; scriptless_count: long; shadowed_count: long; source_max: long; source_total: long; } export interface XpackUsageSearchableSnapshots extends XpackUsageBase { indices_count: integer; full_copy_indices_count?: integer; shared_cache_indices_count?: integer; } export interface XpackUsageSecurity extends XpackUsageBase { api_key_service: XpackUsageFeatureToggle; anonymous: XpackUsageFeatureToggle; audit: XpackUsageAudit; fips_140: XpackUsageFeatureToggle; ipfilter: XpackUsageIpFilter; realms: Record; role_mapping: Record; roles: XpackUsageSecurityRoles; ssl: XpackUsageSsl; system_key?: XpackUsageFeatureToggle; token_service: XpackUsageFeatureToggle; operator_privileges: XpackUsageBase; } export interface XpackUsageSecurityRoles { native: XpackUsageSecurityRolesNative; dls: XpackUsageSecurityRolesDls; file: XpackUsageSecurityRolesFile; } export interface XpackUsageSecurityRolesDls { bit_set_cache: XpackUsageSecurityRolesDlsBitSetCache; } export interface XpackUsageSecurityRolesDlsBitSetCache { count: integer; memory?: ByteSize; memory_in_bytes: ulong; } export interface XpackUsageSecurityRolesFile { dls: boolean; fls: boolean; size: long; } export interface XpackUsageSecurityRolesNative { dls: boolean; fls: boolean; size: long; } export interface XpackUsageSlm extends XpackUsageBase { policy_count?: integer; policy_stats?: SlmStatistics; } export interface XpackUsageSql extends XpackUsageBase { features: Record; queries: Record; } export interface XpackUsageSsl { http: XpackUsageFeatureToggle; transport: XpackUsageFeatureToggle; } export interface XpackUsageVector extends XpackUsageBase { dense_vector_dims_avg_count: integer; dense_vector_fields_count: integer; sparse_vector_fields_count?: integer; } export interface XpackUsageWatcher extends XpackUsageBase { execution: XpackUsageWatcherActions; watch: XpackUsageWatcherWatch; count: XpackUsageCounter; } export interface XpackUsageWatcherActionTotals { total: Duration; total_time_in_ms: 
DurationValue; } export interface XpackUsageWatcherActions { actions: Record; } export interface XpackUsageWatcherWatch { input: Record; condition?: Record; action?: Record; trigger: XpackUsageWatcherWatchTrigger; } export interface XpackUsageWatcherWatchTrigger { schedule?: XpackUsageWatcherWatchTriggerSchedule; _all: XpackUsageCounter; } export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter { cron: XpackUsageCounter; _all: XpackUsageCounter; } export interface SpecUtilsAdditionalProperties { } export interface SpecUtilsAdditionalProperty { } export interface SpecUtilsCommonQueryParameters { /** When set to `true`, Elasticsearch will include the full stack trace of errors * when they occur. */ error_trace?: boolean; /** A comma-separated list of filters in dot notation that reduce the response * returned by Elasticsearch. */ filter_path?: string | string[]; /** When set to `true`, statistics are returned in a format suitable for humans. * For example `"exists_time": "1h"` for humans and * `"exists_time_in_millis": 3600000` for computers. When disabled, the * human-readable values will be omitted. This makes sense for responses being consumed * only by machines. */ human?: boolean; /** If set to `true`, the returned JSON will be "pretty-formatted". Use * this option for debugging only. */ pretty?: boolean; } export interface SpecUtilsCommonCatQueryParameters { /** Specifies the format to return the columnar data in; it can be set to * `text`, `json`, `cbor`, `yaml`, or `smile`. */ format?: string; /** When set to `true`, the available columns are output. This option * can't be combined with any other query string option. */ help?: boolean; /** When set to `true`, verbose output is enabled. */ v?: boolean; } export interface SpecUtilsOverloadOf { }
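/*
 * A hedged usage sketch for the common query parameters above; the
 * `filter_path` value is a placeholder chosen purely for illustration:
 *
 *     const commonParams: SpecUtilsCommonQueryParameters = {
 *       human: true,                  // report durations such as "1h" instead of raw millis
 *       pretty: true,                 // pretty-print the JSON response (debugging only)
 *       filter_path: 'items.*.error', // trim the response down to the error details
 *     };
 */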