/// import { ChannelCredentials, ChannelOptions, UntypedServiceImplementation, handleUnaryCall, handleServerStreamingCall, Client, ClientUnaryCall, Metadata, CallOptions, ClientReadableStream, ServiceError } from '@grpc/grpc-js';
import _m0 from 'protobufjs/minimal';
import { Cluster_Environment, ConfigSpec, Cluster, Host } from '../../../../../yandex/cloud/mdb/kafka/v1/cluster';
import { MaintenanceWindow } from '../../../../../yandex/cloud/mdb/kafka/v1/maintenance';
import { FieldMask } from '../../../../../google/protobuf/field_mask';
import { TopicSpec } from '../../../../../yandex/cloud/mdb/kafka/v1/topic';
import { UserSpec } from '../../../../../yandex/cloud/mdb/kafka/v1/user';
import { Operation } from '../../../../../yandex/cloud/operation/operation';
export declare const protobufPackage = "yandex.cloud.mdb.kafka.v1";
export interface GetClusterRequest {
    /**
     * ID of the Apache Kafka® Cluster resource to return.
     *
     * To get the cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
}
export interface ListClustersRequest {
    /**
     * ID of the folder to list Apache Kafka® clusters in.
     *
     * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request.
     */
    folderId: string;
    /**
     * The maximum number of results per page to return.
     *
     * If the number of available results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests.
     */
    pageSize: number;
    /**
     * Page token.
     *
     * To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by the previous list request.
     */
    pageToken: string;
    /** Filter support is not currently implemented. Any filters are ignored. */
    filter: string;
}
export interface ListClustersResponse {
    /** List of Apache Kafka® clusters. */
    clusters: Cluster[];
    /**
     * Token that allows you to get the next page of results for list requests.
     *
     * If the number of results is larger than [ListClustersRequest.page_size], use [next_page_token] as the value for the [ListClustersRequest.page_token] parameter in the next list request.
     * Each subsequent list request will have its own [next_page_token] to continue paging through the results.
     */
    nextPageToken: string;
}
export interface CreateClusterRequest {
    /**
     * ID of the folder to create the Apache Kafka® cluster in.
     *
     * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request.
     */
    folderId: string;
    /** Name of the Apache Kafka® cluster. The name must be unique within the folder. */
    name: string;
    /** Description of the Apache Kafka® cluster. */
    description: string;
    /**
     * Custom labels for the Apache Kafka® cluster as `key:value` pairs.
     *
     * For example, "project": "mvp" or "source": "dictionary".
     */
    labels: {
        [key: string]: string;
    };
    /** Deployment environment of the Apache Kafka® cluster. */
    environment: Cluster_Environment;
    /** Kafka and hosts configuration of the Apache Kafka® cluster. */
    configSpec?: ConfigSpec;
    /** One or more configurations of topics to be created in the Apache Kafka® cluster. */
    topicSpecs: TopicSpec[];
    /** Configurations of accounts to be created in the Apache Kafka® cluster. */
    userSpecs: UserSpec[];
    /** ID of the network to create the Apache Kafka® cluster in. */
    networkId: string;
    /** IDs of subnets to create brokers in. */
    subnetId: string[];
    /** User security groups */
    securityGroupIds: string[];
    /** Host groups to place VMs of cluster on. */
    hostGroupIds: string[];
    /** Deletion Protection inhibits deletion of the cluster */
    deletionProtection: boolean;
    /** Window of maintenance operations. */
    maintenanceWindow?: MaintenanceWindow;
}
export interface CreateClusterRequest_LabelsEntry {
    key: string;
    value: string;
}
export interface CreateClusterMetadata {
    /** ID of the Apache Kafka® cluster that is being created. */
    clusterId: string;
}
export interface UpdateClusterRequest {
    /**
     * ID of the Apache Kafka® cluster to update.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
    updateMask?: FieldMask;
    /** New description of the Apache Kafka® cluster. */
    description: string;
    /**
     * Custom labels for the Apache Kafka® cluster as `key:value` pairs.
     *
     * For example, "project": "mvp" or "source": "dictionary".
     *
     * The new set of labels will completely replace the old ones.
     * To add a label, request the current set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set.
     */
    labels: {
        [key: string]: string;
    };
    /**
     * New configuration and resources for hosts in the Apache Kafka® cluster.
     *
     * Use [update_mask] to prevent reverting all cluster settings that are not listed in [config_spec] to their default values.
     */
    configSpec?: ConfigSpec;
    /** New name for the Apache Kafka® cluster. */
    name: string;
    /** User security groups */
    securityGroupIds: string[];
    /** Deletion Protection inhibits deletion of the cluster */
    deletionProtection: boolean;
    /** New maintenance window settings for the cluster. */
    maintenanceWindow?: MaintenanceWindow;
    /** ID of the network to move the cluster to. */
    networkId: string;
    /** IDs of subnets where the hosts are located or a new host is being created */
    subnetIds: string[];
}
export interface UpdateClusterRequest_LabelsEntry {
    key: string;
    value: string;
}
export interface UpdateClusterMetadata {
    /** ID of the Apache Kafka® cluster that is being updated. */
    clusterId: string;
}
export interface DeleteClusterRequest {
    /**
     * ID of the Apache Kafka® cluster to delete.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
}
export interface DeleteClusterMetadata {
    /** ID of the Apache Kafka® cluster that is being deleted. */
    clusterId: string;
}
export interface ListClusterLogsRequest {
    /**
     * ID of the Apache Kafka® cluster to request logs for.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
    /**
     * Columns from the logs table to request.
     *
     * If no columns are specified, full log records are returned.
     */
    columnFilter: string[];
    /** Start timestamp for the logs request. */
    fromTime?: Date;
    /** End timestamp for the logs request. */
    toTime?: Date;
    /**
     * The maximum number of results per page to return.
     *
     * If the number of available results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests.
     */
    pageSize: number;
    /**
     * Page token.
     *
     * To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by the previous list request.
     */
    pageToken: string;
    /**
     * The flag that defines behavior of providing the next page token.
     *
     * If this flag is set to `true`, this API method will always return [ListClusterLogsResponse.next_page_token], even if current page is empty.
     */
    alwaysNextPageToken: boolean;
    /**
     * A filter expression that filters resources listed in the response.
     *
     * The expression must specify:
     * 1. The field name to filter by. Currently filtering can be applied to the `hostname` field.
     * 2. An `=` operator.
     * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`.
     *
     * Example of a filter: `message.hostname='node1.db.cloud.yandex.net'`
     */
    filter: string;
}
/** A single log record. */
export interface LogRecord {
    /** Log record timestamp. */
    timestamp?: Date;
    /** Contents of the log record. */
    message: {
        [key: string]: string;
    };
}
export interface LogRecord_MessageEntry {
    key: string;
    value: string;
}
export interface ListClusterLogsResponse {
    /** Requested log records. */
    logs: LogRecord[];
    /**
     * Token that allows you to get the next page of results for list requests.
     *
     * If the number of results is larger than [ListClusterLogsRequest.page_size], use [next_page_token] as the value for the [ListClusterLogsRequest.page_token] query parameter in the next list request.
     * Each subsequent list request will have its own [next_page_token] to continue paging through the results.
     * This value is interchangeable with [StreamLogRecord.next_record_token] from StreamLogs method.
     */
    nextPageToken: string;
}
export interface StreamLogRecord {
    /** One of the requested log records. */
    record?: LogRecord;
    /**
     * This token allows you to continue streaming logs starting from the exact same record.
     *
     * To continue streaming, specify value of [next_record_token] as value for [StreamClusterLogsRequest.record_token] parameter in the next StreamLogs request.
     *
     * This value is interchangeable with [ListClusterLogsResponse.next_page_token] from ListLogs method.
     */
    nextRecordToken: string;
}
export interface StreamClusterLogsRequest {
    /**
     * ID of the Apache Kafka® cluster.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
    /**
     * Columns from logs table to get in the response.
     *
     * If no columns are specified, full log records are returned.
     */
    columnFilter: string[];
    /** Start timestamp for the logs request. */
    fromTime?: Date;
    /**
     * End timestamp for the logs request.
     *
     * If this field is not set, all existing logs will be sent and then the new ones as they appear.
     * In essence it has `tail -f` semantics.
     */
    toTime?: Date;
    /**
     * Record token.
     *
     * Set [record_token] to the [StreamLogRecord.next_record_token] returned by a previous [ClusterService.StreamLogs] request to start streaming from next log record.
     */
    recordToken: string;
    /**
     * A filter expression that filters resources listed in the response.
     *
     * The expression must specify:
     * 1. The field name to filter by. Currently filtering can be applied to the `hostname` field.
     * 2. An `=` operator.
     * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`.
     *
     * Example of a filter: `message.hostname='node1.db.cloud.yandex.net'`
     */
    filter: string;
}
export interface ListClusterOperationsRequest {
    /** ID of the Apache Kafka® cluster to list operations for. */
    clusterId: string;
    /**
     * The maximum number of results per page to return.
     *
     * If the number of available results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests.
     */
    pageSize: number;
    /**
     * Page token.
     *
     * To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous list request.
     */
    pageToken: string;
}
export interface ListClusterOperationsResponse {
    /** List of operations for the specified Apache Kafka® cluster. */
    operations: Operation[];
    /**
     * Token that allows you to get the next page of results for list requests.
     *
     * If the number of results is larger than [ListClusterOperationsRequest.page_size], use [next_page_token] as the value for the [ListClusterOperationsRequest.page_token] query parameter in the next list request.
     * Each subsequent list request will have its own [next_page_token] to continue paging through the results.
     */
    nextPageToken: string;
}
export interface ListClusterHostsRequest {
    /**
     * ID of the Apache Kafka® cluster.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
    /**
     * The maximum number of results per page to return.
     *
     * If the number of available results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests.
     */
    pageSize: number;
    /**
     * Page token.
     *
     * To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by the previous list request.
     */
    pageToken: string;
}
export interface ListClusterHostsResponse {
    /** List of hosts. */
    hosts: Host[];
    /**
     * Token that allows you to get the next page of results for list requests.
     *
     * If the number of results is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value for the [ListClusterHostsRequest.page_token] query parameter in the next list request.
     * Each subsequent list request will have its own [next_page_token] to continue paging through the results.
     */
    nextPageToken: string;
}
export interface MoveClusterRequest {
    /**
     * ID of the Apache Kafka® cluster to move.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
    /** ID of the destination folder. */
    destinationFolderId: string;
}
export interface MoveClusterMetadata {
    /** ID of the Apache Kafka® cluster being moved. */
    clusterId: string;
    /** ID of the source folder. */
    sourceFolderId: string;
    /** ID of the destination folder. */
    destinationFolderId: string;
}
export interface StartClusterRequest {
    /**
     * ID of the Apache Kafka® cluster to start.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
}
export interface StartClusterMetadata {
    /** ID of the Apache Kafka® cluster. */
    clusterId: string;
}
export interface StopClusterRequest {
    /**
     * ID of the Apache Kafka® cluster to stop.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
}
export interface StopClusterMetadata {
    /** ID of the Apache Kafka® cluster. */
    clusterId: string;
}
export interface RescheduleMaintenanceRequest {
    /** ID of the Kafka cluster to reschedule the maintenance operation for. */
    clusterId: string;
    /** The type of reschedule request. */
    rescheduleType: RescheduleMaintenanceRequest_RescheduleType;
    /** The time until which this maintenance operation should be delayed. The value should be ahead of the first time when the maintenance operation has been scheduled for no more than two weeks. The value can also point to the past moment of time if [reschedule_type.IMMEDIATE] reschedule type is chosen. */
    delayedUntil?: Date;
}
export declare enum RescheduleMaintenanceRequest_RescheduleType {
    RESCHEDULE_TYPE_UNSPECIFIED = 0,
    /** IMMEDIATE - Start the maintenance operation immediately. */
    IMMEDIATE = 1,
    /** NEXT_AVAILABLE_WINDOW - Start the maintenance operation within the next available maintenance window. */
    NEXT_AVAILABLE_WINDOW = 2,
    /** SPECIFIC_TIME - Start the maintenance operation at the specific time. */
    SPECIFIC_TIME = 3,
    UNRECOGNIZED = -1
}
export declare function rescheduleMaintenanceRequest_RescheduleTypeFromJSON(object: any): RescheduleMaintenanceRequest_RescheduleType;
export declare function rescheduleMaintenanceRequest_RescheduleTypeToJSON(object: RescheduleMaintenanceRequest_RescheduleType): string;
export interface RescheduleMaintenanceMetadata {
    /** ID of the Kafka cluster. */
    clusterId: string;
    /** The time until which this maintenance operation is to be delayed.
*/ delayedUntil?: Date; } export declare const GetClusterRequest: { encode(message: GetClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): GetClusterRequest; fromJSON(object: any): GetClusterRequest; toJSON(message: GetClusterRequest): unknown; fromPartial, never>>(object: I): GetClusterRequest; }; export declare const ListClustersRequest: { encode(message: ListClustersRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClustersRequest; fromJSON(object: any): ListClustersRequest; toJSON(message: ListClustersRequest): unknown; fromPartial, never>>(object: I): ListClustersRequest; }; export declare const ListClustersResponse: { encode(message: ListClustersResponse, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClustersResponse; fromJSON(object: any): ListClustersResponse; toJSON(message: ListClustersResponse): unknown; fromPartial, never>) | undefined; environment?: Cluster_Environment | undefined; monitoring?: ({ name?: string | undefined; description?: string | undefined; link?: string | undefined; }[] & ({ name?: string | undefined; description?: string | undefined; link?: string | undefined; } & { name?: string | undefined; description?: string | undefined; link?: string | undefined; } & Record, never>)[] & Record, never>) | undefined; config?: ({ version?: string | undefined; kafka?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; kafkaConfig28?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: 
number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; kafkaConfig3?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; } | undefined; zookeeper?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } | undefined; zoneId?: string[] | undefined; brokersCount?: number | undefined; assignPublicIp?: boolean | undefined; unmanagedTopics?: boolean | undefined; schemaRegistry?: boolean | undefined; access?: { dataTransfer?: boolean | undefined; } | undefined; restApiConfig?: { enabled?: boolean | undefined; } | undefined; diskSizeAutoscaling?: { plannedUsageThreshold?: number 
| undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } | undefined; kraft?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } | undefined; } & { version?: string | undefined; kafka?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; kafkaConfig28?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; kafkaConfig3?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | 
undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; kafkaConfig28?: ({ compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } & { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; 
socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: (string[] & string[] & Record, never>) | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: (import("./common").SaslMechanism[] & import("./common").SaslMechanism[] & Record, never>) | undefined; } & Record, never>) | undefined; kafkaConfig3?: ({ compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } & { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; 
autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: (string[] & string[] & Record, never>) | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: (import("./common").SaslMechanism[] & import("./common").SaslMechanism[] & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; zookeeper?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; zoneId?: (string[] & string[] & Record, never>) | undefined; brokersCount?: number | undefined; assignPublicIp?: boolean | undefined; unmanagedTopics?: boolean | undefined; schemaRegistry?: boolean | undefined; access?: ({ dataTransfer?: boolean | undefined; } & { dataTransfer?: boolean | undefined; } & Record, never>) | undefined; restApiConfig?: ({ enabled?: boolean | undefined; } & { enabled?: boolean | undefined; } & Record, never>) | undefined; diskSizeAutoscaling?: ({ plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } & { plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } & Record, never>) | undefined; kraft?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | 
undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; networkId?: string | undefined; health?: import("../../../../../yandex/cloud/mdb/kafka/v1/cluster").Cluster_Health | undefined; status?: import("../../../../../yandex/cloud/mdb/kafka/v1/cluster").Cluster_Status | undefined; securityGroupIds?: (string[] & string[] & Record, never>) | undefined; hostGroupIds?: (string[] & string[] & Record, never>) | undefined; deletionProtection?: boolean | undefined; maintenanceWindow?: ({ anytime?: {} | undefined; weeklyMaintenanceWindow?: { day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } | undefined; } & { anytime?: ({} & {} & Record, never>) | undefined; weeklyMaintenanceWindow?: ({ day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } & { day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; plannedOperation?: ({ info?: string | undefined; delayedUntil?: Date | undefined; } & { info?: string | undefined; delayedUntil?: Date | undefined; } & Record, never>) | undefined; } & Record, never>)[] & Record, never>) | undefined; nextPageToken?: string | undefined; } & Record, never>>(object: I): ListClustersResponse; }; export declare const CreateClusterRequest: { encode(message: CreateClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): CreateClusterRequest; fromJSON(object: any): CreateClusterRequest; toJSON(message: CreateClusterRequest): unknown; fromPartial, never>) | undefined; environment?: Cluster_Environment | undefined; 
configSpec?: ({ version?: string | undefined; kafka?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; kafkaConfig28?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; kafkaConfig3?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: 
import("./common").SaslMechanism[] | undefined; } | undefined; } | undefined; zookeeper?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } | undefined; zoneId?: string[] | undefined; brokersCount?: number | undefined; assignPublicIp?: boolean | undefined; unmanagedTopics?: boolean | undefined; schemaRegistry?: boolean | undefined; access?: { dataTransfer?: boolean | undefined; } | undefined; restApiConfig?: { enabled?: boolean | undefined; } | undefined; diskSizeAutoscaling?: { plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } | undefined; kraft?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } | undefined; } & { version?: string | undefined; kafka?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; kafkaConfig28?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; kafkaConfig3?: { 
compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; kafkaConfig28?: ({ compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; 
offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } & { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: (string[] & string[] & Record, never>) | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: (import("./common").SaslMechanism[] & import("./common").SaslMechanism[] & Record, never>) | undefined; } & Record, never>) | undefined; kafkaConfig3?: ({ compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; 
saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } & { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: (string[] & string[] & Record, never>) | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: (import("./common").SaslMechanism[] & import("./common").SaslMechanism[] & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; zookeeper?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; zoneId?: (string[] & string[] & Record, never>) | undefined; brokersCount?: number | undefined; assignPublicIp?: boolean | undefined; unmanagedTopics?: boolean | undefined; schemaRegistry?: boolean | undefined; access?: ({ dataTransfer?: boolean | undefined; } & { dataTransfer?: boolean | undefined; } & Record, never>) | undefined; restApiConfig?: ({ enabled?: boolean | undefined; } & { enabled?: boolean | undefined; } & 
Record, never>) | undefined; diskSizeAutoscaling?: ({ plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } & { plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } & Record, never>) | undefined; kraft?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; topicSpecs?: ({ name?: string | undefined; partitions?: number | undefined; replicationFactor?: number | undefined; topicConfig28?: { cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").Topicconfig28_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } | undefined; topicConfig3?: { cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").TopicConfig3_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | 
undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } | undefined; }[] & ({ name?: string | undefined; partitions?: number | undefined; replicationFactor?: number | undefined; topicConfig28?: { cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").Topicconfig28_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } | undefined; topicConfig3?: { cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").TopicConfig3_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } | undefined; } & { name?: string | undefined; partitions?: number | undefined; replicationFactor?: number | undefined; topicConfig28?: ({ cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").Topicconfig28_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | 
undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } & { cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").Topicconfig28_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } & Record, never>) | undefined; topicConfig3?: ({ cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").TopicConfig3_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: number | undefined; preallocate?: boolean | undefined; } & { cleanupPolicy?: import("../../../../../yandex/cloud/mdb/kafka/v1/topic").TopicConfig3_CleanupPolicy | undefined; compressionType?: import("./common").CompressionType | undefined; deleteRetentionMs?: number | undefined; fileDeleteDelayMs?: number | undefined; flushMessages?: number | undefined; flushMs?: number | undefined; minCompactionLagMs?: number | undefined; retentionBytes?: number | undefined; retentionMs?: number | undefined; maxMessageBytes?: number | undefined; minInsyncReplicas?: number | undefined; segmentBytes?: 
number | undefined; preallocate?: boolean | undefined; } & Record, never>) | undefined; } & Record, never>)[] & Record, never>) | undefined; userSpecs?: ({ name?: string | undefined; password?: string | undefined; permissions?: { topicName?: string | undefined; role?: import("../../../../../yandex/cloud/mdb/kafka/v1/user").Permission_AccessRole | undefined; allowHosts?: string[] | undefined; }[] | undefined; }[] & ({ name?: string | undefined; password?: string | undefined; permissions?: { topicName?: string | undefined; role?: import("../../../../../yandex/cloud/mdb/kafka/v1/user").Permission_AccessRole | undefined; allowHosts?: string[] | undefined; }[] | undefined; } & { name?: string | undefined; password?: string | undefined; permissions?: ({ topicName?: string | undefined; role?: import("../../../../../yandex/cloud/mdb/kafka/v1/user").Permission_AccessRole | undefined; allowHosts?: string[] | undefined; }[] & ({ topicName?: string | undefined; role?: import("../../../../../yandex/cloud/mdb/kafka/v1/user").Permission_AccessRole | undefined; allowHosts?: string[] | undefined; } & { topicName?: string | undefined; role?: import("../../../../../yandex/cloud/mdb/kafka/v1/user").Permission_AccessRole | undefined; allowHosts?: (string[] & string[] & Record, never>) | undefined; } & Record, never>)[] & Record, never>) | undefined; } & Record, never>)[] & Record, never>) | undefined; networkId?: string | undefined; subnetId?: (string[] & string[] & Record, never>) | undefined; securityGroupIds?: (string[] & string[] & Record, never>) | undefined; hostGroupIds?: (string[] & string[] & Record, never>) | undefined; deletionProtection?: boolean | undefined; maintenanceWindow?: ({ anytime?: {} | undefined; weeklyMaintenanceWindow?: { day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } | undefined; } & { anytime?: ({} & {} & Record, never>) | undefined; weeklyMaintenanceWindow?: ({ 
day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } & { day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>>(object: I): CreateClusterRequest; }; export declare const CreateClusterRequest_LabelsEntry: { encode(message: CreateClusterRequest_LabelsEntry, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): CreateClusterRequest_LabelsEntry; fromJSON(object: any): CreateClusterRequest_LabelsEntry; toJSON(message: CreateClusterRequest_LabelsEntry): unknown; fromPartial, never>>(object: I): CreateClusterRequest_LabelsEntry; }; export declare const CreateClusterMetadata: { encode(message: CreateClusterMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): CreateClusterMetadata; fromJSON(object: any): CreateClusterMetadata; toJSON(message: CreateClusterMetadata): unknown; fromPartial, never>>(object: I): CreateClusterMetadata; }; export declare const UpdateClusterRequest: { encode(message: UpdateClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): UpdateClusterRequest; fromJSON(object: any): UpdateClusterRequest; toJSON(message: UpdateClusterRequest): unknown; fromPartial, never>) | undefined; } & Record, never>) | undefined; description?: string | undefined; labels?: ({ [x: string]: string | undefined; } & { [x: string]: string | undefined; } & Record, never>) | undefined; configSpec?: ({ version?: string | undefined; kafka?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; kafkaConfig28?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | 
undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; kafkaConfig3?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; } | undefined; zookeeper?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } | undefined; zoneId?: string[] | undefined; brokersCount?: number | undefined; assignPublicIp?: 
boolean | undefined; unmanagedTopics?: boolean | undefined; schemaRegistry?: boolean | undefined; access?: { dataTransfer?: boolean | undefined; } | undefined; restApiConfig?: { enabled?: boolean | undefined; } | undefined; diskSizeAutoscaling?: { plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } | undefined; kraft?: { resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } | undefined; } & { version?: string | undefined; kafka?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; kafkaConfig28?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; kafkaConfig3?: { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; 
logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; kafkaConfig28?: ({ compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } & { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: 
number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: (string[] & string[] & Record, never>) | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: (import("./common").SaslMechanism[] & import("./common").SaslMechanism[] & Record, never>) | undefined; } & Record, never>) | undefined; kafkaConfig3?: ({ compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: string[] | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: import("./common").SaslMechanism[] | undefined; } & { compressionType?: import("./common").CompressionType | undefined; logFlushIntervalMessages?: number | undefined; logFlushIntervalMs?: number | undefined; logFlushSchedulerIntervalMs?: number | undefined; logRetentionBytes?: number | undefined; logRetentionHours?: number | undefined; 
logRetentionMinutes?: number | undefined; logRetentionMs?: number | undefined; logSegmentBytes?: number | undefined; logPreallocate?: boolean | undefined; socketSendBufferBytes?: number | undefined; socketReceiveBufferBytes?: number | undefined; autoCreateTopicsEnable?: boolean | undefined; numPartitions?: number | undefined; defaultReplicationFactor?: number | undefined; messageMaxBytes?: number | undefined; replicaFetchMaxBytes?: number | undefined; sslCipherSuites?: (string[] & string[] & Record, never>) | undefined; offsetsRetentionMinutes?: number | undefined; saslEnabledMechanisms?: (import("./common").SaslMechanism[] & import("./common").SaslMechanism[] & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; zookeeper?: ({ resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; zoneId?: (string[] & string[] & Record, never>) | undefined; brokersCount?: number | undefined; assignPublicIp?: boolean | undefined; unmanagedTopics?: boolean | undefined; schemaRegistry?: boolean | undefined; access?: ({ dataTransfer?: boolean | undefined; } & { dataTransfer?: boolean | undefined; } & Record, never>) | undefined; restApiConfig?: ({ enabled?: boolean | undefined; } & { enabled?: boolean | undefined; } & Record, never>) | undefined; diskSizeAutoscaling?: ({ plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } & { plannedUsageThreshold?: number | undefined; emergencyUsageThreshold?: number | undefined; diskSizeLimit?: number | undefined; } & Record, never>) | undefined; kraft?: ({ 
resources?: { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } | undefined; } & { resources?: ({ resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & { resourcePresetId?: string | undefined; diskSize?: number | undefined; diskTypeId?: string | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; name?: string | undefined; securityGroupIds?: (string[] & string[] & Record, never>) | undefined; deletionProtection?: boolean | undefined; maintenanceWindow?: ({ anytime?: {} | undefined; weeklyMaintenanceWindow?: { day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } | undefined; } & { anytime?: ({} & {} & Record, never>) | undefined; weeklyMaintenanceWindow?: ({ day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } & { day?: import("../../../../../yandex/cloud/mdb/kafka/v1/maintenance").WeeklyMaintenanceWindow_WeekDay | undefined; hour?: number | undefined; } & Record, never>) | undefined; } & Record, never>) | undefined; networkId?: string | undefined; subnetIds?: (string[] & string[] & Record, never>) | undefined; } & Record, never>>(object: I): UpdateClusterRequest; }; export declare const UpdateClusterRequest_LabelsEntry: { encode(message: UpdateClusterRequest_LabelsEntry, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): UpdateClusterRequest_LabelsEntry; fromJSON(object: any): UpdateClusterRequest_LabelsEntry; toJSON(message: UpdateClusterRequest_LabelsEntry): unknown; fromPartial, never>>(object: I): UpdateClusterRequest_LabelsEntry; }; export declare const UpdateClusterMetadata: { encode(message: UpdateClusterMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | 
Uint8Array, length?: number): UpdateClusterMetadata; fromJSON(object: any): UpdateClusterMetadata; toJSON(message: UpdateClusterMetadata): unknown; fromPartial, never>>(object: I): UpdateClusterMetadata; }; export declare const DeleteClusterRequest: { encode(message: DeleteClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): DeleteClusterRequest; fromJSON(object: any): DeleteClusterRequest; toJSON(message: DeleteClusterRequest): unknown; fromPartial, never>>(object: I): DeleteClusterRequest; }; export declare const DeleteClusterMetadata: { encode(message: DeleteClusterMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): DeleteClusterMetadata; fromJSON(object: any): DeleteClusterMetadata; toJSON(message: DeleteClusterMetadata): unknown; fromPartial, never>>(object: I): DeleteClusterMetadata; }; export declare const ListClusterLogsRequest: { encode(message: ListClusterLogsRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClusterLogsRequest; fromJSON(object: any): ListClusterLogsRequest; toJSON(message: ListClusterLogsRequest): unknown; fromPartial, never>) | undefined; fromTime?: Date | undefined; toTime?: Date | undefined; pageSize?: number | undefined; pageToken?: string | undefined; alwaysNextPageToken?: boolean | undefined; filter?: string | undefined; } & Record, never>>(object: I): ListClusterLogsRequest; }; export declare const LogRecord: { encode(message: LogRecord, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): LogRecord; fromJSON(object: any): LogRecord; toJSON(message: LogRecord): unknown; fromPartial, never>) | undefined; } & Record, never>>(object: I): LogRecord; }; export declare const LogRecord_MessageEntry: { encode(message: LogRecord_MessageEntry, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): 
LogRecord_MessageEntry; fromJSON(object: any): LogRecord_MessageEntry; toJSON(message: LogRecord_MessageEntry): unknown; fromPartial, never>>(object: I): LogRecord_MessageEntry; }; export declare const ListClusterLogsResponse: { encode(message: ListClusterLogsResponse, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClusterLogsResponse; fromJSON(object: any): ListClusterLogsResponse; toJSON(message: ListClusterLogsResponse): unknown; fromPartial, never>) | undefined; } & Record, never>)[] & Record, never>) | undefined; nextPageToken?: string | undefined; } & Record, never>>(object: I): ListClusterLogsResponse; }; export declare const StreamLogRecord: { encode(message: StreamLogRecord, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): StreamLogRecord; fromJSON(object: any): StreamLogRecord; toJSON(message: StreamLogRecord): unknown; fromPartial, never>) | undefined; } & Record, never>) | undefined; nextRecordToken?: string | undefined; } & Record, never>>(object: I): StreamLogRecord; }; export declare const StreamClusterLogsRequest: { encode(message: StreamClusterLogsRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): StreamClusterLogsRequest; fromJSON(object: any): StreamClusterLogsRequest; toJSON(message: StreamClusterLogsRequest): unknown; fromPartial, never>) | undefined; fromTime?: Date | undefined; toTime?: Date | undefined; recordToken?: string | undefined; filter?: string | undefined; } & Record, never>>(object: I): StreamClusterLogsRequest; }; export declare const ListClusterOperationsRequest: { encode(message: ListClusterOperationsRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClusterOperationsRequest; fromJSON(object: any): ListClusterOperationsRequest; toJSON(message: ListClusterOperationsRequest): unknown; fromPartial, never>>(object: I): 
ListClusterOperationsRequest; }; export declare const ListClusterOperationsResponse: { encode(message: ListClusterOperationsResponse, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClusterOperationsResponse; fromJSON(object: any): ListClusterOperationsResponse; toJSON(message: ListClusterOperationsResponse): unknown; fromPartial, never>) | undefined; error?: ({ code?: number | undefined; message?: string | undefined; details?: { typeUrl?: string | undefined; value?: Buffer | undefined; }[] | undefined; } & { code?: number | undefined; message?: string | undefined; details?: ({ typeUrl?: string | undefined; value?: Buffer | undefined; }[] & ({ typeUrl?: string | undefined; value?: Buffer | undefined; } & { typeUrl?: string | undefined; value?: Buffer | undefined; } & Record, never>)[] & Record, never>) | undefined; } & Record, never>) | undefined; response?: ({ typeUrl?: string | undefined; value?: Buffer | undefined; } & { typeUrl?: string | undefined; value?: Buffer | undefined; } & Record, never>) | undefined; } & Record, never>)[] & Record, never>) | undefined; nextPageToken?: string | undefined; } & Record, never>>(object: I): ListClusterOperationsResponse; }; export declare const ListClusterHostsRequest: { encode(message: ListClusterHostsRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClusterHostsRequest; fromJSON(object: any): ListClusterHostsRequest; toJSON(message: ListClusterHostsRequest): unknown; fromPartial, never>>(object: I): ListClusterHostsRequest; }; export declare const ListClusterHostsResponse: { encode(message: ListClusterHostsResponse, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): ListClusterHostsResponse; fromJSON(object: any): ListClusterHostsResponse; toJSON(message: ListClusterHostsResponse): unknown; fromPartial, never>) | undefined; health?: 
import("../../../../../yandex/cloud/mdb/kafka/v1/cluster").Host_Health | undefined; subnetId?: string | undefined; assignPublicIp?: boolean | undefined; } & Record, never>)[] & Record, never>) | undefined; nextPageToken?: string | undefined; } & Record, never>>(object: I): ListClusterHostsResponse; }; export declare const MoveClusterRequest: { encode(message: MoveClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): MoveClusterRequest; fromJSON(object: any): MoveClusterRequest; toJSON(message: MoveClusterRequest): unknown; fromPartial, never>>(object: I): MoveClusterRequest; }; export declare const MoveClusterMetadata: { encode(message: MoveClusterMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): MoveClusterMetadata; fromJSON(object: any): MoveClusterMetadata; toJSON(message: MoveClusterMetadata): unknown; fromPartial, never>>(object: I): MoveClusterMetadata; }; export declare const StartClusterRequest: { encode(message: StartClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): StartClusterRequest; fromJSON(object: any): StartClusterRequest; toJSON(message: StartClusterRequest): unknown; fromPartial, never>>(object: I): StartClusterRequest; }; export declare const StartClusterMetadata: { encode(message: StartClusterMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): StartClusterMetadata; fromJSON(object: any): StartClusterMetadata; toJSON(message: StartClusterMetadata): unknown; fromPartial, never>>(object: I): StartClusterMetadata; }; export declare const StopClusterRequest: { encode(message: StopClusterRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): StopClusterRequest; fromJSON(object: any): StopClusterRequest; toJSON(message: StopClusterRequest): unknown; fromPartial, never>>(object: I): StopClusterRequest; 
}; export declare const StopClusterMetadata: { encode(message: StopClusterMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): StopClusterMetadata; fromJSON(object: any): StopClusterMetadata; toJSON(message: StopClusterMetadata): unknown; fromPartial, never>>(object: I): StopClusterMetadata; }; export declare const RescheduleMaintenanceRequest: { encode(message: RescheduleMaintenanceRequest, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): RescheduleMaintenanceRequest; fromJSON(object: any): RescheduleMaintenanceRequest; toJSON(message: RescheduleMaintenanceRequest): unknown; fromPartial, never>>(object: I): RescheduleMaintenanceRequest; }; export declare const RescheduleMaintenanceMetadata: { encode(message: RescheduleMaintenanceMetadata, writer?: _m0.Writer): _m0.Writer; decode(input: _m0.Reader | Uint8Array, length?: number): RescheduleMaintenanceMetadata; fromJSON(object: any): RescheduleMaintenanceMetadata; toJSON(message: RescheduleMaintenanceMetadata): unknown; fromPartial, never>>(object: I): RescheduleMaintenanceMetadata; }; /** A set of methods for managing Apache Kafka® clusters. */ export declare const ClusterServiceService: { /** * Returns the specified Apache Kafka® cluster. * * To get the list of available Apache Kafka® clusters, make a [List] request. */ readonly get: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Get"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: GetClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => GetClusterRequest; readonly responseSerialize: (value: Cluster) => Buffer; readonly responseDeserialize: (value: Buffer) => Cluster; }; /** Retrieves the list of Apache Kafka® clusters that belong to the specified folder. 
*/ readonly list: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/List"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: ListClustersRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => ListClustersRequest; readonly responseSerialize: (value: ListClustersResponse) => Buffer; readonly responseDeserialize: (value: Buffer) => ListClustersResponse; }; /** Creates a new Apache Kafka® cluster in the specified folder. */ readonly create: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Create"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: CreateClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => CreateClusterRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** Updates the specified Apache Kafka® cluster. */ readonly update: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Update"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: UpdateClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => UpdateClusterRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** Deletes the specified Apache Kafka® cluster. */ readonly delete: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Delete"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: DeleteClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => DeleteClusterRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** Moves the specified Apache Kafka® cluster to the specified folder. 
*/ readonly move: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Move"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: MoveClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => MoveClusterRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** Starts the specified Apache Kafka® cluster. */ readonly start: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Start"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: StartClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => StartClusterRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** Stops the specified Apache Kafka® cluster. */ readonly stop: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/Stop"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: StopClusterRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => StopClusterRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** Reschedule planned maintenance operation. */ readonly rescheduleMaintenance: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/RescheduleMaintenance"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: RescheduleMaintenanceRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => RescheduleMaintenanceRequest; readonly responseSerialize: (value: Operation) => Buffer; readonly responseDeserialize: (value: Buffer) => Operation; }; /** * Retrieves logs for the specified Apache Kafka® cluster. * * For more information about logs, see the [Logs](/docs/managed-kafka/operations/cluster-logs) section in the documentation. 
*/ readonly listLogs: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/ListLogs"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: ListClusterLogsRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => ListClusterLogsRequest; readonly responseSerialize: (value: ListClusterLogsResponse) => Buffer; readonly responseDeserialize: (value: Buffer) => ListClusterLogsResponse; }; /** Same as [ListLogs] but using server-side streaming. Also allows for `tail -f` semantics. */ readonly streamLogs: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/StreamLogs"; readonly requestStream: false; readonly responseStream: true; readonly requestSerialize: (value: StreamClusterLogsRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => StreamClusterLogsRequest; readonly responseSerialize: (value: StreamLogRecord) => Buffer; readonly responseDeserialize: (value: Buffer) => StreamLogRecord; }; /** Retrieves the list of operations for the specified Apache Kafka® cluster. */ readonly listOperations: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/ListOperations"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: ListClusterOperationsRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => ListClusterOperationsRequest; readonly responseSerialize: (value: ListClusterOperationsResponse) => Buffer; readonly responseDeserialize: (value: Buffer) => ListClusterOperationsResponse; }; /** Retrieves a list of hosts for the specified Apache Kafka® cluster. 
*/ readonly listHosts: { readonly path: "/yandex.cloud.mdb.kafka.v1.ClusterService/ListHosts"; readonly requestStream: false; readonly responseStream: false; readonly requestSerialize: (value: ListClusterHostsRequest) => Buffer; readonly requestDeserialize: (value: Buffer) => ListClusterHostsRequest; readonly responseSerialize: (value: ListClusterHostsResponse) => Buffer; readonly responseDeserialize: (value: Buffer) => ListClusterHostsResponse; }; }; export interface ClusterServiceServer extends UntypedServiceImplementation { /** * Returns the specified Apache Kafka® cluster. * * To get the list of available Apache Kafka® clusters, make a [List] request. */ get: handleUnaryCall; /** Retrieves the list of Apache Kafka® clusters that belong to the specified folder. */ list: handleUnaryCall; /** Creates a new Apache Kafka® cluster in the specified folder. */ create: handleUnaryCall; /** Updates the specified Apache Kafka® cluster. */ update: handleUnaryCall; /** Deletes the specified Apache Kafka® cluster. */ delete: handleUnaryCall; /** Moves the specified Apache Kafka® cluster to the specified folder. */ move: handleUnaryCall; /** Starts the specified Apache Kafka® cluster. */ start: handleUnaryCall; /** Stops the specified Apache Kafka® cluster. */ stop: handleUnaryCall; /** Reschedule planned maintenance operation. */ rescheduleMaintenance: handleUnaryCall; /** * Retrieves logs for the specified Apache Kafka® cluster. * * For more information about logs, see the [Logs](/docs/managed-kafka/operations/cluster-logs) section in the documentation. */ listLogs: handleUnaryCall; /** Same as [ListLogs] but using server-side streaming. Also allows for `tail -f` semantics. */ streamLogs: handleServerStreamingCall; /** Retrieves the list of operations for the specified Apache Kafka® cluster. */ listOperations: handleUnaryCall; /** Retrieves a list of hosts for the specified Apache Kafka® cluster. 
*/ listHosts: handleUnaryCall; } export interface ClusterServiceClient extends Client { /** * Returns the specified Apache Kafka® cluster. * * To get the list of available Apache Kafka® clusters, make a [List] request. */ get(request: GetClusterRequest, callback: (error: ServiceError | null, response: Cluster) => void): ClientUnaryCall; get(request: GetClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Cluster) => void): ClientUnaryCall; get(request: GetClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Cluster) => void): ClientUnaryCall; /** Retrieves the list of Apache Kafka® clusters that belong to the specified folder. */ list(request: ListClustersRequest, callback: (error: ServiceError | null, response: ListClustersResponse) => void): ClientUnaryCall; list(request: ListClustersRequest, metadata: Metadata, callback: (error: ServiceError | null, response: ListClustersResponse) => void): ClientUnaryCall; list(request: ListClustersRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: ListClustersResponse) => void): ClientUnaryCall; /** Creates a new Apache Kafka® cluster in the specified folder. */ create(request: CreateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; create(request: CreateClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; create(request: CreateClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** Updates the specified Apache Kafka® cluster. 
*/ update(request: UpdateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; update(request: UpdateClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; update(request: UpdateClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** Deletes the specified Apache Kafka® cluster. */ delete(request: DeleteClusterRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; delete(request: DeleteClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; delete(request: DeleteClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** Moves the specified Apache Kafka® cluster to the specified folder. */ move(request: MoveClusterRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; move(request: MoveClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; move(request: MoveClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** Starts the specified Apache Kafka® cluster. */ start(request: StartClusterRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; start(request: StartClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; start(request: StartClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** Stops the specified Apache Kafka® cluster. 
*/ stop(request: StopClusterRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; stop(request: StopClusterRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; stop(request: StopClusterRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** Reschedule planned maintenance operation. */ rescheduleMaintenance(request: RescheduleMaintenanceRequest, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; rescheduleMaintenance(request: RescheduleMaintenanceRequest, metadata: Metadata, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; rescheduleMaintenance(request: RescheduleMaintenanceRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: Operation) => void): ClientUnaryCall; /** * Retrieves logs for the specified Apache Kafka® cluster. * * For more information about logs, see the [Logs](/docs/managed-kafka/operations/cluster-logs) section in the documentation. */ listLogs(request: ListClusterLogsRequest, callback: (error: ServiceError | null, response: ListClusterLogsResponse) => void): ClientUnaryCall; listLogs(request: ListClusterLogsRequest, metadata: Metadata, callback: (error: ServiceError | null, response: ListClusterLogsResponse) => void): ClientUnaryCall; listLogs(request: ListClusterLogsRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: ListClusterLogsResponse) => void): ClientUnaryCall; /** Same as [ListLogs] but using server-side streaming. Also allows for `tail -f` semantics. 
*/ streamLogs(request: StreamClusterLogsRequest, options?: Partial): ClientReadableStream; streamLogs(request: StreamClusterLogsRequest, metadata?: Metadata, options?: Partial): ClientReadableStream; /** Retrieves the list of operations for the specified Apache Kafka® cluster. */ listOperations(request: ListClusterOperationsRequest, callback: (error: ServiceError | null, response: ListClusterOperationsResponse) => void): ClientUnaryCall; listOperations(request: ListClusterOperationsRequest, metadata: Metadata, callback: (error: ServiceError | null, response: ListClusterOperationsResponse) => void): ClientUnaryCall; listOperations(request: ListClusterOperationsRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: ListClusterOperationsResponse) => void): ClientUnaryCall; /** Retrieves a list of hosts for the specified Apache Kafka® cluster. */ listHosts(request: ListClusterHostsRequest, callback: (error: ServiceError | null, response: ListClusterHostsResponse) => void): ClientUnaryCall; listHosts(request: ListClusterHostsRequest, metadata: Metadata, callback: (error: ServiceError | null, response: ListClusterHostsResponse) => void): ClientUnaryCall; listHosts(request: ListClusterHostsRequest, metadata: Metadata, options: Partial, callback: (error: ServiceError | null, response: ListClusterHostsResponse) => void): ClientUnaryCall; } export declare const ClusterServiceClient: { new (address: string, credentials: ChannelCredentials, options?: Partial): ClusterServiceClient; service: typeof ClusterServiceService; }; type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; export type DeepPartial = T extends Builtin ? T : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends {} ? { [K in keyof T]?: DeepPartial; } : Partial; type KeysOfUnion = T extends T ? keyof T : never; export type Exact = P extends Builtin ? P : P & { [K in keyof P]: Exact; } & Record>, never>; export {};