import _m0 from 'protobufjs/minimal';
import { MaintenanceWindow, MaintenanceOperation } from '../../../../../yandex/cloud/mdb/kafka/v1/maintenance';
import { CompressionType, SaslMechanism } from '../../../../../yandex/cloud/mdb/kafka/v1/common';
export declare const protobufPackage = "yandex.cloud.mdb.kafka.v1";
/**
 * An Apache Kafka® cluster resource.
 * For more information, see the [Concepts](/docs/managed-kafka/concepts) section of the documentation.
 */
export interface Cluster {
    /**
     * ID of the Apache Kafka® cluster.
     * This ID is assigned at creation time.
     */
    id: string;
    /** ID of the folder that the Apache Kafka® cluster belongs to. */
    folderId: string;
    /** Creation timestamp. */
    createdAt?: Date;
    /**
     * Name of the Apache Kafka® cluster.
     * The name must be unique within the folder. 1-63 characters long. Value must match the regular expression `[a-zA-Z0-9_-]*`.
     */
    name: string;
    /** Description of the Apache Kafka® cluster. 0-256 characters long. */
    description: string;
    /**
     * Custom labels for the Apache Kafka® cluster as `key:value` pairs.
     * A maximum of 64 labels per resource is allowed.
     */
    labels: {
        [key: string]: string;
    };
    /** Deployment environment of the Apache Kafka® cluster. */
    environment: Cluster_Environment;
    /**
     * Description of monitoring systems relevant to the Apache Kafka® cluster.
     *
     * This field is ignored in the response of the List method.
     */
    monitoring: Monitoring[];
    /**
     * Configuration of the Apache Kafka® cluster.
     *
     * This field is ignored in the response of the List method.
     */
    config?: ConfigSpec;
    /** ID of the network that the cluster belongs to. */
    networkId: string;
    /** Aggregated cluster health. */
    health: Cluster_Health;
    /** Current state of the cluster. */
    status: Cluster_Status;
    /** User security groups. */
    securityGroupIds: string[];
    /** Host groups hosting VMs of the cluster. */
    hostGroupIds: string[];
    /** Deletion protection inhibits deletion of the cluster. */
    deletionProtection: boolean;
    /** Window of maintenance operations. */
    maintenanceWindow?: MaintenanceWindow;
    /** Scheduled maintenance operation. */
    plannedOperation?: MaintenanceOperation;
}
export declare enum Cluster_Environment {
    ENVIRONMENT_UNSPECIFIED = 0,
    /** PRODUCTION - Stable environment with a conservative update policy: only hotfixes are applied during regular maintenance. */
    PRODUCTION = 1,
    /** PRESTABLE - Environment with a more aggressive update policy: new versions are rolled out irrespective of backward compatibility. */
    PRESTABLE = 2,
    UNRECOGNIZED = -1
}
export declare function cluster_EnvironmentFromJSON(object: any): Cluster_Environment;
export declare function cluster_EnvironmentToJSON(object: Cluster_Environment): string;
export declare enum Cluster_Health {
    /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] of all hosts in the cluster is `UNKNOWN`). */
    HEALTH_UNKNOWN = 0,
    /** ALIVE - Cluster is alive and well ([Host.health] of all hosts in the cluster is `ALIVE`). */
    ALIVE = 1,
    /** DEAD - Cluster is inoperable ([Host.health] of all hosts in the cluster is `DEAD`). */
    DEAD = 2,
    /** DEGRADED - Cluster is in a degraded state ([Host.health] of at least one of the hosts in the cluster is not `ALIVE`). */
    DEGRADED = 3,
    UNRECOGNIZED = -1
}
export declare function cluster_HealthFromJSON(object: any): Cluster_Health;
export declare function cluster_HealthToJSON(object: Cluster_Health): string;
export declare enum Cluster_Status {
    /** STATUS_UNKNOWN - Cluster state is unknown. */
    STATUS_UNKNOWN = 0,
    /** CREATING - Cluster is being created. */
    CREATING = 1,
    /** RUNNING - Cluster is running normally. */
    RUNNING = 2,
    /** ERROR - Cluster encountered a problem and cannot operate. */
    ERROR = 3,
    /** UPDATING - Cluster is being updated. */
    UPDATING = 4,
    /** STOPPING - Cluster is stopping. */
    STOPPING = 5,
    /** STOPPED - Cluster stopped. */
    STOPPED = 6,
    /** STARTING - Cluster is starting. */
    STARTING = 7,
    UNRECOGNIZED = -1
}
export declare function cluster_StatusFromJSON(object: any): Cluster_Status;
export declare function cluster_StatusToJSON(object: Cluster_Status): string;
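/*
 * Usage sketch (illustrative, not part of the generated declarations): the
 * paired `*FromJSON`/`*ToJSON` helpers convert between the numeric enum
 * values used on the wire and their proto JSON string names; unknown names
 * map to `UNRECOGNIZED`. The import path and the `someCluster` value are
 * assumed.
 *
 *   import { Cluster_Health, cluster_HealthToJSON, cluster_StatusFromJSON } from './cluster';
 *
 *   cluster_StatusFromJSON('RUNNING');       // => Cluster_Status.RUNNING (2)
 *   cluster_StatusFromJSON('NOT_A_STATUS');  // => Cluster_Status.UNRECOGNIZED (-1)
 *   if (someCluster.health !== Cluster_Health.ALIVE) {
 *     console.warn(`cluster ${someCluster.id} is ${cluster_HealthToJSON(someCluster.health)}`);
 *   }
 */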
export interface Cluster_LabelsEntry {
    key: string;
    value: string;
}
/** Metadata of a monitoring system. */
export interface Monitoring {
    /** Name of the monitoring system. */
    name: string;
    /** Description of the monitoring system. */
    description: string;
    /** Link to the monitoring system charts for the Apache Kafka® cluster. */
    link: string;
}
export interface ConfigSpec {
    /** Version of Apache Kafka® used in the cluster. Possible values: `2.8`, `3.0`, `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`. */
    version: string;
    /** Configuration and resource allocation for Kafka brokers. */
    kafka?: ConfigSpec_Kafka;
    /** Configuration and resource allocation for ZooKeeper hosts. */
    zookeeper?: ConfigSpec_Zookeeper;
    /** IDs of availability zones where Kafka brokers reside. */
    zoneId: string[];
    /** The number of Kafka brokers deployed in each availability zone. */
    brokersCount?: number;
    /**
     * The flag that defines whether a public IP address is assigned to the cluster.
     * If the value is `true`, then the Apache Kafka® cluster is available on the Internet via its public IP address.
     */
    assignPublicIp: boolean;
    /**
     * Allows topics to be managed via the Admin API.
     * Deprecated. The feature is enabled permanently.
     *
     * @deprecated
     */
    unmanagedTopics: boolean;
    /** Enables managed Schema Registry on the cluster. */
    schemaRegistry: boolean;
    /** Access policy for external services. */
    access?: Access;
    /** Configuration of the REST API. */
    restApiConfig?: ConfigSpec_RestAPIConfig;
    /** Disk size autoscaling settings. */
    diskSizeAutoscaling?: DiskSizeAutoscaling;
    /** Configuration and resource allocation for KRaft controller hosts. */
    kraft?: ConfigSpec_KRaft;
}
export interface ConfigSpec_Kafka {
    /** Resources allocated to Kafka brokers. */
    resources?: Resources;
    /** Broker configuration for Apache Kafka® 2.8. */
    kafkaConfig28?: Kafkaconfig28 | undefined;
    /** Broker configuration for Apache Kafka® 3.x. */
    kafkaConfig3?: KafkaConfig3 | undefined;
}
export interface ConfigSpec_Zookeeper {
    /** Resources allocated to ZooKeeper hosts. */
    resources?: Resources;
}
export interface ConfigSpec_KRaft {
    /** Resources allocated to KRaft controller hosts. */
    resources?: Resources;
}
export interface ConfigSpec_RestAPIConfig {
    /** Whether the REST API is enabled for this cluster. */
    enabled: boolean;
}
export interface Resources {
    /**
     * ID of the preset for computational resources available to a host (CPU, memory, etc.).
     * All available presets are listed in the [documentation](/docs/managed-kafka/concepts/instance-types).
     */
    resourcePresetId: string;
    /**
     * Volume of the storage available to a host, in bytes.
     * Must be greater than 2 * partition segment size in bytes * partition count, so that each partition can keep one active segment file and one closed segment file that can be deleted.
     */
    diskSize: number;
    /** Type of the storage environment for the host. */
    diskTypeId: string;
}
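/*
 * Configuration sketch (illustrative; the preset, zone, and size values are
 * assumptions, not recommendations): a three-zone layout with one broker per
 * zone. `version` determines which broker-config variant of `ConfigSpec_Kafka`
 * applies, and `Resources.diskSize` is in bytes.
 *
 *   const spec: ConfigSpec = {
 *     version: '3.5',
 *     kafka: {
 *       resources: {
 *         resourcePresetId: 's2.micro',   // assumed preset ID
 *         diskSize: 34359738368,          // 32 GiB
 *         diskTypeId: 'network-ssd',      // assumed disk type ID
 *       },
 *     },
 *     zoneId: ['ru-central1-a', 'ru-central1-b', 'ru-central1-d'], // assumed zone IDs
 *     brokersCount: 1,
 *     assignPublicIp: false,
 *     unmanagedTopics: false,
 *     schemaRegistry: false,
 *   };
 */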
/** Kafka version 2.8 broker configuration. */
export interface Kafkaconfig28 {
    /** Cluster topics compression type. */
    compressionType: CompressionType;
    /**
     * The number of messages accumulated on a log partition before messages are flushed to disk.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.flush_messages] setting.
     */
    logFlushIntervalMessages?: number;
    /**
     * The maximum time (in milliseconds) that a message in any topic is kept in memory before being flushed to disk.
     * If not set, the value of [log_flush_scheduler_interval_ms] is used.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.flush_ms] setting.
     */
    logFlushIntervalMs?: number;
    /**
     * The frequency of checks (in milliseconds) for any logs that need to be flushed to disk.
     * This check is done by the log flusher.
     */
    logFlushSchedulerIntervalMs?: number;
    /**
     * Partition size limit; Kafka will discard old log segments to free up space if the `delete` [TopicConfig2_8.cleanup_policy] is in effect.
     * This setting is helpful if you need to control the size of a log due to limited disk space.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.retention_bytes] setting.
     */
    logRetentionBytes?: number;
    /** The number of hours to keep a log segment file before deleting it. */
    logRetentionHours?: number;
    /**
     * The number of minutes to keep a log segment file before deleting it.
     *
     * If not set, the value of [log_retention_hours] is used.
     */
    logRetentionMinutes?: number;
    /**
     * The number of milliseconds to keep a log segment file before deleting it.
     *
     * If not set, the value of [log_retention_minutes] is used.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.retention_ms] setting.
     */
    logRetentionMs?: number;
    /**
     * The maximum size of a single log file.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.segment_bytes] setting.
     */
    logSegmentBytes?: number;
    /**
     * Whether to preallocate the file when creating a new segment.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.preallocate] setting.
     * Deprecated. The feature provides no benefit in Yandex Cloud.
     *
     * @deprecated
     */
    logPreallocate?: boolean;
    /** The SO_SNDBUF buffer size of the socket server sockets. If the value is -1, the OS default is used. */
    socketSendBufferBytes?: number;
    /** The SO_RCVBUF buffer size of the socket server sockets. If the value is -1, the OS default is used. */
    socketReceiveBufferBytes?: number;
    /** Enables auto-creation of topics on the server. */
    autoCreateTopicsEnable?: boolean;
    /** Default number of partitions per topic across the whole cluster. */
    numPartitions?: number;
    /** Default replication factor for topics across the whole cluster. */
    defaultReplicationFactor?: number;
    /** The largest record batch size allowed by Kafka. Default value: 1048588. */
    messageMaxBytes?: number;
    /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */
    replicaFetchMaxBytes?: number;
    /** A list of cipher suites. */
    sslCipherSuites: string[];
    /** How long (in minutes) committed offsets are stored after a consumer group loses all its consumers. Default: 10080 (7 days). */
    offsetsRetentionMinutes?: number;
    /** The list of SASL mechanisms enabled in the Kafka server. Default: [SCRAM_SHA_512]. */
    saslEnabledMechanisms: SaslMechanism[];
}
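/*
 * Retention sketch (illustrative; the helper is hypothetical): per the field
 * docs above, `logRetentionMs` falls back to `logRetentionMinutes`, which
 * falls back to `logRetentionHours`, so the most precise unit that is set
 * wins.
 *
 *   function resolveRetentionMs(cfg: Kafkaconfig28): number | undefined {
 *     if (cfg.logRetentionMs !== undefined) return cfg.logRetentionMs;
 *     if (cfg.logRetentionMinutes !== undefined) return cfg.logRetentionMinutes * 60_000;
 *     if (cfg.logRetentionHours !== undefined) return cfg.logRetentionHours * 3_600_000;
 *     return undefined; // none set: the broker default applies
 *   }
 */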
/** Kafka version 3.x broker configuration. */
export interface KafkaConfig3 {
    /** Cluster topics compression type. */
    compressionType: CompressionType;
    /**
     * The number of messages accumulated on a log partition before messages are flushed to disk.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.flush_messages] setting.
     */
    logFlushIntervalMessages?: number;
    /**
     * The maximum time (in milliseconds) that a message in any topic is kept in memory before being flushed to disk.
     * If not set, the value of [log_flush_scheduler_interval_ms] is used.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.flush_ms] setting.
     */
    logFlushIntervalMs?: number;
    /**
     * The frequency of checks (in milliseconds) for any logs that need to be flushed to disk.
     * This check is done by the log flusher.
     */
    logFlushSchedulerIntervalMs?: number;
    /**
     * Partition size limit; Kafka will discard old log segments to free up space if the `delete` [TopicConfig3.cleanup_policy] is in effect.
     * This setting is helpful if you need to control the size of a log due to limited disk space.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.retention_bytes] setting.
     */
    logRetentionBytes?: number;
    /** The number of hours to keep a log segment file before deleting it. */
    logRetentionHours?: number;
    /**
     * The number of minutes to keep a log segment file before deleting it.
     *
     * If not set, the value of [log_retention_hours] is used.
     */
    logRetentionMinutes?: number;
    /**
     * The number of milliseconds to keep a log segment file before deleting it.
     *
     * If not set, the value of [log_retention_minutes] is used.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.retention_ms] setting.
     */
    logRetentionMs?: number;
    /**
     * The maximum size of a single log file.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.segment_bytes] setting.
     */
    logSegmentBytes?: number;
    /**
     * Whether to preallocate the file when creating a new segment.
     *
     * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.preallocate] setting.
     * Deprecated. The feature provides no benefit in Yandex Cloud.
     *
     * @deprecated
     */
    logPreallocate?: boolean;
    /** The SO_SNDBUF buffer size of the socket server sockets. If the value is -1, the OS default is used. */
    socketSendBufferBytes?: number;
    /** The SO_RCVBUF buffer size of the socket server sockets. If the value is -1, the OS default is used. */
    socketReceiveBufferBytes?: number;
    /** Enables auto-creation of topics on the server. */
    autoCreateTopicsEnable?: boolean;
    /** Default number of partitions per topic across the whole cluster. */
    numPartitions?: number;
    /** Default replication factor for topics across the whole cluster. */
    defaultReplicationFactor?: number;
    /** The largest record batch size allowed by Kafka. Default value: 1048588. */
    messageMaxBytes?: number;
    /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */
    replicaFetchMaxBytes?: number;
    /** A list of cipher suites. */
    sslCipherSuites: string[];
    /** How long (in minutes) committed offsets are stored after a consumer group loses all its consumers. Default: 10080 (7 days). */
    offsetsRetentionMinutes?: number;
    /** The list of SASL mechanisms enabled in the Kafka server. Default: [SCRAM_SHA_512]. */
    saslEnabledMechanisms: SaslMechanism[];
}
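/*
 * Version dispatch sketch (illustrative; the selector is hypothetical): the
 * two broker-config variants of `ConfigSpec_Kafka` track the supported major
 * lines, so which one is relevant follows from `ConfigSpec.version`.
 *
 *   function brokerConfig(spec: ConfigSpec): Kafkaconfig28 | KafkaConfig3 | undefined {
 *     if (!spec.kafka) return undefined;
 *     return spec.version === '2.8' ? spec.kafka.kafkaConfig28 : spec.kafka.kafkaConfig3;
 *   }
 */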
/** Cluster host metadata. */
export interface Host {
    /** Name of the host. */
    name: string;
    /** ID of the Apache Kafka® cluster. */
    clusterId: string;
    /** ID of the availability zone where the host resides. */
    zoneId: string;
    /** Host role. If the field has the default value, it is not returned in the response. */
    role: Host_Role;
    /** Computational resources allocated to the host. */
    resources?: Resources;
    /** Aggregated host health data. If the field has the default value, it is not returned in the response. */
    health: Host_Health;
    /** ID of the subnet the host resides in. */
    subnetId: string;
    /**
     * The flag that defines whether a public IP address is assigned to the node.
     *
     * If the value is `true`, then this node is available on the Internet via its public IP address.
     */
    assignPublicIp: boolean;
}
export declare enum Host_Role {
    /** ROLE_UNSPECIFIED - Role of the host is unspecified. Default value. */
    ROLE_UNSPECIFIED = 0,
    /** KAFKA - The host is a Kafka broker. */
    KAFKA = 1,
    /** ZOOKEEPER - The host is a ZooKeeper server. */
    ZOOKEEPER = 2,
    UNRECOGNIZED = -1
}
export declare function host_RoleFromJSON(object: any): Host_Role;
export declare function host_RoleToJSON(object: Host_Role): string;
export declare enum Host_Health {
    /** UNKNOWN - Health of the host is unknown. Default value. */
    UNKNOWN = 0,
    /** ALIVE - The host is performing all its functions normally. */
    ALIVE = 1,
    /** DEAD - The host is inoperable and cannot perform any of its essential functions. */
    DEAD = 2,
    /** DEGRADED - The host is degraded and can perform only some of its essential functions. */
    DEGRADED = 3,
    UNRECOGNIZED = -1
}
export declare function host_HealthFromJSON(object: any): Host_Health;
export declare function host_HealthToJSON(object: Host_Health): string;
export interface Access {
    /** Allow access for DataTransfer. */
    dataTransfer: boolean;
}
export interface DiskSizeAutoscaling {
    /** Threshold of storage usage (in percent) that triggers automatic scaling of the storage during the maintenance window. A zero value disables this threshold. */
    plannedUsageThreshold: number;
    /** Threshold of storage usage (in percent) that triggers immediate automatic scaling of the storage. A zero value disables this threshold. */
    emergencyUsageThreshold: number;
    /** New storage size (in bytes) that is set when one of the thresholds is reached. */
    diskSizeLimit: number;
}
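/*
 * Autoscaling sketch (illustrative; the helper and the exact trigger rule are
 * assumptions based on the field docs): both thresholds are percentages of
 * used storage, a zero threshold is disabled, and `diskSizeLimit` caps the
 * size the storage can grow to.
 *
 *   function wouldScaleImmediately(a: DiskSizeAutoscaling, usedBytes: number, currentBytes: number): boolean {
 *     if (a.emergencyUsageThreshold === 0) return false; // threshold disabled
 *     const usagePercent = (usedBytes / currentBytes) * 100;
 *     return usagePercent >= a.emergencyUsageThreshold && currentBytes < a.diskSizeLimit;
 *   }
 */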
export declare const Cluster: {
    encode(message: Cluster, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Cluster;
    fromJSON(object: any): Cluster;
    toJSON(message: Cluster): unknown;
    fromPartial<I extends Exact<DeepPartial<Cluster>, I>>(object: I): Cluster;
};
export declare const Cluster_LabelsEntry: {
    encode(message: Cluster_LabelsEntry, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Cluster_LabelsEntry;
    fromJSON(object: any): Cluster_LabelsEntry;
    toJSON(message: Cluster_LabelsEntry): unknown;
    fromPartial<I extends Exact<DeepPartial<Cluster_LabelsEntry>, I>>(object: I): Cluster_LabelsEntry;
};
export declare const Monitoring: {
    encode(message: Monitoring, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Monitoring;
    fromJSON(object: any): Monitoring;
    toJSON(message: Monitoring): unknown;
    fromPartial<I extends Exact<DeepPartial<Monitoring>, I>>(object: I): Monitoring;
};
export declare const ConfigSpec: {
    encode(message: ConfigSpec, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec;
    fromJSON(object: any): ConfigSpec;
    toJSON(message: ConfigSpec): unknown;
    fromPartial<I extends Exact<DeepPartial<ConfigSpec>, I>>(object: I): ConfigSpec;
};
export declare const ConfigSpec_Kafka: {
    encode(message: ConfigSpec_Kafka, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec_Kafka;
    fromJSON(object: any): ConfigSpec_Kafka;
    toJSON(message: ConfigSpec_Kafka): unknown;
    fromPartial<I extends Exact<DeepPartial<ConfigSpec_Kafka>, I>>(object: I): ConfigSpec_Kafka;
};
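/*
 * JSON mapping sketch (illustrative; field values are assumed): `fromJSON`
 * and `toJSON` use the proto3 JSON representation, in which enum fields are
 * carried as string names rather than numbers; omitted fields take their
 * proto3 defaults.
 *
 *   const spec = ConfigSpec.fromJSON({ version: '3.5', zoneId: ['ru-central1-a'] });
 *   const json = ConfigSpec.toJSON(spec);
 */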
export declare const ConfigSpec_Zookeeper: {
    encode(message: ConfigSpec_Zookeeper, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec_Zookeeper;
    fromJSON(object: any): ConfigSpec_Zookeeper;
    toJSON(message: ConfigSpec_Zookeeper): unknown;
    fromPartial<I extends Exact<DeepPartial<ConfigSpec_Zookeeper>, I>>(object: I): ConfigSpec_Zookeeper;
};
export declare const ConfigSpec_KRaft: {
    encode(message: ConfigSpec_KRaft, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec_KRaft;
    fromJSON(object: any): ConfigSpec_KRaft;
    toJSON(message: ConfigSpec_KRaft): unknown;
    fromPartial<I extends Exact<DeepPartial<ConfigSpec_KRaft>, I>>(object: I): ConfigSpec_KRaft;
};
export declare const ConfigSpec_RestAPIConfig: {
    encode(message: ConfigSpec_RestAPIConfig, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec_RestAPIConfig;
    fromJSON(object: any): ConfigSpec_RestAPIConfig;
    toJSON(message: ConfigSpec_RestAPIConfig): unknown;
    fromPartial<I extends Exact<DeepPartial<ConfigSpec_RestAPIConfig>, I>>(object: I): ConfigSpec_RestAPIConfig;
};
export declare const Resources: {
    encode(message: Resources, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Resources;
    fromJSON(object: any): Resources;
    toJSON(message: Resources): unknown;
    fromPartial<I extends Exact<DeepPartial<Resources>, I>>(object: I): Resources;
};
export declare const Kafkaconfig28: {
    encode(message: Kafkaconfig28, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Kafkaconfig28;
    fromJSON(object: any): Kafkaconfig28;
    toJSON(message: Kafkaconfig28): unknown;
    fromPartial<I extends Exact<DeepPartial<Kafkaconfig28>, I>>(object: I): Kafkaconfig28;
};
export declare const KafkaConfig3: {
    encode(message: KafkaConfig3, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): KafkaConfig3;
    fromJSON(object: any): KafkaConfig3;
    toJSON(message: KafkaConfig3): unknown;
    fromPartial<I extends Exact<DeepPartial<KafkaConfig3>, I>>(object: I): KafkaConfig3;
};
export declare const Host: {
    encode(message: Host, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Host;
    fromJSON(object: any): Host;
    toJSON(message: Host): unknown;
    fromPartial<I extends Exact<DeepPartial<Host>, I>>(object: I): Host;
};
export declare const Access: {
    encode(message: Access, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Access;
    fromJSON(object: any): Access;
    toJSON(message: Access): unknown;
    fromPartial<I extends Exact<DeepPartial<Access>, I>>(object: I): Access;
};
export declare const DiskSizeAutoscaling: {
    encode(message: DiskSizeAutoscaling, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): DiskSizeAutoscaling;
    fromJSON(object: any): DiskSizeAutoscaling;
    toJSON(message: DiskSizeAutoscaling): unknown;
    fromPartial<I extends Exact<DeepPartial<DiskSizeAutoscaling>, I>>(object: I): DiskSizeAutoscaling;
};
type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
export type DeepPartial<T> = T extends Builtin ? T : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> : T extends {} ? {
    [K in keyof T]?: DeepPartial<T[K]>;
} : Partial<T>;
type KeysOfUnion<T> = T extends T ? keyof T : never;
export type Exact<P, I extends P> = P extends Builtin ? P : P & {
    [K in keyof P]: Exact<P[K], I[K]>;
} & Record<Exclude<keyof I, KeysOfUnion<P>>, never>;
export {};
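/*
 * Codec sketch (illustrative; field values are assumed): every message
 * companion above follows the same shape. `fromPartial` fills proto3 defaults
 * from a deep-partial literal, `encode` writes the protobuf wire format,
 * `decode` parses it back, and the `Exact<DeepPartial<T>, I>` constraint
 * rejects excess properties at compile time by mapping them to `never`.
 *
 *   const cluster = Cluster.fromPartial({ name: 'test-cluster', networkId: 'my-network-id' });
 *   const bytes: Uint8Array = Cluster.encode(cluster).finish();
 *   const roundTripped = Cluster.decode(bytes);
 *
 *   // Compile-time error: 'nme' is not a key of Cluster.
 *   // Cluster.fromPartial({ nme: 'typo' });
 */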