import _m0 from 'protobufjs/minimal';
import { CompressionType } from '../../../../../yandex/cloud/mdb/kafka/v1/common';
export declare const protobufPackage = "yandex.cloud.mdb.kafka.v1";
/**
 * A Kafka topic.
 * For more information, see the [Concepts -> Topics and partitions](/docs/managed-kafka/concepts/topics) section of the documentation.
 */
export interface Topic {
    /** Name of the topic. */
    name: string;
    /**
     * ID of an Apache Kafka® cluster that the topic belongs to.
     *
     * To get the Apache Kafka® cluster ID, make a [ClusterService.List] request.
     */
    clusterId: string;
    /** The number of the topic's partitions. */
    partitions?: number;
    /** Amount of data copies (replicas) for the topic in the cluster. */
    replicationFactor?: number;
    topicConfig28?: Topicconfig28 | undefined;
    topicConfig3?: TopicConfig3 | undefined;
}
export interface TopicSpec {
    /** Name of the topic. */
    name: string;
    /** The number of the topic's partitions. */
    partitions?: number;
    /** Amount of copies (replicas) of the topic data kept in the cluster. */
    replicationFactor?: number;
    topicConfig28?: Topicconfig28 | undefined;
    topicConfig3?: TopicConfig3 | undefined;
}
/** Topic settings for Apache Kafka® 2.8. */
export interface Topicconfig28 {
    /** Retention policy to use on old log messages. */
    cleanupPolicy: Topicconfig28_CleanupPolicy;
    /** The compression type for a given topic. */
    compressionType: CompressionType;
    /** The amount of time in milliseconds to retain delete tombstone markers for log compacted topics. */
    deleteRetentionMs?: number;
    /** The time to wait before deleting a file from the filesystem. */
    fileDeleteDelayMs?: number;
    /**
     * The number of messages accumulated on a log partition before messages are flushed to disk.
     *
     * This setting overrides the cluster-level [KafkaConfig2_8.log_flush_interval_messages] setting on the topic level.
     */
    flushMessages?: number;
    /**
     * The maximum time in milliseconds that a message in the topic is kept in memory before being flushed to disk.
     *
     * This setting overrides the cluster-level [KafkaConfig2_8.log_flush_interval_ms] setting on the topic level.
     */
    flushMs?: number;
    /** The minimum time in milliseconds a message will remain uncompacted in the log. */
    minCompactionLagMs?: number;
    /**
     * The maximum size a partition can grow to before Kafka discards old log segments to free up space, if the `delete` [cleanup_policy] is in effect.
     * It is helpful if you need to control the size of the log due to limited disk space.
     *
     * This setting overrides the cluster-level [KafkaConfig2_8.log_retention_bytes] setting on the topic level.
     */
    retentionBytes?: number;
    /**
     * The number of milliseconds to keep a log segment's file before deleting it.
     *
     * This setting overrides the cluster-level [KafkaConfig2_8.log_retention_ms] setting on the topic level.
     */
    retentionMs?: number;
    /** The largest record batch size allowed in the topic. */
    maxMessageBytes?: number;
    /**
     * This configuration specifies the minimum number of replicas that must acknowledge a write to the topic for the write
     * to be considered successful (when a producer sets acks to "all").
     */
    minInsyncReplicas?: number;
    /**
     * This configuration controls the segment file size for the log. Retention and cleaning are always done a file
     * at a time, so a larger segment size means fewer files but less granular control over retention.
     *
     * This setting overrides the cluster-level [KafkaConfig2_8.log_segment_bytes] setting on the topic level.
     */
    segmentBytes?: number;
    /**
     * True if the file should be preallocated on disk when creating a new log segment.
     *
     * This setting overrides the cluster-level [KafkaConfig2_8.log_preallocate] setting on the topic level.
     * Deprecated. This feature is not useful for Yandex Cloud.
     *
     * @deprecated
     */
    preallocate?: boolean;
}
export declare enum Topicconfig28_CleanupPolicy {
    CLEANUP_POLICY_UNSPECIFIED = 0,
    /** CLEANUP_POLICY_DELETE - This policy discards log segments when either their retention time or the log size limit is reached. See also: [KafkaConfig2_8.log_retention_ms] and other similar parameters. */
    CLEANUP_POLICY_DELETE = 1,
    /** CLEANUP_POLICY_COMPACT - This policy compacts messages in the log. */
    CLEANUP_POLICY_COMPACT = 2,
    /** CLEANUP_POLICY_COMPACT_AND_DELETE - This policy uses both compaction and deletion for messages and log segments. */
    CLEANUP_POLICY_COMPACT_AND_DELETE = 3,
    UNRECOGNIZED = -1
}
export declare function topicconfig28_CleanupPolicyFromJSON(object: any): Topicconfig28_CleanupPolicy;
export declare function topicconfig28_CleanupPolicyToJSON(object: Topicconfig28_CleanupPolicy): string;
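/*
 * Illustrative sketch (not part of the generated declarations): a plain object
 * satisfying the Topicconfig28 interface above. The concrete retention values are
 * made up for the example, and the CompressionType member is assumed to follow the
 * COMPRESSION_TYPE_* naming used in ../common.
 *
 *   const config28: Topicconfig28 = {
 *       cleanupPolicy: Topicconfig28_CleanupPolicy.CLEANUP_POLICY_COMPACT_AND_DELETE,
 *       compressionType: CompressionType.COMPRESSION_TYPE_GZIP,
 *       retentionMs: 7 * 24 * 60 * 60 * 1000,    // keep segments for one week...
 *       retentionBytes: 10 * 1024 * 1024 * 1024,  // ...or until the partition reaches 10 GiB
 *       minInsyncReplicas: 2,                     // acks=all requires two in-sync replicas
 *   };
 */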
/** Topic settings for Apache Kafka® 3.x. */
export interface TopicConfig3 {
    /** Retention policy to use on old log messages. */
    cleanupPolicy: TopicConfig3_CleanupPolicy;
    /** The compression type for a given topic. */
    compressionType: CompressionType;
    /** The amount of time in milliseconds to retain delete tombstone markers for log compacted topics. */
    deleteRetentionMs?: number;
    /** The time to wait before deleting a file from the filesystem. */
    fileDeleteDelayMs?: number;
    /**
     * The number of messages accumulated on a log partition before messages are flushed to disk.
     *
     * This setting overrides the cluster-level [KafkaConfig3.log_flush_interval_messages] setting on the topic level.
     */
    flushMessages?: number;
    /**
     * The maximum time in milliseconds that a message in the topic is kept in memory before being flushed to disk.
     *
     * This setting overrides the cluster-level [KafkaConfig3.log_flush_interval_ms] setting on the topic level.
     */
    flushMs?: number;
    /** The minimum time in milliseconds a message will remain uncompacted in the log. */
    minCompactionLagMs?: number;
    /**
     * The maximum size a partition can grow to before Kafka discards old log segments to free up space, if the `delete` [cleanup_policy] is in effect.
     * It is helpful if you need to control the size of the log due to limited disk space.
     *
     * This setting overrides the cluster-level [KafkaConfig3.log_retention_bytes] setting on the topic level.
     */
    retentionBytes?: number;
    /**
     * The number of milliseconds to keep a log segment's file before deleting it.
     *
     * This setting overrides the cluster-level [KafkaConfig3.log_retention_ms] setting on the topic level.
     */
    retentionMs?: number;
    /** The largest record batch size allowed in the topic. */
    maxMessageBytes?: number;
    /**
     * This configuration specifies the minimum number of replicas that must acknowledge a write to the topic for the write
     * to be considered successful (when a producer sets acks to "all").
     */
    minInsyncReplicas?: number;
    /**
     * This configuration controls the segment file size for the log. Retention and cleaning are always done a file
     * at a time, so a larger segment size means fewer files but less granular control over retention.
     *
     * This setting overrides the cluster-level [KafkaConfig3.log_segment_bytes] setting on the topic level.
     */
    segmentBytes?: number;
    /**
     * True if the file should be preallocated on disk when creating a new log segment.
     *
     * This setting overrides the cluster-level [KafkaConfig3.log_preallocate] setting on the topic level.
     * Deprecated. This feature is not useful for Yandex Cloud.
     *
     * @deprecated
     */
    preallocate?: boolean;
}
export declare enum TopicConfig3_CleanupPolicy {
    CLEANUP_POLICY_UNSPECIFIED = 0,
    /** CLEANUP_POLICY_DELETE - This policy discards log segments when either their retention time or the log size limit is reached. See also: [KafkaConfig3.log_retention_ms] and other similar parameters. */
    CLEANUP_POLICY_DELETE = 1,
    /** CLEANUP_POLICY_COMPACT - This policy compacts messages in the log. */
    CLEANUP_POLICY_COMPACT = 2,
    /** CLEANUP_POLICY_COMPACT_AND_DELETE - This policy uses both compaction and deletion for messages and log segments. */
    CLEANUP_POLICY_COMPACT_AND_DELETE = 3,
    UNRECOGNIZED = -1
}
export declare function topicConfig3_CleanupPolicyFromJSON(object: any): TopicConfig3_CleanupPolicy;
export declare function topicConfig3_CleanupPolicyToJSON(object: TopicConfig3_CleanupPolicy): string;
export declare const Topic: {
    encode(message: Topic, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Topic;
    fromJSON(object: any): Topic;
    toJSON(message: Topic): unknown;
    fromPartial<I extends Exact<DeepPartial<Topic>, I>>(object: I): Topic;
};
export declare const TopicSpec: {
    encode(message: TopicSpec, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): TopicSpec;
    fromJSON(object: any): TopicSpec;
    toJSON(message: TopicSpec): unknown;
    fromPartial<I extends Exact<DeepPartial<TopicSpec>, I>>(object: I): TopicSpec;
};
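/*
 * Usage sketch (assumption: the runtime module that accompanies these declarations
 * exports the Topic codec object declared above). fromPartial fills in defaults for
 * omitted fields; toJSON/fromJSON convert to and from plain JSON-serializable objects.
 * The topic name and cluster ID below are placeholders.
 *
 *   const topic = Topic.fromPartial({
 *       name: 'events',
 *       clusterId: 'example-cluster-id',
 *       partitions: 6,
 *       replicationFactor: 3,
 *   });
 *   const json = Topic.toJSON(topic);       // plain object suitable for JSON.stringify
 *   const restored = Topic.fromJSON(json);  // back to a Topic message
 */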
export declare const Topicconfig28: {
    encode(message: Topicconfig28, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): Topicconfig28;
    fromJSON(object: any): Topicconfig28;
    toJSON(message: Topicconfig28): unknown;
    fromPartial<I extends Exact<DeepPartial<Topicconfig28>, I>>(object: I): Topicconfig28;
};
export declare const TopicConfig3: {
    encode(message: TopicConfig3, writer?: _m0.Writer): _m0.Writer;
    decode(input: _m0.Reader | Uint8Array, length?: number): TopicConfig3;
    fromJSON(object: any): TopicConfig3;
    toJSON(message: TopicConfig3): unknown;
    fromPartial<I extends Exact<DeepPartial<TopicConfig3>, I>>(object: I): TopicConfig3;
};
type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
export type DeepPartial<T> = T extends Builtin ? T : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> : T extends {} ? {
    [K in keyof T]?: DeepPartial<T[K]>;
} : Partial<T>;
type KeysOfUnion<T> = T extends T ? keyof T : never;
export type Exact<P, I extends P> = P extends Builtin ? P : P & {
    [K in keyof P]: Exact<P[K], I[K]>;
} & Record<Exclude<keyof I, KeysOfUnion<P>>, never>;
export {};
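/*
 * Wire-format sketch (illustrative, not part of the generated file): encoding a
 * TopicSpec to protobuf bytes and decoding it back. The topic name and settings are
 * placeholders; the CompressionType member is assumed to exist in ../common with the
 * COMPRESSION_TYPE_* naming.
 *
 *   const spec = TopicSpec.fromPartial({
 *       name: 'orders',
 *       partitions: 12,
 *       replicationFactor: 3,
 *       topicConfig3: {
 *           cleanupPolicy: TopicConfig3_CleanupPolicy.CLEANUP_POLICY_DELETE,
 *           compressionType: CompressionType.COMPRESSION_TYPE_GZIP,
 *           retentionMs: 3 * 24 * 60 * 60 * 1000, // keep data for three days
 *       },
 *   });
 *   const bytes: Uint8Array = TopicSpec.encode(spec).finish();
 *   const roundTripped: TopicSpec = TopicSpec.decode(bytes);
 */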