import * as outputs from "../types/output"; export declare namespace accesscontextmanager { interface AccessLevelBasic { /** * How the conditions list should be combined to determine if a request * is granted this AccessLevel. If AND is used, each Condition in * conditions must be satisfied for the AccessLevel to be applied. If * OR is used, at least one Condition in conditions must be satisfied * for the AccessLevel to be applied. * Default value is `AND`. * Possible values are: `AND`, `OR`. */ combiningFunction?: string; /** * A set of requirements for the AccessLevel to be granted. * Structure is documented below. */ conditions: outputs.accesscontextmanager.AccessLevelBasicCondition[]; } interface AccessLevelBasicCondition { /** * Device-specific restrictions, all restrictions must hold for * the Condition to be true. If not specified, all devices are * allowed. * Structure is documented below. */ devicePolicy?: outputs.accesscontextmanager.AccessLevelBasicConditionDevicePolicy; /** * A list of CIDR block IP subnetwork specifications. May be IPv4 * or IPv6. * Note that for a CIDR IP address block, the specified IP address * portion must be properly truncated (i.e. all the host bits must * be zero) or the input is considered malformed. For example, * "192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, * for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" * is not. The originating IP of a request must be in one of the * listed subnets in order for this Condition to be true. * If empty, all IP addresses are allowed. */ ipSubnetworks?: string[]; /** * An allowed list of members (users, service accounts). * Using groups is not supported yet. * The signed-in user originating the request must be a part of one * of the provided members. If not specified, a request may come * from any user (logged in/not logged in, not present in any * groups, etc.). 
* Formats: `user:{emailid}`, `serviceAccount:{emailid}` */ members?: string[]; /** * Whether to negate the Condition. If true, the Condition becomes * a NAND over its non-empty fields, each field must be false for * the Condition overall to be satisfied. Defaults to false. */ negate?: boolean; /** * The request must originate from one of the provided * countries/regions. * Format: A valid ISO 3166-1 alpha-2 code. */ regions?: string[]; /** * A list of other access levels defined in the same Policy, * referenced by resource name. Referencing an AccessLevel which * does not exist is an error. All access levels listed must be * granted for the Condition to be true. * Format: accessPolicies/{policy_id}/accessLevels/{short_name} */ requiredAccessLevels?: string[]; /** * The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ipSubnetworks`. * Structure is documented below. */ vpcNetworkSources?: outputs.accesscontextmanager.AccessLevelBasicConditionVpcNetworkSource[]; } interface AccessLevelBasicConditionDevicePolicy { /** * A list of allowed device management levels. * An empty list allows all management levels. * Each value may be one of: `MANAGEMENT_UNSPECIFIED`, `NONE`, `BASIC`, `COMPLETE`. */ allowedDeviceManagementLevels?: string[]; /** * A list of allowed encryption statuses. * An empty list allows all statuses. * Each value may be one of: `ENCRYPTION_UNSPECIFIED`, `ENCRYPTION_UNSUPPORTED`, `UNENCRYPTED`, `ENCRYPTED`. */ allowedEncryptionStatuses?: string[]; /** * A list of allowed OS versions. * An empty list allows all types and all versions. * Structure is documented below. */ osConstraints?: outputs.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraint[]; /** * Whether the device needs to be approved by the customer admin. */ requireAdminApproval?: boolean; /** * Whether the device needs to be corp owned. 
*/ requireCorpOwned?: boolean; /** * Whether or not screenlock is required for the DevicePolicy * to be true. Defaults to false. */ requireScreenLock?: boolean; } interface AccessLevelBasicConditionDevicePolicyOsConstraint { /** * The minimum allowed OS version. If not set, any version * of this OS satisfies the constraint. * Format: "major.minor.patch" such as "10.5.301", "9.2.1". */ minimumVersion?: string; /** * The operating system type of the device. * Possible values are: `OS_UNSPECIFIED`, `DESKTOP_MAC`, `DESKTOP_WINDOWS`, `DESKTOP_LINUX`, `DESKTOP_CHROME_OS`, `ANDROID`, `IOS`. */ osType: string; /** * If you specify DESKTOP_CHROME_OS for osType, you can optionally include requireVerifiedChromeOs to require Chrome Verified Access. */ requireVerifiedChromeOs?: boolean; } interface AccessLevelBasicConditionVpcNetworkSource { /** * Sub networks within a VPC network. * Structure is documented below. */ vpcSubnetwork?: outputs.accesscontextmanager.AccessLevelBasicConditionVpcNetworkSourceVpcSubnetwork; } interface AccessLevelBasicConditionVpcNetworkSourceVpcSubnetwork { /** * Required. Network name to be allowed by this Access Level. Networks of foreign organizations require `compute.network.get` permission to be granted to caller. */ network: string; /** * A list of CIDR block IP subnetwork specifications. Must be IPv4. */ vpcIpSubnetworks?: string[]; } interface AccessLevelConditionDevicePolicy { /** * A list of allowed device management levels. * An empty list allows all management levels. * Each value may be one of: `MANAGEMENT_UNSPECIFIED`, `NONE`, `BASIC`, `COMPLETE`. */ allowedDeviceManagementLevels?: string[]; /** * A list of allowed encryption statuses. * An empty list allows all statuses. * Each value may be one of: `ENCRYPTION_UNSPECIFIED`, `ENCRYPTION_UNSUPPORTED`, `UNENCRYPTED`, `ENCRYPTED`. */ allowedEncryptionStatuses?: string[]; /** * A list of allowed OS versions. * An empty list allows all types and all versions. * Structure is documented below. 
*/ osConstraints?: outputs.accesscontextmanager.AccessLevelConditionDevicePolicyOsConstraint[]; /** * Whether the device needs to be approved by the customer admin. */ requireAdminApproval?: boolean; /** * Whether the device needs to be corp owned. */ requireCorpOwned?: boolean; /** * Whether or not screenlock is required for the DevicePolicy * to be true. Defaults to false. */ requireScreenLock?: boolean; } interface AccessLevelConditionDevicePolicyOsConstraint { /** * The minimum allowed OS version. If not set, any version * of this OS satisfies the constraint. * Format: "major.minor.patch" such as "10.5.301", "9.2.1". */ minimumVersion?: string; /** * The operating system type of the device. * Possible values are: `OS_UNSPECIFIED`, `DESKTOP_MAC`, `DESKTOP_WINDOWS`, `DESKTOP_LINUX`, `DESKTOP_CHROME_OS`, `ANDROID`, `IOS`. */ osType: string; } interface AccessLevelConditionVpcNetworkSource { /** * Sub networks within a VPC network. * Structure is documented below. */ vpcSubnetwork?: outputs.accesscontextmanager.AccessLevelConditionVpcNetworkSourceVpcSubnetwork; } interface AccessLevelConditionVpcNetworkSourceVpcSubnetwork { /** * Required. Network name to be allowed by this Access Level. Networks of foreign organizations require `compute.network.get` permission to be granted to caller. */ network: string; /** * CIDR block IP subnetwork specification. Must be IPv4. */ vpcIpSubnetworks?: string[]; } interface AccessLevelCustom { /** * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. * This page details the objects and attributes that are used to build the CEL expressions for * custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec. * Structure is documented below. 
*/ expr: outputs.accesscontextmanager.AccessLevelCustomExpr; } interface AccessLevelCustomExpr { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. */ title?: string; } interface AccessLevelsAccessLevel { /** * A set of predefined conditions for the access level and a combining function. * Structure is documented below. */ basic?: outputs.accesscontextmanager.AccessLevelsAccessLevelBasic; /** * Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. * See CEL spec at: https://github.com/google/cel-spec. * Structure is documented below. */ custom?: outputs.accesscontextmanager.AccessLevelsAccessLevelCustom; /** * Description of the AccessLevel and its use. Does not affect behavior. */ description?: string; /** * Resource name for the Access Level. The shortName component must begin * with a letter and only include alphanumeric and '_'. * Format: accessPolicies/{policy_id}/accessLevels/{short_name} */ name: string; /** * Human-readable title. Must be unique within the Policy. */ title: string; } interface AccessLevelsAccessLevelBasic { /** * How the conditions list should be combined to determine if a request * is granted this AccessLevel. If AND is used, each Condition in * conditions must be satisfied for the AccessLevel to be applied. If * OR is used, at least one Condition in conditions must be satisfied * for the AccessLevel to be applied. * Default value is `AND`. * Possible values are: `AND`, `OR`. */ combiningFunction?: string; /** * A set of requirements for the AccessLevel to be granted. * Structure is documented below. 
*/ conditions: outputs.accesscontextmanager.AccessLevelsAccessLevelBasicCondition[]; } interface AccessLevelsAccessLevelBasicCondition { /** * Device-specific restrictions, all restrictions must hold for * the Condition to be true. If not specified, all devices are * allowed. * Structure is documented below. */ devicePolicy?: outputs.accesscontextmanager.AccessLevelsAccessLevelBasicConditionDevicePolicy; /** * A list of CIDR block IP subnetwork specifications. May be IPv4 * or IPv6. * Note that for a CIDR IP address block, the specified IP address * portion must be properly truncated (i.e. all the host bits must * be zero) or the input is considered malformed. For example, * "192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, * for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" * is not. The originating IP of a request must be in one of the * listed subnets in order for this Condition to be true. * If empty, all IP addresses are allowed. */ ipSubnetworks?: string[]; /** * An allowed list of members (users, service accounts). * Using groups is not supported yet. * The signed-in user originating the request must be a part of one * of the provided members. If not specified, a request may come * from any user (logged in/not logged in, not present in any * groups, etc.). * Formats: `user:{emailid}`, `serviceAccount:{emailid}` */ members?: string[]; /** * Whether to negate the Condition. If true, the Condition becomes * a NAND over its non-empty fields, each field must be false for * the Condition overall to be satisfied. Defaults to false. */ negate?: boolean; /** * The request must originate from one of the provided * countries/regions. * Format: A valid ISO 3166-1 alpha-2 code. */ regions?: string[]; /** * A list of other access levels defined in the same Policy, * referenced by resource name. Referencing an AccessLevel which * does not exist is an error. All access levels listed must be * granted for the Condition to be true. 
* Format: accessPolicies/{policy_id}/accessLevels/{short_name} */ requiredAccessLevels?: string[]; /** * The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ipSubnetworks`. * Structure is documented below. */ vpcNetworkSources?: outputs.accesscontextmanager.AccessLevelsAccessLevelBasicConditionVpcNetworkSource[]; } interface AccessLevelsAccessLevelBasicConditionDevicePolicy { /** * A list of allowed device management levels. * An empty list allows all management levels. * Each value may be one of: `MANAGEMENT_UNSPECIFIED`, `NONE`, `BASIC`, `COMPLETE`. */ allowedDeviceManagementLevels?: string[]; /** * A list of allowed encryption statuses. * An empty list allows all statuses. * Each value may be one of: `ENCRYPTION_UNSPECIFIED`, `ENCRYPTION_UNSUPPORTED`, `UNENCRYPTED`, `ENCRYPTED`. */ allowedEncryptionStatuses?: string[]; /** * A list of allowed OS versions. * An empty list allows all types and all versions. * Structure is documented below. */ osConstraints?: outputs.accesscontextmanager.AccessLevelsAccessLevelBasicConditionDevicePolicyOsConstraint[]; /** * Whether the device needs to be approved by the customer admin. */ requireAdminApproval?: boolean; /** * Whether the device needs to be corp owned. */ requireCorpOwned?: boolean; /** * Whether or not screenlock is required for the DevicePolicy * to be true. Defaults to false. */ requireScreenLock?: boolean; } interface AccessLevelsAccessLevelBasicConditionDevicePolicyOsConstraint { /** * The minimum allowed OS version. If not set, any version * of this OS satisfies the constraint. * Format: "major.minor.patch" such as "10.5.301", "9.2.1". */ minimumVersion?: string; /** * The operating system type of the device. * Possible values are: `OS_UNSPECIFIED`, `DESKTOP_MAC`, `DESKTOP_WINDOWS`, `DESKTOP_LINUX`, `DESKTOP_CHROME_OS`, `ANDROID`, `IOS`. 
*/ osType: string; } interface AccessLevelsAccessLevelBasicConditionVpcNetworkSource { /** * Sub networks within a VPC network. * Structure is documented below. */ vpcSubnetwork?: outputs.accesscontextmanager.AccessLevelsAccessLevelBasicConditionVpcNetworkSourceVpcSubnetwork; } interface AccessLevelsAccessLevelBasicConditionVpcNetworkSourceVpcSubnetwork { /** * Required. Network name to be allowed by this Access Level. Networks of foreign organizations require `compute.network.get` permission to be granted to caller. */ network: string; /** * CIDR block IP subnetwork specification. Must be IPv4. */ vpcIpSubnetworks?: string[]; } interface AccessLevelsAccessLevelCustom { /** * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. * This page details the objects and attributes that are used to build the CEL expressions for * custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec. * Structure is documented below. */ expr: outputs.accesscontextmanager.AccessLevelsAccessLevelCustomExpr; } interface AccessLevelsAccessLevelCustomExpr { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. */ title?: string; } interface AccessPolicyIamBindingCondition { description?: string; expression: string; title: string; } interface AccessPolicyIamMemberCondition { description?: string; expression: string; title: string; } interface GcpUserAccessBindingScopedAccessSetting { /** * Optional. Access settings for this scoped access settings. This field may be empty if dryRunSettings is set. * Structure is documented below. 
*/ activeSettings?: outputs.accesscontextmanager.GcpUserAccessBindingScopedAccessSettingActiveSettings; /** * Optional. Dry-run access settings for this scoped access settings. This field may be empty if activeSettings is set. Cannot contain session settings. * Structure is documented below. */ dryRunSettings?: outputs.accesscontextmanager.GcpUserAccessBindingScopedAccessSettingDryRunSettings; /** * Optional. Application, etc. to which the access settings will be applied. Implicitly, this is the scoped access settings key; as such, it must be unique and non-empty. * Structure is documented below. */ scope?: outputs.accesscontextmanager.GcpUserAccessBindingScopedAccessSettingScope; } interface GcpUserAccessBindingScopedAccessSettingActiveSettings { /** * Optional. Access level that a user must have to be granted access. Only one access level is supported, not multiple. This repeated field must have exactly one element. Example: "accessPolicies/9522/accessLevels/device_trusted" */ accessLevels?: string[]; /** * Optional. Session settings applied to user access on a given AccessScope. * Structure is documented below. */ sessionSettings?: outputs.accesscontextmanager.GcpUserAccessBindingScopedAccessSettingActiveSettingsSessionSettings; } interface GcpUserAccessBindingScopedAccessSettingActiveSettingsSessionSettings { /** * Optional. How long a user is allowed to take between actions before a new access token must be issued. Only set for Google Cloud apps. */ maxInactivity?: string; /** * Optional. The session length. Setting this field to zero is equal to disabling the session. Also can set infinite session by flipping the enabled bit to false below. If useOidcMaxAge is true, for OIDC apps, the session length will be the minimum of this field and OIDC maxAge param. */ sessionLength?: string; /** * Optional. This field enables or disables Google Cloud session length. When false, all fields set above will be disregarded and the session length is basically infinite. 
*/ sessionLengthEnabled?: boolean; /** * Optional. The session challenges proposed to users when the Google Cloud session length is up. * Possible values are: `LOGIN`, `SECURITY_KEY`, `PASSWORD`. */ sessionReauthMethod?: string; /** * Optional. Only useful for OIDC apps. When false, the OIDC maxAge param, if passed in the authentication request will be ignored. When true, the re-auth period will be the minimum of the sessionLength field and the maxAge OIDC param. */ useOidcMaxAge?: boolean; } interface GcpUserAccessBindingScopedAccessSettingDryRunSettings { /** * Optional. Access level that a user must have to be granted access. Only one access level is supported, not multiple. This repeated field must have exactly one element. Example: "accessPolicies/9522/accessLevels/device_trusted" */ accessLevels?: string[]; } interface GcpUserAccessBindingScopedAccessSettingScope { /** * Optional. Client scope for this access scope. * Structure is documented below. */ clientScope?: outputs.accesscontextmanager.GcpUserAccessBindingScopedAccessSettingScopeClientScope; } interface GcpUserAccessBindingScopedAccessSettingScopeClientScope { /** * Optional. The application that is subject to this binding's scope. Only one of clientId or name should be specified. * Structure is documented below. */ restrictedClientApplication?: outputs.accesscontextmanager.GcpUserAccessBindingScopedAccessSettingScopeClientScopeRestrictedClientApplication; } interface GcpUserAccessBindingScopedAccessSettingScopeClientScopeRestrictedClientApplication { /** * The OAuth client ID of the application. */ clientId?: string; /** * The name of the application. Example: "Cloud Console" */ name?: string; } interface GcpUserAccessBindingSessionSettings { /** * Optional. How long a user is allowed to take between actions before a new access token must be issued. Only set for Google Cloud apps. */ maxInactivity?: string; /** * Optional. The session length. Setting this field to zero is equal to disabling session. 
Also can set infinite session by flipping the enabled bit to false below. If useOidcMaxAge is true, for OIDC apps, the session length will be the minimum of this field and OIDC maxAge param. */ sessionLength?: string; /** * Optional. This field enables or disables Google Cloud session length. When false, all fields set above will be disregarded and the session length is basically infinite. */ sessionLengthEnabled?: boolean; /** * Optional. The session challenges proposed to users when the Google Cloud session length is up. * Possible values are: `LOGIN`, `SECURITY_KEY`, `PASSWORD`. */ sessionReauthMethod?: string; /** * Optional. Only useful for OIDC apps. When false, the OIDC maxAge param, if passed in the authentication request, will be ignored. When true, the re-auth period will be the minimum of the sessionLength field and the maxAge OIDC param. */ useOidcMaxAge?: boolean; } interface GetSupportedServiceSupportedMethod { /** * A valid method name for the respective request mode. Must be a fully qualified name, for example, `storage.googleapis.com/BucketService.GetBucket`. */ method: string; /** * A valid Cloud IAM permission for the respective request mode, for example, `storage.buckets.get`. */ permission: string; } interface GetSupportedServicesSupportedService { /** * True if the service is available on the restricted VIP. Services on the restricted VIP typically either support VPC Service Controls or are core infrastructure services required for the functioning of Google Cloud. */ availableOnRestrictedVip: boolean; /** * True if the service is supported with some limitations. Check [documentation](https://cloud.google.com/vpc-service-controls/docs/supported-products) for details. */ knownLimitations: boolean; /** * The service name or address of the supported service, such as `storage.googleapis.com`. */ name: string; /** * The support stage of the service. Values are `GA`, `PREVIEW`, and `DEPRECATED`. 
*/ serviceSupportStage: string; /** * The support stage of the service. */ supportStage: string; /** * The name of the supported product, such as 'Cloud Storage'. */ title: string; } interface ServicePerimeterDryRunEgressPolicyEgressFrom { /** * Identities can be an individual user, service account, Google group, * or third-party identity. For third-party identity, only single identities * are supported and other identity types are not supported. The v1 identities * that have the prefix user, group and serviceAccount in * https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. */ identities?: string[]; /** * Specifies the type of identities that are allowed access to outside the * perimeter. If left unspecified, then members of `identities` field will * be allowed access. * Possible values are: `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`. * Possible values are: `SOURCE_RESTRICTION_UNSPECIFIED`, `SOURCE_RESTRICTION_ENABLED`, `SOURCE_RESTRICTION_DISABLED`. */ sourceRestriction?: string; /** * Sources that this EgressPolicy authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimeterDryRunEgressPolicyEgressFromSource[]; } interface ServicePerimeterDryRunEgressPolicyEgressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. 
`*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterDryRunEgressPolicyEgressTo { /** * A list of external resources that are allowed to be accessed. A request * matches if it contains an external resource in this list (Example: * s3://bucket/path). Currently `*` is not allowed. */ externalResources?: string[]; /** * A list of `ApiOperations` that this egress rule applies to. A request matches * if it contains an operation/service in this list. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterDryRunEgressPolicyEgressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, that match this to stanza. A request matches * if it contains a resource in this list. If * is specified for resources, * then this `EgressTo` rule will authorize access to all resources outside * the perimeter. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `EgressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterDryRunEgressPolicyEgressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterDryRunEgressPolicyEgressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. 
*/ serviceName?: string; } interface ServicePerimeterDryRunEgressPolicyEgressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterDryRunIngressPolicyIngressFrom { /** * Identities can be an individual user, service account, Google group, * or third-party identity. For third-party identity, only single identities * are supported and other identity types are not supported. The v1 identities * that have the prefix user, group and serviceAccount in * https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. */ identities?: string[]; /** * Specifies the type of identities that are allowed access from outside the * perimeter. If left unspecified, then members of `identities` field will be * allowed access. * Possible values are: `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Sources that this `IngressPolicy` authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimeterDryRunIngressPolicyIngressFromSource[]; } interface ServicePerimeterDryRunIngressPolicyIngressFromSource { /** * An `AccessLevel` resource name that allows resources within the * `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed * must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent * `AccessLevel` will cause an error. If no `AccessLevel` names are listed, * resources within the perimeter can only be accessed via Google Cloud calls * with request origins within the perimeter. 
* Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL`. * If * is specified, then all IngressSources will be allowed. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to ingress the perimeter. * Requests from these resources will be allowed to access perimeter data. * Currently only projects are allowed. Format `projects/{project_number}` * The project may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the case * of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterDryRunIngressPolicyIngressTo { /** * A list of `ApiOperations` the sources specified in corresponding `IngressFrom` * are allowed to perform in this `ServicePerimeter`. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterDryRunIngressPolicyIngressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, protected by this `ServicePerimeter` * that are allowed to be accessed by sources defined in the * corresponding `IngressFrom`. A request matches if it contains * a resource in this list. If `*` is specified for resources, * then this `IngressTo` rule will authorize access to all * resources inside the perimeter, provided that the request * also matches the `operations` field. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `IngressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterDryRunIngressPolicyIngressToOperation { /** * API methods or permissions to allow. Method or permission must belong to * the service specified by serviceName field. A single `MethodSelector` entry * with `*` specified for the method field will allow all methods AND * permissions for the service specified in `serviceName`. * Structure is documented below. 
*/ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterDryRunIngressPolicyIngressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimeterDryRunIngressPolicyIngressToOperationMethodSelector { /** * Value for method should be a valid method name for the corresponding * serviceName in `ApiOperation`. If `*` used as value for `method`, then * ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterEgressPolicyEgressFrom { /** * Identities can be an individual user, service account, Google group, * or third-party identity. For third-party identity, only single identities * are supported and other identity types are not supported. The v1 identities * that have the prefix user, group and serviceAccount in * https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. */ identities?: string[]; /** * Specifies the type of identities that are allowed access to outside the * perimeter. If left unspecified, then members of `identities` field will * be allowed access. * Possible values are: `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`. * Possible values are: `SOURCE_RESTRICTION_UNSPECIFIED`, `SOURCE_RESTRICTION_ENABLED`, `SOURCE_RESTRICTION_DISABLED`. */ sourceRestriction?: string; /** * Sources that this EgressPolicy authorizes access from. * Structure is documented below. 
*/ sources?: outputs.accesscontextmanager.ServicePerimeterEgressPolicyEgressFromSource[]; } interface ServicePerimeterEgressPolicyEgressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterEgressPolicyEgressTo { /** * A list of external resources that are allowed to be accessed. A request * matches if it contains an external resource in this list (Example: * s3://bucket/path). Currently `*` is not allowed. */ externalResources?: string[]; /** * A list of `ApiOperations` that this egress rule applies to. A request matches * if it contains an operation/service in this list. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterEgressPolicyEgressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, that match this to stanza. A request matches * if it contains a resource in this list. If * is specified for resources, * then this `EgressTo` rule will authorize access to all resources outside * the perimeter. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `EgressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterEgressPolicyEgressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. 
A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterEgressPolicyEgressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimeterEgressPolicyEgressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterIngressPolicyIngressFrom { /** * Identities can be an individual user, service account, Google group, * or third-party identity. For third-party identity, only single identities * are supported and other identity types are not supported.The v1 identities * that have the prefix user, group and serviceAccount in * https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. */ identities?: string[]; /** * Specifies the type of identities that are allowed access from outside the * perimeter. If left unspecified, then members of `identities` field will be * allowed access. * Possible values are: `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Sources that this `IngressPolicy` authorizes access from. * Structure is documented below. 
*/ sources?: outputs.accesscontextmanager.ServicePerimeterIngressPolicyIngressFromSource[]; } interface ServicePerimeterIngressPolicyIngressFromSource { /** * An `AccessLevel` resource name that allow resources within the * `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed * must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent * `AccessLevel` will cause an error. If no `AccessLevel` names are listed, * resources within the perimeter can only be accessed via Google Cloud calls * with request origins within the perimeter. * Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` * If * is specified, then all IngressSources will be allowed. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to ingress the perimeter. * Requests from these resources will be allowed to access perimeter data. * Currently only projects and VPCs are allowed. * Project format: `projects/{projectNumber}` * VPC network format: * `//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}`. * The project may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the case * of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterIngressPolicyIngressTo { /** * A list of `ApiOperations` the sources specified in corresponding `IngressFrom` * are allowed to perform in this `ServicePerimeter`. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterIngressPolicyIngressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, protected by this `ServicePerimeter` * that are allowed to be accessed by sources defined in the * corresponding `IngressFrom`. A request matches if it contains * a resource in this list. 
If `*` is specified for resources, * then this `IngressTo` rule will authorize access to all * resources inside the perimeter, provided that the request * also matches the `operations` field. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `IngressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterIngressPolicyIngressToOperation { /** * API methods or permissions to allow. Method or permission must belong to * the service specified by serviceName field. A single `MethodSelector` entry * with `*` specified for the method field will allow all methods AND * permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterIngressPolicyIngressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimeterIngressPolicyIngressToOperationMethodSelector { /** * Value for method should be a valid method name for the corresponding * serviceName in `ApiOperation`. If `*` used as value for `method`, then * ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterSpec { /** * A list of AccessLevel resource names that allow resources within * the ServicePerimeter to be accessed from the internet. * AccessLevels listed must be in the same policy as this * ServicePerimeter. Referencing a nonexistent AccessLevel is a * syntax error. 
If no AccessLevel names are listed, resources within * the perimeter can only be accessed via GCP calls with request * origins within the perimeter. For Service Perimeter Bridge, must * be empty. * Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} */ accessLevels?: string[]; /** * List of EgressPolicies to apply to the perimeter. A perimeter may * have multiple EgressPolicies, each of which is evaluated separately. * Access is granted if any EgressPolicy grants it. Must be empty for * a perimeter bridge. * Structure is documented below. */ egressPolicies?: outputs.accesscontextmanager.ServicePerimeterSpecEgressPolicy[]; /** * List of `IngressPolicies` to apply to the perimeter. A perimeter may * have multiple `IngressPolicies`, each of which is evaluated * separately. Access is granted if any `Ingress Policy` grants it. * Must be empty for a perimeter bridge. * Structure is documented below. */ ingressPolicies?: outputs.accesscontextmanager.ServicePerimeterSpecIngressPolicy[]; /** * A list of GCP resources that are inside of the service perimeter. * Currently only projects are allowed. * Format: projects/{project_number} */ resources?: string[]; /** * GCP services that are subject to the Service Perimeter * restrictions. Must contain a list of services. For example, if * `storage.googleapis.com` is specified, access to the storage * buckets inside the perimeter must meet the perimeter's access * restrictions. */ restrictedServices?: string[]; /** * Specifies how APIs are allowed to communicate within the Service * Perimeter. * Structure is documented below. */ vpcAccessibleServices?: outputs.accesscontextmanager.ServicePerimeterSpecVpcAccessibleServices; } interface ServicePerimeterSpecEgressPolicy { /** * Defines conditions on the source of a request causing this `EgressPolicy` to apply. * Structure is documented below. 
*/ egressFrom?: outputs.accesscontextmanager.ServicePerimeterSpecEgressPolicyEgressFrom; /** * Defines the conditions on the `ApiOperation` and destination resources that * cause this `EgressPolicy` to apply. * Structure is documented below. */ egressTo?: outputs.accesscontextmanager.ServicePerimeterSpecEgressPolicyEgressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimeterSpecEgressPolicyEgressFrom { /** * A list of identities that are allowed access through this `EgressPolicy`. * Should be in the format of email address. The email address should * represent individual user or service account only. */ identities?: string[]; /** * Specifies the type of identities that are allowed access to outside the * perimeter. If left unspecified, then members of `identities` field will * be allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`. * Possible values are: `SOURCE_RESTRICTION_UNSPECIFIED`, `SOURCE_RESTRICTION_ENABLED`, `SOURCE_RESTRICTION_DISABLED`. */ sourceRestriction?: string; /** * Sources that this EgressPolicy authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimeterSpecEgressPolicyEgressFromSource[]; } interface ServicePerimeterSpecEgressPolicyEgressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. 
* The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterSpecEgressPolicyEgressTo { /** * A list of external resources that are allowed to be accessed. A request * matches if it contains an external resource in this list (Example: * s3://bucket/path). Currently '*' is not allowed. */ externalResources?: string[]; /** * A list of `ApiOperations` that this egress rule applies to. A request matches * if it contains an operation/service in this list. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterSpecEgressPolicyEgressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, that match this to stanza. A request matches * if it contains a resource in this list. If * is specified for resources, * then this `EgressTo` rule will authorize access to all resources outside * the perimeter. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `EgressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterSpecEgressPolicyEgressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterSpecEgressPolicyEgressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. 
A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimeterSpecEgressPolicyEgressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterSpecIngressPolicy { /** * Defines the conditions on the source of a request causing this `IngressPolicy` * to apply. * Structure is documented below. */ ingressFrom?: outputs.accesscontextmanager.ServicePerimeterSpecIngressPolicyIngressFrom; /** * Defines the conditions on the `ApiOperation` and request destination that cause * this `IngressPolicy` to apply. * Structure is documented below. */ ingressTo?: outputs.accesscontextmanager.ServicePerimeterSpecIngressPolicyIngressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimeterSpecIngressPolicyIngressFrom { /** * A list of identities that are allowed access through this ingress policy. * Should be in the format of email address. The email address should represent * individual user or service account only. */ identities?: string[]; /** * Specifies the type of identities that are allowed access from outside the * perimeter. If left unspecified, then members of `identities` field will be * allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Sources that this `IngressPolicy` authorizes access from. * Structure is documented below. 
*/ sources?: outputs.accesscontextmanager.ServicePerimeterSpecIngressPolicyIngressFromSource[]; } interface ServicePerimeterSpecIngressPolicyIngressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterSpecIngressPolicyIngressTo { /** * A list of `ApiOperations` the sources specified in corresponding `IngressFrom` * are allowed to perform in this `ServicePerimeter`. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterSpecIngressPolicyIngressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, protected by this `ServicePerimeter` * that are allowed to be accessed by sources defined in the * corresponding `IngressFrom`. A request matches if it contains * a resource in this list. If `*` is specified for resources, * then this `IngressTo` rule will authorize access to all * resources inside the perimeter, provided that the request * also matches the `operations` field. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `IngressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterSpecIngressPolicyIngressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. 
A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterSpecIngressPolicyIngressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimeterSpecIngressPolicyIngressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterSpecVpcAccessibleServices { /** * The list of APIs usable within the Service Perimeter. * Must be empty unless `enableRestriction` is True. */ allowedServices?: string[]; /** * Whether to restrict API calls within the Service Perimeter to the * list of APIs specified in 'allowedServices'. */ enableRestriction?: boolean; } interface ServicePerimeterStatus { /** * A list of AccessLevel resource names that allow resources within * the ServicePerimeter to be accessed from the internet. * AccessLevels listed must be in the same policy as this * ServicePerimeter. Referencing a nonexistent AccessLevel is a * syntax error. If no AccessLevel names are listed, resources within * the perimeter can only be accessed via GCP calls with request * origins within the perimeter. For Service Perimeter Bridge, must * be empty. 
* Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} */ accessLevels?: string[]; /** * List of EgressPolicies to apply to the perimeter. A perimeter may * have multiple EgressPolicies, each of which is evaluated separately. * Access is granted if any EgressPolicy grants it. Must be empty for * a perimeter bridge. * Structure is documented below. */ egressPolicies?: outputs.accesscontextmanager.ServicePerimeterStatusEgressPolicy[]; /** * List of `IngressPolicies` to apply to the perimeter. A perimeter may * have multiple `IngressPolicies`, each of which is evaluated * separately. Access is granted if any `Ingress Policy` grants it. * Must be empty for a perimeter bridge. * Structure is documented below. */ ingressPolicies?: outputs.accesscontextmanager.ServicePerimeterStatusIngressPolicy[]; /** * A list of GCP resources that are inside of the service perimeter. * Currently only projects are allowed. * Format: projects/{project_number} */ resources?: string[]; /** * GCP services that are subject to the Service Perimeter * restrictions. Must contain a list of services. For example, if * `storage.googleapis.com` is specified, access to the storage * buckets inside the perimeter must meet the perimeter's access * restrictions. */ restrictedServices?: string[]; /** * Specifies how APIs are allowed to communicate within the Service * Perimeter. * Structure is documented below. */ vpcAccessibleServices?: outputs.accesscontextmanager.ServicePerimeterStatusVpcAccessibleServices; } interface ServicePerimeterStatusEgressPolicy { /** * Defines conditions on the source of a request causing this `EgressPolicy` to apply. * Structure is documented below. */ egressFrom?: outputs.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressFrom; /** * Defines the conditions on the `ApiOperation` and destination resources that * cause this `EgressPolicy` to apply. * Structure is documented below. 
*/ egressTo?: outputs.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimeterStatusEgressPolicyEgressFrom { /** * A list of identities that are allowed access through this `EgressPolicy`. * Should be in the format of email address. The email address should * represent individual user or service account only. */ identities?: string[]; /** * Specifies the type of identities that are allowed access to outside the * perimeter. If left unspecified, then members of `identities` field will * be allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`. * Possible values are: `SOURCE_RESTRICTION_UNSPECIFIED`, `SOURCE_RESTRICTION_ENABLED`, `SOURCE_RESTRICTION_DISABLED`. */ sourceRestriction?: string; /** * Sources that this EgressPolicy authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressFromSource[]; } interface ServicePerimeterStatusEgressPolicyEgressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. 
*/ resource?: string; } interface ServicePerimeterStatusEgressPolicyEgressTo { /** * A list of external resources that are allowed to be accessed. A request * matches if it contains an external resource in this list (Example: * s3://bucket/path). Currently '*' is not allowed. */ externalResources?: string[]; /** * A list of `ApiOperations` that this egress rule applies to. A request matches * if it contains an operation/service in this list. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, that match this to stanza. A request matches * if it contains a resource in this list. If * is specified for resources, * then this `EgressTo` rule will authorize access to all resources outside * the perimeter. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `EgressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterStatusEgressPolicyEgressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. 
*/ serviceName?: string; } interface ServicePerimeterStatusEgressPolicyEgressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterStatusIngressPolicy { /** * Defines the conditions on the source of a request causing this `IngressPolicy` * to apply. * Structure is documented below. */ ingressFrom?: outputs.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFrom; /** * Defines the conditions on the `ApiOperation` and request destination that cause * this `IngressPolicy` to apply. * Structure is documented below. */ ingressTo?: outputs.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimeterStatusIngressPolicyIngressFrom { /** * A list of identities that are allowed access through this ingress policy. * Should be in the format of email address. The email address should represent * individual user or service account only. */ identities?: string[]; /** * Specifies the type of identities that are allowed access from outside the * perimeter. If left unspecified, then members of `identities` field will be * allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Sources that this `IngressPolicy` authorizes access from. * Structure is documented below. 
*/ sources?: outputs.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromSource[]; } interface ServicePerimeterStatusIngressPolicyIngressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimeterStatusIngressPolicyIngressTo { /** * A list of `ApiOperations` the sources specified in corresponding `IngressFrom` * are allowed to perform in this `ServicePerimeter`. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, protected by this `ServicePerimeter` * that are allowed to be accessed by sources defined in the * corresponding `IngressFrom`. A request matches if it contains * a resource in this list. If `*` is specified for resources, * then this `IngressTo` rule will authorize access to all * resources inside the perimeter, provided that the request * also matches the `operations` field. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `IngressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimeterStatusIngressPolicyIngressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. 
A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimeterStatusVpcAccessibleServices { /** * The list of APIs usable within the Service Perimeter. * Must be empty unless `enableRestriction` is True. */ allowedServices?: string[]; /** * Whether to restrict API calls within the Service Perimeter to the * list of APIs specified in 'allowedServices'. */ enableRestriction?: boolean; } interface ServicePerimetersServicePerimeter { /** * (Output) * Time the AccessPolicy was created in UTC. */ createTime: string; /** * Description of the ServicePerimeter and its use. Does not affect * behavior. */ description?: string; /** * Resource name for the ServicePerimeter. The shortName component must * begin with a letter and only include alphanumeric and '_'. * Format: accessPolicies/{policy_id}/servicePerimeters/{short_name} */ name: string; /** * Specifies the type of the Perimeter. There are two types: regular and * bridge. Regular Service Perimeter contains resources, access levels, * and restricted services. 
Every resource can be in at most * ONE regular Service Perimeter. * In addition to being in a regular service perimeter, a resource can also * be in zero or more perimeter bridges. A perimeter bridge only contains * resources. Cross project operations are permitted if all effected * resources share some perimeter (whether bridge or regular). Perimeter * Bridge does not contain access levels or services: those are governed * entirely by the regular perimeter that resource is in. * Perimeter Bridges are typically useful when building more complex * topologies with many independent perimeters that need to share some data * with a common perimeter, but should not be able to share data among * themselves. * Default value is `PERIMETER_TYPE_REGULAR`. * Possible values are: `PERIMETER_TYPE_REGULAR`, `PERIMETER_TYPE_BRIDGE`. */ perimeterType?: string; /** * Proposed (or dry run) ServicePerimeter configuration. * This configuration allows to specify and test ServicePerimeter configuration * without enforcing actual access restrictions. Only allowed to be set when * the `useExplicitDryRunSpec` flag is set. * Structure is documented below. */ spec?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpec; /** * ServicePerimeter configuration. Specifies sets of resources, * restricted services and access levels that determine * perimeter content and boundaries. * Structure is documented below. */ status?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatus; /** * Human readable title. Must be unique within the Policy. */ title: string; /** * (Output) * Time the AccessPolicy was updated in UTC. */ updateTime: string; /** * Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists * for all Service Perimeters, and that spec is identical to the status for those * Service Perimeters. 
When this flag is set, it inhibits the generation of the * implicit spec, thereby allowing the user to explicitly provide a * configuration ("spec") to use in a dry-run version of the Service Perimeter. * This allows the user to test changes to the enforced config ("status") without * actually enforcing them. This testing is done through analyzing the differences * between currently enforced and suggested restrictions. useExplicitDryRunSpec must * bet set to True if any of the fields in the spec are set to non-default values. */ useExplicitDryRunSpec?: boolean; } interface ServicePerimetersServicePerimeterSpec { /** * A list of AccessLevel resource names that allow resources within * the ServicePerimeter to be accessed from the internet. * AccessLevels listed must be in the same policy as this * ServicePerimeter. Referencing a nonexistent AccessLevel is a * syntax error. If no AccessLevel names are listed, resources within * the perimeter can only be accessed via GCP calls with request * origins within the perimeter. For Service Perimeter Bridge, must * be empty. * Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} */ accessLevels?: string[]; /** * List of EgressPolicies to apply to the perimeter. A perimeter may * have multiple EgressPolicies, each of which is evaluated separately. * Access is granted if any EgressPolicy grants it. Must be empty for * a perimeter bridge. * Structure is documented below. */ egressPolicies?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecEgressPolicy[]; /** * List of `IngressPolicies` to apply to the perimeter. A perimeter may * have multiple `IngressPolicies`, each of which is evaluated * separately. Access is granted if any `Ingress Policy` grants it. * Must be empty for a perimeter bridge. * Structure is documented below. */ ingressPolicies?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecIngressPolicy[]; /** * A list of GCP resources that are inside of the service perimeter. 
* Currently only projects are allowed. * Format: projects/{project_number} */ resources?: string[]; /** * GCP services that are subject to the Service Perimeter * restrictions. Must contain a list of services. For example, if * `storage.googleapis.com` is specified, access to the storage * buckets inside the perimeter must meet the perimeter's access * restrictions. */ restrictedServices?: string[]; /** * Specifies how APIs are allowed to communicate within the Service * Perimeter. * Structure is documented below. */ vpcAccessibleServices?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecVpcAccessibleServices; } interface ServicePerimetersServicePerimeterSpecEgressPolicy { /** * Defines conditions on the source of a request causing this `EgressPolicy` to apply. * Structure is documented below. */ egressFrom?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecEgressPolicyEgressFrom; /** * Defines the conditions on the `ApiOperation` and destination resources that * cause this `EgressPolicy` to apply. * Structure is documented below. */ egressTo?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecEgressPolicyEgressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimetersServicePerimeterSpecEgressPolicyEgressFrom { /** * Identities can be an individual user, service account, Google group, * or third-party identity. For third-party identity, only single identities * are supported and other identity types are not supported.The v1 identities * that have the prefix user, group and serviceAccount in * https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. */ identities?: string[]; /** * Specifies the type of identities that are allowed access to outside the * perimeter. If left unspecified, then members of `identities` field will * be allowed access. 
* Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`. * Possible values are: `SOURCE_RESTRICTION_UNSPECIFIED`, `SOURCE_RESTRICTION_ENABLED`, `SOURCE_RESTRICTION_DISABLED`. */ sourceRestriction?: string; /** * Sources that this EgressPolicy authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecEgressPolicyEgressFromSource[]; } interface ServicePerimetersServicePerimeterSpecEgressPolicyEgressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimetersServicePerimeterSpecEgressPolicyEgressTo { /** * A list of external resources that are allowed to be accessed. A request * matches if it contains an external resource in this list (Example: * s3://bucket/path). Currently '*' is not allowed. */ externalResources?: string[]; /** * A list of `ApiOperations` that this egress rule applies to. A request matches * if it contains an operation/service in this list. * Structure is documented below. 
*/ operations?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecEgressPolicyEgressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, that match this to stanza. A request matches * if it contains a resource in this list. If * is specified for resources, * then this `EgressTo` rule will authorize access to all resources outside * the perimeter. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `EgressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimetersServicePerimeterSpecEgressPolicyEgressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecEgressPolicyEgressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimetersServicePerimeterSpecEgressPolicyEgressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. 
*/ permission?: string; } interface ServicePerimetersServicePerimeterSpecIngressPolicy { /** * Defines the conditions on the source of a request causing this `IngressPolicy` * to apply. * Structure is documented below. */ ingressFrom?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecIngressPolicyIngressFrom; /** * Defines the conditions on the `ApiOperation` and request destination that cause * this `IngressPolicy` to apply. * Structure is documented below. */ ingressTo?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecIngressPolicyIngressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimetersServicePerimeterSpecIngressPolicyIngressFrom { /** * A list of identities that are allowed access through this ingress policy. * Should be in the format of email address. The email address should represent * individual user or service account only. */ identities?: string[]; /** * Specifies the type of identities that are allowed access from outside the * perimeter. If left unspecified, then members of `identities` field will be * allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Sources that this `IngressPolicy` authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecIngressPolicyIngressFromSource[]; } interface ServicePerimetersServicePerimeterSpecIngressPolicyIngressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. 
* The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimetersServicePerimeterSpecIngressPolicyIngressTo { /** * A list of `ApiOperations` the sources specified in corresponding `IngressFrom` * are allowed to perform in this `ServicePerimeter`. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecIngressPolicyIngressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, protected by this `ServicePerimeter` * that are allowed to be accessed by sources defined in the * corresponding `IngressFrom`. A request matches if it contains * a resource in this list. If `*` is specified for resources, * then this `IngressTo` rule will authorize access to all * resources inside the perimeter, provided that the request * also matches the `operations` field. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `IngressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimetersServicePerimeterSpecIngressPolicyIngressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterSpecIngressPolicyIngressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. 
A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimetersServicePerimeterSpecIngressPolicyIngressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimetersServicePerimeterSpecVpcAccessibleServices { /** * The list of APIs usable within the Service Perimeter. * Must be empty unless `enableRestriction` is True. */ allowedServices?: string[]; /** * Whether to restrict API calls within the Service Perimeter to the * list of APIs specified in 'allowedServices'. */ enableRestriction?: boolean; } interface ServicePerimetersServicePerimeterStatus { /** * A list of AccessLevel resource names that allow resources within * the ServicePerimeter to be accessed from the internet. * AccessLevels listed must be in the same policy as this * ServicePerimeter. Referencing a nonexistent AccessLevel is a * syntax error. If no AccessLevel names are listed, resources within * the perimeter can only be accessed via GCP calls with request * origins within the perimeter. For Service Perimeter Bridge, must * be empty. * Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} */ accessLevels?: string[]; /** * List of EgressPolicies to apply to the perimeter. A perimeter may * have multiple EgressPolicies, each of which is evaluated separately. * Access is granted if any EgressPolicy grants it. Must be empty for * a perimeter bridge. * Structure is documented below. 
*/ egressPolicies?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusEgressPolicy[]; /** * List of `IngressPolicies` to apply to the perimeter. A perimeter may * have multiple `IngressPolicies`, each of which is evaluated * separately. Access is granted if any `Ingress Policy` grants it. * Must be empty for a perimeter bridge. * Structure is documented below. */ ingressPolicies?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusIngressPolicy[]; /** * A list of GCP resources that are inside of the service perimeter. * Currently only projects are allowed. * Format: projects/{project_number} */ resources?: string[]; /** * GCP services that are subject to the Service Perimeter * restrictions. Must contain a list of services. For example, if * `storage.googleapis.com` is specified, access to the storage * buckets inside the perimeter must meet the perimeter's access * restrictions. */ restrictedServices?: string[]; /** * Specifies how APIs are allowed to communicate within the Service * Perimeter. * Structure is documented below. */ vpcAccessibleServices?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusVpcAccessibleServices; } interface ServicePerimetersServicePerimeterStatusEgressPolicy { /** * Defines conditions on the source of a request causing this `EgressPolicy` to apply. * Structure is documented below. */ egressFrom?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusEgressPolicyEgressFrom; /** * Defines the conditions on the `ApiOperation` and destination resources that * cause this `EgressPolicy` to apply. * Structure is documented below. */ egressTo?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusEgressPolicyEgressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. 
*/ title?: string; } interface ServicePerimetersServicePerimeterStatusEgressPolicyEgressFrom { /** * Identities can be an individual user, service account, Google group, * or third-party identity. For third-party identity, only single identities * are supported and other identity types are not supported.The v1 identities * that have the prefix user, group and serviceAccount in * https://cloud.google.com/iam/docs/principal-identifiers#v1 are supported. */ identities?: string[]; /** * Specifies the type of identities that are allowed access to outside the * perimeter. If left unspecified, then members of `identities` field will * be allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`. * Possible values are: `SOURCE_RESTRICTION_UNSPECIFIED`, `SOURCE_RESTRICTION_ENABLED`, `SOURCE_RESTRICTION_DISABLED`. */ sourceRestriction?: string; /** * Sources that this EgressPolicy authorizes access from. * Structure is documented below. */ sources?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusEgressPolicyEgressFromSource[]; } interface ServicePerimetersServicePerimeterStatusEgressPolicyEgressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. 
*/ resource?: string; } interface ServicePerimetersServicePerimeterStatusEgressPolicyEgressTo { /** * A list of external resources that are allowed to be accessed. A request * matches if it contains an external resource in this list (Example: * s3://bucket/path). Currently '*' is not allowed. */ externalResources?: string[]; /** * A list of `ApiOperations` that this egress rule applies to. A request matches * if it contains an operation/service in this list. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusEgressPolicyEgressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, that match this to stanza. A request matches * if it contains a resource in this list. If * is specified for resources, * then this `EgressTo` rule will authorize access to all resources outside * the perimeter. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `EgressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimetersServicePerimeterStatusEgressPolicyEgressToOperation { /** * API methods or permissions to allow. Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusEgressPolicyEgressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. 
*/ serviceName?: string; } interface ServicePerimetersServicePerimeterStatusEgressPolicyEgressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimetersServicePerimeterStatusIngressPolicy { /** * Defines the conditions on the source of a request causing this `IngressPolicy` * to apply. * Structure is documented below. */ ingressFrom?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusIngressPolicyIngressFrom; /** * Defines the conditions on the `ApiOperation` and request destination that cause * this `IngressPolicy` to apply. * Structure is documented below. */ ingressTo?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusIngressPolicyIngressTo; /** * Human readable title. Must be unique within the perimeter. Does not affect behavior. */ title?: string; } interface ServicePerimetersServicePerimeterStatusIngressPolicyIngressFrom { /** * A list of identities that are allowed access through this ingress policy. * Should be in the format of email address. The email address should represent * individual user or service account only. */ identities?: string[]; /** * Specifies the type of identities that are allowed access from outside the * perimeter. If left unspecified, then members of `identities` field will be * allowed access. * Possible values are: `IDENTITY_TYPE_UNSPECIFIED`, `ANY_IDENTITY`, `ANY_USER_ACCOUNT`, `ANY_SERVICE_ACCOUNT`. */ identityType?: string; /** * Sources that this `IngressPolicy` authorizes access from. * Structure is documented below. 
*/ sources?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusIngressPolicyIngressFromSource[]; } interface ServicePerimetersServicePerimeterStatusIngressPolicyIngressFromSource { /** * An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside. */ accessLevel?: string; /** * A Google Cloud resource that is allowed to egress the perimeter. * Requests from these resources are allowed to access data outside the perimeter. * Currently only projects are allowed. Project format: `projects/{project_number}`. * The resource may be in any Google Cloud organization, not just the * organization that the perimeter is defined in. `*` is not allowed, the * case of allowing all Google Cloud resources only is not supported. */ resource?: string; } interface ServicePerimetersServicePerimeterStatusIngressPolicyIngressTo { /** * A list of `ApiOperations` the sources specified in corresponding `IngressFrom` * are allowed to perform in this `ServicePerimeter`. * Structure is documented below. */ operations?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusIngressPolicyIngressToOperation[]; /** * A list of resources, currently only projects in the form * `projects/`, protected by this `ServicePerimeter` * that are allowed to be accessed by sources defined in the * corresponding `IngressFrom`. A request matches if it contains * a resource in this list. If `*` is specified for resources, * then this `IngressTo` rule will authorize access to all * resources inside the perimeter, provided that the request * also matches the `operations` field. */ resources?: string[]; /** * A list of IAM roles that represent the set of operations that the sources * specified in the corresponding `IngressFrom` * are allowed to perform. */ roles?: string[]; } interface ServicePerimetersServicePerimeterStatusIngressPolicyIngressToOperation { /** * API methods or permissions to allow. 
Method or permission must belong * to the service specified by `serviceName` field. A single MethodSelector * entry with `*` specified for the `method` field will allow all methods * AND permissions for the service specified in `serviceName`. * Structure is documented below. */ methodSelectors?: outputs.accesscontextmanager.ServicePerimetersServicePerimeterStatusIngressPolicyIngressToOperationMethodSelector[]; /** * The name of the API whose methods or permissions the `IngressPolicy` or * `EgressPolicy` want to allow. A single `ApiOperation` with serviceName * field set to `*` will allow all methods AND permissions for all services. */ serviceName?: string; } interface ServicePerimetersServicePerimeterStatusIngressPolicyIngressToOperationMethodSelector { /** * Value for `method` should be a valid method name for the corresponding * `serviceName` in `ApiOperation`. If `*` used as value for method, * then ALL methods and permissions are allowed. */ method?: string; /** * Value for permission should be a valid Cloud IAM permission for the * corresponding `serviceName` in `ApiOperation`. */ permission?: string; } interface ServicePerimetersServicePerimeterStatusVpcAccessibleServices { /** * The list of APIs usable within the Service Perimeter. * Must be empty unless `enableRestriction` is True. */ allowedServices?: string[]; /** * Whether to restrict API calls within the Service Perimeter to the * list of APIs specified in 'allowedServices'. */ enableRestriction?: boolean; } } export declare namespace alloydb { interface BackupEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. */ kmsKeyName?: string; } interface BackupEncryptionInfo { /** * (Output) * Output only. Type of encryption. */ encryptionType: string; /** * (Output) * Output only. 
Cloud KMS key versions that are being used to protect the database or the backup. */ kmsKeyVersions: string[]; } interface BackupExpiryQuantity { /** * (Output) * Output only. The backup's position among its backups with the same source cluster and type, by descending chronological order create time (i.e. newest first). */ retentionCount: number; /** * (Output) * Output only. The length of the quantity-based queue, specified by the backup's retention policy. */ totalRetentionCount: number; } interface ClusterAutomatedBackupPolicy { /** * The length of the time window during which a backup can be taken. If a backup does not succeed within this time window, it will be canceled and considered failed. * The backup window must be at least 5 minutes long. There is no upper bound on the window. If not set, it will default to 1 hour. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ backupWindow: string; /** * Whether automated backups are enabled. */ enabled: boolean; /** * EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). * Structure is documented below. */ encryptionConfig?: outputs.alloydb.ClusterAutomatedBackupPolicyEncryptionConfig; /** * Labels to apply to backups created using this configuration. */ labels?: { [key: string]: string; }; /** * The location where the backup will be stored. Currently, the only supported option is to store the backup in the same region as the cluster. */ location: string; /** * Quantity-based Backup retention policy to retain recent backups. Conflicts with 'time_based_retention', both can't be set together. * Structure is documented below. */ quantityBasedRetention?: outputs.alloydb.ClusterAutomatedBackupPolicyQuantityBasedRetention; /** * Time-based Backup retention policy. Conflicts with 'quantity_based_retention', both can't be set together. * Structure is documented below. 
*/ timeBasedRetention?: outputs.alloydb.ClusterAutomatedBackupPolicyTimeBasedRetention; /** * Weekly schedule for the Backup. * Structure is documented below. */ weeklySchedule: outputs.alloydb.ClusterAutomatedBackupPolicyWeeklySchedule; } interface ClusterAutomatedBackupPolicyEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. */ kmsKeyName?: string; } interface ClusterAutomatedBackupPolicyQuantityBasedRetention { /** * The number of backups to retain. */ count?: number; } interface ClusterAutomatedBackupPolicyTimeBasedRetention { /** * The retention period. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ retentionPeriod?: string; } interface ClusterAutomatedBackupPolicyWeeklySchedule { /** * The days of the week to perform a backup. At least one day of the week must be provided. * Each value may be one of: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ daysOfWeeks?: string[]; /** * The times during the day to start a backup. At least one start time must be provided. The start times are assumed to be in UTC and to be an exact hour (e.g., 04:00:00). * Structure is documented below. */ startTimes: outputs.alloydb.ClusterAutomatedBackupPolicyWeeklyScheduleStartTime[]; } interface ClusterAutomatedBackupPolicyWeeklyScheduleStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Currently, only the value 0 is supported. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. */ nanos?: number; /** * Seconds of minutes of the time. Currently, only the value 0 is supported. 
*/ seconds?: number; } interface ClusterBackupSource { /** * The name of the backup resource. */ backupName?: string; } interface ClusterBackupdrBackupSource { /** * The name of the BackupDR backup resource. */ backup?: string; } interface ClusterContinuousBackupConfig { /** * Whether continuous backup recovery is enabled. If not set, defaults to true. */ enabled?: boolean; /** * EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). * Structure is documented below. */ encryptionConfig?: outputs.alloydb.ClusterContinuousBackupConfigEncryptionConfig; /** * The numbers of days that are eligible to restore from using PITR. To support the entire recovery window, backups and logs are retained for one day more than the recovery window. * If not set, defaults to 14 days. */ recoveryWindowDays: number; } interface ClusterContinuousBackupConfigEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. */ kmsKeyName?: string; } interface ClusterContinuousBackupInfo { /** * (Output) * The earliest restorable time that can be restored to. Output only field. */ earliestRestorableTime: string; /** * (Output) * When ContinuousBackup was most recently enabled. Set to null if ContinuousBackup is not enabled. */ enabledTime: string; /** * (Output) * Output only. The encryption information for the WALs and backups required for ContinuousBackup. * Structure is documented below. */ encryptionInfos: outputs.alloydb.ClusterContinuousBackupInfoEncryptionInfo[]; /** * (Output) * Days of the week on which a continuous backup is taken. Output only field. Ignored if passed into the request. */ schedules: string[]; } interface ClusterContinuousBackupInfoEncryptionInfo { /** * (Output) * Output only. Type of encryption. 
*/ encryptionType: string; /** * (Output) * Output only. Cloud KMS key versions that are being used to protect the database or the backup. */ kmsKeyVersions: string[]; } interface ClusterEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. */ kmsKeyName?: string; } interface ClusterEncryptionInfo { /** * (Output) * Output only. Type of encryption. */ encryptionType: string; /** * (Output) * Output only. Cloud KMS key versions that are being used to protect the database or the backup. */ kmsKeyVersions: string[]; } interface ClusterInitialUser { /** * The initial password for the user. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * The initial password for the user. * **Note**: This property is write-only and will not be read from the API. * * > **Note:** One of `password` or `passwordWo` can only be set. */ passwordWo?: string; /** * Triggers update of `passwordWo` write-only. Increment this value when an update to `passwordWo` is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ passwordWoVersion?: string; /** * The database username. */ user?: string; } interface ClusterMaintenanceUpdatePolicy { /** * Preferred windows to perform maintenance. Currently limited to 1. * Structure is documented below. */ maintenanceWindows?: outputs.alloydb.ClusterMaintenanceUpdatePolicyMaintenanceWindow[]; } interface ClusterMaintenanceUpdatePolicyMaintenanceWindow { /** * Preferred day of the week for maintenance, e.g. MONDAY, TUESDAY, etc. 
* Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ day: string; /** * Preferred time to start the maintenance operation on the specified day. Maintenance will start within 1 hour of this time. * Structure is documented below. */ startTime: outputs.alloydb.ClusterMaintenanceUpdatePolicyMaintenanceWindowStartTime; } interface ClusterMaintenanceUpdatePolicyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. */ hours: number; /** * Minutes of hour of day. Currently, only the value 0 is supported. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. */ nanos?: number; /** * Seconds of minutes of the time. Currently, only the value 0 is supported. */ seconds?: number; } interface ClusterMigrationSource { /** * The host and port of the on-premises instance in host:port format */ hostPort?: string; /** * Placeholder for the external source identifier (e.g. DMS job name) that created the cluster. */ referenceId?: string; /** * Type of migration source. */ sourceType?: string; } interface ClusterNetworkConfig { /** * The name of the allocated IP range for the private IP AlloyDB cluster. For example: "google-managed-services-default". * If set, the instance IPs for this cluster will be created in the allocated range. */ allocatedIpRange?: string; /** * The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. * It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". */ network?: string; } interface ClusterPscConfig { /** * Create an instance that allows connections from Private Service Connect endpoints to the instance. 
*/ pscEnabled?: boolean; /** * (Output) * The project number that needs to be allowlisted on the network attachment to enable outbound connectivity, if the network attachment is configured to ACCEPT_MANUAL connections. * In case the network attachment is configured to ACCEPT_AUTOMATIC, this project number does not need to be allowlisted explicitly. */ serviceOwnedProjectNumber: number; } interface ClusterRestoreBackupSource { /** * The name of the backup that this cluster is restored from. */ backupName: string; } interface ClusterRestoreBackupdrBackupSource { /** * The name of the BackupDR backup that this cluster is restored from. It must be of the format "projects/[PROJECT]/locations/[LOCATION]/backupVaults/[VAULT_ID]/dataSources/[DATASOURCE_ID]/backups/[BACKUP_ID]" */ backup: string; } interface ClusterRestoreBackupdrPitrSource { /** * The name of the BackupDR data source that this cluster is restored from. It must be of the format "projects/[PROJECT]/locations/[LOCATION]/backupVaults/[VAULT_ID]/dataSources/[DATASOURCE_ID]" */ dataSource: string; /** * The point in time that this cluster is restored to, in RFC 3339 format. */ pointInTime: string; } interface ClusterRestoreContinuousBackupSource { /** * The name of the source cluster that this cluster is restored from. */ cluster: string; /** * The point in time that this cluster is restored to, in RFC 3339 format. */ pointInTime: string; } interface ClusterSecondaryConfig { /** * Name of the primary cluster must be in the format * 'projects/{project}/locations/{location}/clusters/{cluster_id}' */ primaryClusterName: string; } interface ClusterTrialMetadata { /** * End time of the trial cluster. */ endTime?: string; /** * Grace end time of the trial cluster. */ graceEndTime?: string; /** * Start time of the trial cluster. */ startTime?: string; /** * Upgrade time of the trial cluster to standard cluster. 
*/ upgradeTime?: string; } interface GetClusterAutomatedBackupPolicy { /** * The length of the time window during which a backup can be taken. If a backup does not succeed within this time window, it will be canceled and considered failed. * * The backup window must be at least 5 minutes long. There is no upper bound on the window. If not set, it will default to 1 hour. * * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ backupWindow: string; /** * Whether automated backups are enabled. */ enabled: boolean; /** * EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). */ encryptionConfigs: outputs.alloydb.GetClusterAutomatedBackupPolicyEncryptionConfig[]; /** * Labels to apply to backups created using this configuration. */ labels: { [key: string]: string; }; /** * (optional) * The canonical id of the location. If it is not provided, the provider project is used. For example: us-east1. */ location: string; /** * Quantity-based Backup retention policy to retain recent backups. Conflicts with 'time_based_retention', both can't be set together. */ quantityBasedRetentions: outputs.alloydb.GetClusterAutomatedBackupPolicyQuantityBasedRetention[]; /** * Time-based Backup retention policy. Conflicts with 'quantity_based_retention', both can't be set together. */ timeBasedRetentions: outputs.alloydb.GetClusterAutomatedBackupPolicyTimeBasedRetention[]; /** * Weekly schedule for the Backup. */ weeklySchedules: outputs.alloydb.GetClusterAutomatedBackupPolicyWeeklySchedule[]; } interface GetClusterAutomatedBackupPolicyEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. 
*/ kmsKeyName: string; } interface GetClusterAutomatedBackupPolicyQuantityBasedRetention { /** * The number of backups to retain. */ count: number; } interface GetClusterAutomatedBackupPolicyTimeBasedRetention { /** * The retention period. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ retentionPeriod: string; } interface GetClusterAutomatedBackupPolicyWeeklySchedule { /** * The days of the week to perform a backup. At least one day of the week must be provided. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ daysOfWeeks: string[]; /** * The times during the day to start a backup. At least one start time must be provided. The start times are assumed to be in UTC and to be an exact hour (e.g., 04:00:00). */ startTimes: outputs.alloydb.GetClusterAutomatedBackupPolicyWeeklyScheduleStartTime[]; } interface GetClusterAutomatedBackupPolicyWeeklyScheduleStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Currently, only the value 0 is supported. */ minutes: number; /** * Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. */ nanos: number; /** * Seconds of minutes of the time. Currently, only the value 0 is supported. */ seconds: number; } interface GetClusterBackupSource { /** * The name of the backup resource. */ backupName: string; } interface GetClusterBackupdrBackupSource { /** * The name of the BackupDR backup resource. */ backup: string; } interface GetClusterContinuousBackupConfig { /** * Whether continuous backup recovery is enabled. If not set, defaults to true. */ enabled: boolean; /** * EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). 
*/ encryptionConfigs: outputs.alloydb.GetClusterContinuousBackupConfigEncryptionConfig[]; /** * The numbers of days that are eligible to restore from using PITR. To support the entire recovery window, backups and logs are retained for one day more than the recovery window. * * If not set, defaults to 14 days. */ recoveryWindowDays: number; } interface GetClusterContinuousBackupConfigEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. */ kmsKeyName: string; } interface GetClusterContinuousBackupInfo { /** * The earliest restorable time that can be restored to. Output only field. */ earliestRestorableTime: string; /** * When ContinuousBackup was most recently enabled. Set to null if ContinuousBackup is not enabled. */ enabledTime: string; /** * Output only. The encryption information for the WALs and backups required for ContinuousBackup. */ encryptionInfos: outputs.alloydb.GetClusterContinuousBackupInfoEncryptionInfo[]; /** * Days of the week on which a continuous backup is taken. Output only field. Ignored if passed into the request. */ schedules: string[]; } interface GetClusterContinuousBackupInfoEncryptionInfo { /** * Output only. Type of encryption. */ encryptionType: string; /** * Output only. Cloud KMS key versions that are being used to protect the database or the backup. */ kmsKeyVersions: string[]; } interface GetClusterEncryptionConfig { /** * The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. */ kmsKeyName: string; } interface GetClusterEncryptionInfo { /** * Output only. Type of encryption. */ encryptionType: string; /** * Output only. Cloud KMS key versions that are being used to protect the database or the backup. 
*/ kmsKeyVersions: string[]; } interface GetClusterInitialUser { /** * The initial password for the user. */ password: string; /** * The initial password for the user. */ passwordWo: string; /** * Triggers update of 'password_wo' write-only. Increment this value when an update to 'password_wo' is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ passwordWoVersion: string; /** * The database username. */ user: string; } interface GetClusterMaintenanceUpdatePolicy { /** * Preferred windows to perform maintenance. Currently limited to 1. */ maintenanceWindows: outputs.alloydb.GetClusterMaintenanceUpdatePolicyMaintenanceWindow[]; } interface GetClusterMaintenanceUpdatePolicyMaintenanceWindow { /** * Preferred day of the week for maintenance, e.g. MONDAY, TUESDAY, etc. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ day: string; /** * Preferred time to start the maintenance operation on the specified day. Maintenance will start within 1 hour of this time. */ startTimes: outputs.alloydb.GetClusterMaintenanceUpdatePolicyMaintenanceWindowStartTime[]; } interface GetClusterMaintenanceUpdatePolicyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. */ hours: number; /** * Minutes of hour of day. Currently, only the value 0 is supported. */ minutes: number; /** * Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. */ nanos: number; /** * Seconds of minutes of the time. Currently, only the value 0 is supported. */ seconds: number; } interface GetClusterMigrationSource { /** * The host and port of the on-premises instance in host:port format */ hostPort: string; /** * Place holder for the external source identifier(e.g DMS job name) that created the cluster. */ referenceId: string; /** * Type of migration source. 
*/ sourceType: string; } interface GetClusterNetworkConfig { /** * The name of the allocated IP range for the private IP AlloyDB cluster. For example: "google-managed-services-default". * If set, the instance IPs for this cluster will be created in the allocated range. */ allocatedIpRange: string; /** * The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. * It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". */ network: string; } interface GetClusterPscConfig { /** * Create an instance that allows connections from Private Service Connect endpoints to the instance. */ pscEnabled: boolean; /** * The project number that needs to be allowlisted on the network attachment to enable outbound connectivity, if the network attachment is configured to ACCEPT_MANUAL connections. * In case the network attachment is configured to ACCEPT_AUTOMATIC, this project number does not need to be allowlisted explicitly. */ serviceOwnedProjectNumber: number; } interface GetClusterRestoreBackupSource { /** * The name of the backup that this cluster is restored from. */ backupName: string; } interface GetClusterRestoreBackupdrBackupSource { /** * The name of the BackupDR backup that this cluster is restored from. It must be of the format "projects/[PROJECT]/locations/[LOCATION]/backupVaults/[VAULT_ID]/dataSources/[DATASOURCE_ID]/backups/[BACKUP_ID]" */ backup: string; } interface GetClusterRestoreBackupdrPitrSource { /** * The name of the BackupDR data source that this cluster is restored from. It must be of the format "projects/[PROJECT]/locations/[LOCATION]/backupVaults/[VAULT_ID]/dataSources/[DATASOURCE_ID]" */ dataSource: string; /** * The point in time that this cluster is restored to, in RFC 3339 format. 
*/ pointInTime: string; } interface GetClusterRestoreContinuousBackupSource { /** * The name of the source cluster that this cluster is restored from. */ cluster: string; /** * The point in time that this cluster is restored to, in RFC 3339 format. */ pointInTime: string; } interface GetClusterSecondaryConfig { /** * Name of the primary cluster must be in the format * 'projects/{project}/locations/{location}/clusters/{cluster_id}' */ primaryClusterName: string; } interface GetClusterTrialMetadata { /** * End time of the trial cluster. */ endTime: string; /** * Grace end time of the trial cluster. */ graceEndTime: string; /** * Start time of the trial cluster. */ startTime: string; /** * Upgrade time of the trial cluster to standard cluster. */ upgradeTime: string; } interface GetInstanceClientConnectionConfig { /** * Configuration to enforce connectors only (ex: AuthProxy) connections to the database. */ requireConnectors: boolean; /** * SSL config option for this instance. */ sslConfigs: outputs.alloydb.GetInstanceClientConnectionConfigSslConfig[]; } interface GetInstanceClientConnectionConfigSslConfig { /** * SSL mode. Specifies client-server SSL/TLS connection behavior. Possible values: ["ENCRYPTED_ONLY", "ALLOW_UNENCRYPTED_AND_ENCRYPTED"] */ sslMode: string; } interface GetInstanceConnectionPoolConfig { /** * Whether to enable Managed Connection Pool. */ enabled: boolean; /** * Flags for configuring managed connection pooling when it is enabled. * These flags will only be set if 'connection_pool_config.enabled' is * true. * Please see * https://cloud.google.com/alloydb/docs/configure-managed-connection-pooling#configuration-options * for a comprehensive list of flags that can be set. To specify the flags * in Terraform, please remove the "connection-pooling-" prefix and use * underscores instead of dashes in the name. For example, * "connection-pooling-pool-mode" would be "poolMode". 
*/ flags: { [key: string]: string; }; /** * The number of running poolers per instance. */ poolerCount: number; } interface GetInstanceMachineConfig { /** * The number of CPU's in the VM instance. */ cpuCount: number; /** * Machine type of the VM instance. * E.g. "n2-highmem-4", "n2-highmem-8", "c4a-highmem-4-lssd". * 'cpu_count' must match the number of vCPUs in the machine type. */ machineType: string; } interface GetInstanceNetworkConfig { /** * Name of the allocated IP range for the private IP AlloyDB instance, for example: "google-managed-services-default". * If set, the instance IPs will be created from this allocated range and will override the IP range used by the parent cluster. * The range name must comply with RFC 1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRangeOverride: string; /** * A list of external networks authorized to access this instance. This * field is only allowed to be set when 'enable_public_ip' is set to * true. */ authorizedExternalNetworks: outputs.alloydb.GetInstanceNetworkConfigAuthorizedExternalNetwork[]; /** * Enabling outbound public ip for the instance. */ enableOutboundPublicIp: boolean; /** * Enabling public ip for the instance. If a user wishes to disable this, * please also clear the list of the authorized external networks set on * the same instance. */ enablePublicIp: boolean; } interface GetInstanceNetworkConfigAuthorizedExternalNetwork { /** * CIDR range for one authorized network of the instance. */ cidrRange: string; } interface GetInstanceObservabilityConfig { /** * Whether assistive experiences are enabled for this AlloyDB instance. */ assistiveExperiencesEnabled: boolean; /** * Observability feature status for an instance. */ enabled: boolean; /** * Query string length. The default value is 10240. Any integer between 1024 and 100000 is considered valid. */ maxQueryStringLength: number; /** * Preserve comments in the query string. 
*/ preserveComments: boolean; /** * Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 200 is considered valid. */ queryPlansPerMinute: number; /** * Record application tags for an instance. This flag is turned "on" by default. */ recordApplicationTags: boolean; /** * Track actively running queries. If not set, default value is "off". */ trackActiveQueries: boolean; /** * Record wait event types during query execution for an instance. */ trackWaitEventTypes: boolean; /** * Record wait events during query execution for an instance. */ trackWaitEvents: boolean; } interface GetInstancePscInstanceConfig { /** * List of consumer projects that are allowed to create PSC endpoints to service-attachments to this instance. * These should be specified as project numbers only. */ allowedConsumerProjects: string[]; /** * Configurations for setting up PSC service automation. */ pscAutoConnections: outputs.alloydb.GetInstancePscInstanceConfigPscAutoConnection[]; /** * The DNS name of the instance for PSC connectivity. * Name convention: ...alloydb-psc.goog */ pscDnsName: string; /** * Configurations for setting up PSC interfaces attached to the instance * which are used for outbound connectivity. Currently, AlloyDB supports only 0 or 1 PSC interface. */ pscInterfaceConfigs: outputs.alloydb.GetInstancePscInstanceConfigPscInterfaceConfig[]; /** * The service attachment created when Private Service Connect (PSC) is enabled for the instance. * The name of the resource will be in the format of * 'projects//regions//serviceAttachments/' */ serviceAttachmentLink: string; } interface GetInstancePscInstanceConfigPscAutoConnection { /** * The consumer network for the PSC service automation, example: * "projects/vpc-host-project/global/networks/default". * The consumer network might be hosted a different project than the * consumer project. 
The API expects the consumer project specified to be * the project ID (and not the project number) */ consumerNetwork: string; /** * The status of the service connection policy. */ consumerNetworkStatus: string; /** * The consumer project to which the PSC service automation endpoint will * be created. The API expects the consumer project to be the project ID( * and not the project number). */ consumerProject: string; /** * The IP address of the PSC service automation endpoint. */ ipAddress: string; /** * The status of the PSC service automation connection. */ status: string; } interface GetInstancePscInstanceConfigPscInterfaceConfig { /** * The network attachment resource created in the consumer project to which the PSC interface will be linked. * This is of the format: "projects/${CONSUMER_PROJECT}/regions/${REGION}/networkAttachments/${NETWORK_ATTACHMENT_NAME}". * The network attachment must be in the same region as the instance. */ networkAttachmentResource: string; } interface GetInstanceQueryInsightsConfig { /** * Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 20 is considered valid. */ queryPlansPerMinute: number; /** * Query string length. The default value is 1024. Any integer between 256 and 4500 is considered valid. */ queryStringLength: number; /** * Record application tags for an instance. This flag is turned "on" by default. */ recordApplicationTags: boolean; /** * Record client address for an instance. Client address is PII information. This flag is turned "on" by default. */ recordClientAddress: boolean; } interface GetInstanceReadPoolConfig { /** * Read capacity, i.e. number of nodes in a read pool instance. */ nodeCount: number; } interface GetLocationsLocation { /** * The friendly name for this location, typically a nearby city name. For example, "Tokyo". */ displayName: string; /** * Cross-service attributes for the location. 
For example `{"cloud.googleapis.com/region": "us-east1"}`. */ labels: { [key: string]: string; }; /** * The canonical id for this location. For example: "us-east1". */ locationId: string; /** * Service-specific metadata. For example the available capacity at the given location. */ metadata: { [key: string]: string; }; /** * Resource name for the location, which may vary between implementations. For example: "projects/example-project/locations/us-east1". */ name: string; } interface GetSupportedDatabaseFlagsSupportedDatabaseFlag { /** * Whether the database flag accepts multiple values. If true, a comma-separated list of stringified values may be specified. */ acceptsMultipleValues: boolean; /** * The name of the database flag, e.g. "maxAllowedPackets". This is a possible key for the Instance.database_flags map field. */ flagName: string; /** * Restriction on `INTEGER` type value. Specifies the minimum value and the maximum value that can be specified, if applicable. */ integerRestrictions: outputs.alloydb.GetSupportedDatabaseFlagsSupportedDatabaseFlagIntegerRestrictions; /** * The name of the flag resource, following Google Cloud conventions, e.g.: * projects/{project}/locations/{location}/flags/{flag} This field currently has no semantic meaning. */ name: string; /** * Whether setting or updating this flag on an Instance requires a database restart. If a flag that requires database restart is set, the backend will automatically restart the database (making sure to satisfy any availability SLO's). */ requiresDbRestart: boolean; /** * Restriction on `STRING` type value. The list of allowed values, if bounded. This field will be empty if there is an unbounded number of allowed values. */ stringRestrictions: outputs.alloydb.GetSupportedDatabaseFlagsSupportedDatabaseFlagStringRestrictions; /** * Major database engine versions for which this flag is supported. The supported values are `POSTGRES_14` and `DATABASE_VERSION_UNSPECIFIED`. 
*/ supportedDbVersions: string[]; /** * ValueType describes the semantic type of the value that the flag accepts. Regardless of the ValueType, the Instance.database_flags field accepts the stringified version of the value, i.e. "20" or "3.14". The supported values are `VALUE_TYPE_UNSPECIFIED`, `STRING`, `INTEGER`, `FLOAT` and `NONE`. */ valueType: string; } interface GetSupportedDatabaseFlagsSupportedDatabaseFlagIntegerRestrictions { /** * The maximum value that can be specified, if applicable. */ maxValue: string; /** * The minimum value that can be specified, if applicable. */ minValue: string; } interface GetSupportedDatabaseFlagsSupportedDatabaseFlagStringRestrictions { /** * The list of allowed values, if bounded. This field will be empty if there is an unbounded number of allowed values. */ allowedValues: string[]; } interface InstanceClientConnectionConfig { /** * Configuration to enforce connectors only (ex: AuthProxy) connections to the database. */ requireConnectors?: boolean; /** * SSL config option for this instance. * Structure is documented below. */ sslConfig: outputs.alloydb.InstanceClientConnectionConfigSslConfig; } interface InstanceClientConnectionConfigSslConfig { /** * SSL mode. Specifies client-server SSL/TLS connection behavior. * Possible values are: `ENCRYPTED_ONLY`, `ALLOW_UNENCRYPTED_AND_ENCRYPTED`. */ sslMode: string; } interface InstanceConnectionPoolConfig { /** * Whether to enable Managed Connection Pool. */ enabled: boolean; /** * Flags for configuring managed connection pooling when it is enabled. * These flags will only be set if `connection_pool_config.enabled` is * true. * Please see * https://cloud.google.com/alloydb/docs/configure-managed-connection-pooling#configuration-options * for a comprehensive list of flags that can be set. To specify the flags * in Terraform, please remove the "connection-pooling-" prefix and use * underscores instead of dashes in the name. 
For example, * "connection-pooling-pool-mode" would be "poolMode". */ flags?: { [key: string]: string; }; /** * (Output) * The number of running poolers per instance. */ poolerCount: number; } interface InstanceMachineConfig { /** * The number of CPU's in the VM instance. */ cpuCount: number; /** * Machine type of the VM instance. * E.g. "n2-highmem-4", "n2-highmem-8", "c4a-highmem-4-lssd". * `cpuCount` must match the number of vCPUs in the machine type. */ machineType: string; } interface InstanceNetworkConfig { /** * Name of the allocated IP range for the private IP AlloyDB instance, for example: "google-managed-services-default". * If set, the instance IPs will be created from this allocated range and will override the IP range used by the parent cluster. * The range name must comply with RFC 1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRangeOverride?: string; /** * A list of external networks authorized to access this instance. This * field is only allowed to be set when `enablePublicIp` is set to * true. * Structure is documented below. */ authorizedExternalNetworks?: outputs.alloydb.InstanceNetworkConfigAuthorizedExternalNetwork[]; /** * Enabling outbound public ip for the instance. */ enableOutboundPublicIp?: boolean; /** * Enabling public ip for the instance. If a user wishes to disable this, * please also clear the list of the authorized external networks set on * the same instance. */ enablePublicIp?: boolean; } interface InstanceNetworkConfigAuthorizedExternalNetwork { /** * CIDR range for one authorized network of the instance. */ cidrRange?: string; } interface InstanceObservabilityConfig { /** * Whether assistive experiences are enabled for this AlloyDB instance. */ assistiveExperiencesEnabled: boolean; /** * Observability feature status for an instance. */ enabled: boolean; /** * Query string length. The default value is 10240. Any integer between 1024 and 100000 is considered valid. 
*/ maxQueryStringLength: number; /** * Preserve comments in the query string. */ preserveComments: boolean; /** * Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 200 is considered valid. */ queryPlansPerMinute: number; /** * Record application tags for an instance. This flag is turned "on" by default. */ recordApplicationTags: boolean; /** * Track actively running queries. If not set, default value is "off". */ trackActiveQueries: boolean; /** * Record wait event types during query execution for an instance. */ trackWaitEventTypes: boolean; /** * Record wait events during query execution for an instance. */ trackWaitEvents: boolean; } interface InstancePscInstanceConfig { /** * List of consumer projects that are allowed to create PSC endpoints to service-attachments to this instance. * These should be specified as project numbers only. */ allowedConsumerProjects?: string[]; /** * Configurations for setting up PSC service automation. * Structure is documented below. */ pscAutoConnections?: outputs.alloydb.InstancePscInstanceConfigPscAutoConnection[]; /** * (Output) * The DNS name of the instance for PSC connectivity. * Name convention: ...alloydb-psc.goog */ pscDnsName: string; /** * Configurations for setting up PSC interfaces attached to the instance * which are used for outbound connectivity. Currently, AlloyDB supports only 0 or 1 PSC interface. * Structure is documented below. */ pscInterfaceConfigs?: outputs.alloydb.InstancePscInstanceConfigPscInterfaceConfig[]; /** * (Output) * The service attachment created when Private Service Connect (PSC) is enabled for the instance. 
* The name of the resource will be in the format of * `projects//regions//serviceAttachments/` */ serviceAttachmentLink: string; } interface InstancePscInstanceConfigPscAutoConnection { /** * The consumer network for the PSC service automation, example: * "projects/vpc-host-project/global/networks/default". * The consumer network might be hosted a different project than the * consumer project. The API expects the consumer project specified to be * the project ID (and not the project number) */ consumerNetwork?: string; /** * (Output) * The status of the service connection policy. */ consumerNetworkStatus: string; /** * The consumer project to which the PSC service automation endpoint will * be created. The API expects the consumer project to be the project ID( * and not the project number). */ consumerProject?: string; /** * (Output) * The IP address of the PSC service automation endpoint. */ ipAddress: string; /** * (Output) * The status of the PSC service automation connection. */ status: string; } interface InstancePscInstanceConfigPscInterfaceConfig { /** * The network attachment resource created in the consumer project to which the PSC interface will be linked. * This is of the format: "projects/${CONSUMER_PROJECT}/regions/${REGION}/networkAttachments/${NETWORK_ATTACHMENT_NAME}". * The network attachment must be in the same region as the instance. */ networkAttachmentResource?: string; } interface InstanceQueryInsightsConfig { /** * Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 20 is considered valid. */ queryPlansPerMinute?: number; /** * Query string length. The default value is 1024. Any integer between 256 and 4500 is considered valid. */ queryStringLength?: number; /** * Record application tags for an instance. This flag is turned "on" by default. */ recordApplicationTags?: boolean; /** * Record client address for an instance. Client address is PII information. 
This flag is turned "on" by default. */ recordClientAddress?: boolean; } interface InstanceReadPoolConfig { /** * Read capacity, i.e. number of nodes in a read pool instance. */ nodeCount?: number; } } export declare namespace apigateway { interface ApiConfigGatewayConfig { /** * Backend settings that are applied to all backends of the Gateway. * Structure is documented below. */ backendConfig: outputs.apigateway.ApiConfigGatewayConfigBackendConfig; } interface ApiConfigGatewayConfigBackendConfig { /** * Google Cloud IAM service account used to sign OIDC tokens for backends that have authentication configured * (https://cloud.google.com/service-infrastructure/docs/service-management/reference/rest/v1/services.configs#backend). */ googleServiceAccount: string; } interface ApiConfigGrpcService { /** * Input only. File descriptor set, generated by protoc. * To generate, use protoc with imports and source info included. For an example test.proto file, the following command would put the value in a new file named out.pb. * $ protoc --include_imports --include_source_info test.proto -o out.pb * Structure is documented below. */ fileDescriptorSet: outputs.apigateway.ApiConfigGrpcServiceFileDescriptorSet; /** * Uncompiled proto files associated with the descriptor set, used for display purposes (server-side compilation is not supported). These should match the inputs to 'protoc' command used to generate fileDescriptorSet. * Structure is documented below. */ sources?: outputs.apigateway.ApiConfigGrpcServiceSource[]; } interface ApiConfigGrpcServiceFileDescriptorSet { /** * Base64 encoded content of the file. */ contents: string; /** * The file path (full or relative path). This is typically the path of the file when it is uploaded. */ path: string; } interface ApiConfigGrpcServiceSource { /** * Base64 encoded content of the file. */ contents: string; /** * The file path (full or relative path). This is typically the path of the file when it is uploaded. 
*/ path: string; } interface ApiConfigIamBindingCondition { description?: string; expression: string; title: string; } interface ApiConfigIamMemberCondition { description?: string; expression: string; title: string; } interface ApiConfigManagedServiceConfig { /** * Base64 encoded content of the file. */ contents: string; /** * The file path (full or relative path). This is typically the path of the file when it is uploaded. */ path: string; } interface ApiConfigOpenapiDocument { /** * The OpenAPI Specification document file. * Structure is documented below. */ document: outputs.apigateway.ApiConfigOpenapiDocumentDocument; } interface ApiConfigOpenapiDocumentDocument { /** * Base64 encoded content of the file. */ contents: string; /** * The file path (full or relative path). This is typically the path of the file when it is uploaded. */ path: string; } interface ApiIamBindingCondition { description?: string; expression: string; title: string; } interface ApiIamMemberCondition { description?: string; expression: string; title: string; } interface GatewayIamBindingCondition { description?: string; expression: string; title: string; } interface GatewayIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace apigee { interface AddonsConfigAddonsConfig { /** * Configuration for the Advanced API Ops add-on. * Structure is documented below. */ advancedApiOpsConfig?: outputs.apigee.AddonsConfigAddonsConfigAdvancedApiOpsConfig; /** * Configuration for the API Security add-on. * Structure is documented below. */ apiSecurityConfig?: outputs.apigee.AddonsConfigAddonsConfigApiSecurityConfig; /** * Configuration for the Connectors Platform add-on. * Structure is documented below. */ connectorsPlatformConfig?: outputs.apigee.AddonsConfigAddonsConfigConnectorsPlatformConfig; /** * Configuration for the Integration add-on. * Structure is documented below. 
*/ integrationConfig?: outputs.apigee.AddonsConfigAddonsConfigIntegrationConfig; /** * Configuration for the Monetization add-on. * Structure is documented below. */ monetizationConfig?: outputs.apigee.AddonsConfigAddonsConfigMonetizationConfig; } interface AddonsConfigAddonsConfigAdvancedApiOpsConfig { /** * Flag that specifies whether the Advanced API Ops add-on is enabled. */ enabled?: boolean; } interface AddonsConfigAddonsConfigApiSecurityConfig { /** * Flag that specifies whether the API security add-on is enabled. */ enabled?: boolean; /** * (Output) * Time at which the API Security add-on expires in milliseconds since epoch. If unspecified, the add-on will never expire. */ expiresAt: string; } interface AddonsConfigAddonsConfigConnectorsPlatformConfig { /** * Flag that specifies whether the Connectors Platform add-on is enabled. */ enabled?: boolean; /** * (Output) * Time at which the Connectors Platform add-on expires in milliseconds since epoch. If unspecified, the add-on will never expire. */ expiresAt: string; } interface AddonsConfigAddonsConfigIntegrationConfig { /** * Flag that specifies whether the Integration add-on is enabled. */ enabled?: boolean; } interface AddonsConfigAddonsConfigMonetizationConfig { /** * Flag that specifies whether the Monetization add-on is enabled. */ enabled?: boolean; } interface ApiMetaData { /** * Time at which the API proxy was created, in milliseconds since epoch. */ createdAt: string; /** * Time at which the API proxy was most recently modified, in milliseconds since epoch. */ lastModifiedAt: string; /** * The type of entity described */ subType: string; } interface ApiProductAttribute { /** * Key of the attribute. */ name?: string; /** * Value of the attribute. */ value?: string; } interface ApiProductGraphqlOperationGroup { /** * Flag that specifies whether the configuration is for Apigee API proxy or a remote service. Valid values include proxy or remoteservice. Defaults to proxy. 
Set to proxy when Apigee API proxies are associated with the API product. Set to remoteservice when non-Apigee proxies like Istio-Envoy are associated with the API product. * Possible values are: `proxy`, `remoteservice`. */ operationConfigType?: string; /** * List of graphQL operation configuration details associated with Apigee API proxies or remote services. Remote services are non-Apigee proxies, such as Istio-Envoy. * Structure is documented below. */ operationConfigs?: outputs.apigee.ApiProductGraphqlOperationGroupOperationConfig[]; } interface ApiProductGraphqlOperationGroupOperationConfig { /** * Required. Name of the API proxy with which the gRPC operation and quota are associated. */ apiSource?: string; /** * Custom attributes associated with the operation. * Structure is documented below. */ attributes?: outputs.apigee.ApiProductGraphqlOperationGroupOperationConfigAttribute[]; /** * Required. List of GraphQL name/operation type pairs for the proxy or remote service to which quota will be applied. If only operation types are specified, the quota will be applied to all GraphQL requests irrespective of the GraphQL name. * Note: Currently, you can specify only a single GraphQLOperation. Specifying more than one will cause the operation to fail. * Structure is documented below. */ operations?: outputs.apigee.ApiProductGraphqlOperationGroupOperationConfigOperation[]; /** * Quota parameters to be enforced for the resources, methods, and API source combination. If none are specified, quota enforcement will not be done. * Structure is documented below. */ quota?: outputs.apigee.ApiProductGraphqlOperationGroupOperationConfigQuota; } interface ApiProductGraphqlOperationGroupOperationConfigAttribute { /** * Key of the attribute. */ name?: string; /** * Value of the attribute. */ value?: string; } interface ApiProductGraphqlOperationGroupOperationConfigOperation { /** * GraphQL operation name. The name and operation type will be used to apply quotas. 
If no name is specified, the quota will be applied to all GraphQL operations irrespective of their operation names in the payload. */ operation?: string; /** * Required. GraphQL operation types. Valid values include query or mutation. * Note: Apigee does not currently support subscription types. */ operationTypes?: string[]; } interface ApiProductGraphqlOperationGroupOperationConfigQuota { /** * Required. Time interval over which the number of request messages is calculated. */ interval?: string; /** * Required. Upper limit allowed for the time interval and time unit specified. Requests exceeding this limit will be rejected. */ limit?: string; /** * Time unit defined for the interval. Valid values include second, minute, hour, day, month or year. If limit and interval are valid, the default value is hour; otherwise, the default is null. */ timeUnit?: string; } interface ApiProductGrpcOperationGroup { /** * Required. List of operation configurations for either Apigee API proxies that are associated with this API product. * Structure is documented below. */ operationConfigs?: outputs.apigee.ApiProductGrpcOperationGroupOperationConfig[]; } interface ApiProductGrpcOperationGroupOperationConfig { /** * Required. Name of the API proxy with which the gRPC operation and quota are associated. */ apiSource?: string; /** * Custom attributes associated with the operation. * Structure is documented below. */ attributes?: outputs.apigee.ApiProductGrpcOperationGroupOperationConfigAttribute[]; /** * List of unqualified gRPC method names for the proxy to which quota will be applied. If this field is empty, the Quota will apply to all operations on the gRPC service defined on the proxy. * Example: Given a proxy that is configured to serve com.petstore.PetService, the methods com.petstore.PetService.ListPets and com.petstore.PetService.GetPet would be specified here as simply ["ListPets", "GetPet"]. * Note: Currently, you can specify only a single GraphQLOperation. 
Specifying more than one will cause the operation to fail. */ methods?: string[]; /** * Quota parameters to be enforced for the resources, methods, and API source combination. If none are specified, quota enforcement will not be done. * Structure is documented below. */ quota?: outputs.apigee.ApiProductGrpcOperationGroupOperationConfigQuota; /** * Required. gRPC Service name to be associated with the API proxy, on which quota rules can be applied. */ service?: string; } interface ApiProductGrpcOperationGroupOperationConfigAttribute { /** * Key of the attribute. */ name?: string; /** * Value of the attribute. */ value?: string; } interface ApiProductGrpcOperationGroupOperationConfigQuota { /** * Required. Time interval over which the number of request messages is calculated. */ interval?: string; /** * Required. Upper limit allowed for the time interval and time unit specified. Requests exceeding this limit will be rejected. */ limit?: string; /** * Time unit defined for the interval. Valid values include second, minute, hour, day, month or year. If limit and interval are valid, the default value is hour; otherwise, the default is null. */ timeUnit?: string; } interface ApiProductOperationGroup { /** * Flag that specifies whether the configuration is for Apigee API proxy or a remote service. Valid values include proxy or remoteservice. Defaults to proxy. Set to proxy when Apigee API proxies are associated with the API product. Set to remoteservice when non-Apigee proxies like Istio-Envoy are associated with the API product. * Possible values are: `proxy`, `remoteservice`. */ operationConfigType?: string; /** * Required. List of operation configurations for either Apigee API proxies or other remote services that are associated with this API product. * Structure is documented below. */ operationConfigs?: outputs.apigee.ApiProductOperationGroupOperationConfig[]; } interface ApiProductOperationGroupOperationConfig { /** * Required. 
Name of the API proxy with which the gRPC operation and quota are associated. */ apiSource?: string; /** * Custom attributes associated with the operation. * Structure is documented below. */ attributes?: outputs.apigee.ApiProductOperationGroupOperationConfigAttribute[]; /** * Required. List of GraphQL name/operation type pairs for the proxy or remote service to which quota will be applied. If only operation types are specified, the quota will be applied to all GraphQL requests irrespective of the GraphQL name. * Note: Currently, you can specify only a single GraphQLOperation. Specifying more than one will cause the operation to fail. * Structure is documented below. */ operations?: outputs.apigee.ApiProductOperationGroupOperationConfigOperation[]; /** * Quota parameters to be enforced for the resources, methods, and API source combination. If none are specified, quota enforcement will not be done. * Structure is documented below. */ quota?: outputs.apigee.ApiProductOperationGroupOperationConfigQuota; } interface ApiProductOperationGroupOperationConfigAttribute { /** * Key of the attribute. */ name?: string; /** * Value of the attribute. */ value?: string; } interface ApiProductOperationGroupOperationConfigOperation { /** * Methods refers to the REST verbs, when none specified, all verb types are allowed. */ methods?: string[]; /** * Required. REST resource path associated with the API proxy or remote service. */ resource?: string; } interface ApiProductOperationGroupOperationConfigQuota { /** * Required. Time interval over which the number of request messages is calculated. */ interval?: string; /** * Required. Upper limit allowed for the time interval and time unit specified. Requests exceeding this limit will be rejected. */ limit?: string; /** * Time unit defined for the interval. Valid values include second, minute, hour, day, month or year. If limit and interval are valid, the default value is hour; otherwise, the default is null. 
*/ timeUnit?: string; } interface AppGroupAttribute { /** * Key of the attribute */ name?: string; /** * Value of the attribute */ value?: string; } interface DeveloperAppAttribute { /** * Key of the attribute */ name?: string; /** * Value of the attribute */ value?: string; } interface DeveloperAppCredential { /** * List of API products associated with the developer app. */ apiProducts: outputs.apigee.DeveloperAppCredentialApiProduct[]; /** * Developer attributes (name/value pairs). The custom attribute limit is 18. * Structure is documented below. */ attributes: outputs.apigee.DeveloperAppCredentialAttribute[]; /** * (Output) * Consumer key. */ consumerKey: string; /** * (Output) * Secret key. * **Note**: This property is sensitive and will not be displayed in the plan. */ consumerSecret: string; /** * (Output) * Time the credential will expire in milliseconds since epoch. */ expiresAt: string; /** * (Output) * Time the credential was issued in milliseconds since epoch. */ issuedAt: string; /** * Scopes to apply to the developer app. * The specified scopes must already exist for the API product that * you associate with the developer app. */ scopes: string[]; /** * Status of the credential. Valid values include approved or revoked. */ status: string; } interface DeveloperAppCredentialApiProduct { /** * (Output) * Name of the API product. */ apiproduct: string; /** * Status of the credential. Valid values include approved or revoked. */ status: string; } interface DeveloperAppCredentialAttribute { /** * Key of the attribute */ name: string; /** * Value of the attribute */ value: string; } interface DeveloperAttribute { /** * Key of the attribute */ name?: string; /** * Value of the attribute */ value?: string; } interface DnsZonePeeringConfig { /** * The name of the producer VPC network. */ targetNetworkId: string; /** * The ID of the project that contains the producer VPC network. 
*/ targetProjectId: string; } interface EnvironmentClientIpResolutionConfig { /** * Resolves the client ip based on a custom header. * Structure is documented below. */ headerIndexAlgorithm?: outputs.apigee.EnvironmentClientIpResolutionConfigHeaderIndexAlgorithm; } interface EnvironmentClientIpResolutionConfigHeaderIndexAlgorithm { /** * The index of the ip in the header. Positive indices 0, 1, 2, 3 chooses indices from the left (first ips). Negative indices -1, -2, -3 chooses indices from the right (last ips). */ ipHeaderIndex: number; /** * The name of the header to extract the client ip from. We are currently only supporting the X-Forwarded-For header. */ ipHeaderName: string; } interface EnvironmentIamBindingCondition { description?: string; expression: string; title: string; } interface EnvironmentIamMemberCondition { description?: string; expression: string; title: string; } interface EnvironmentNodeConfig { /** * (Output) * The current total number of gateway nodes that each environment currently has across * all instances. */ currentAggregateNodeCount: string; /** * The maximum total number of gateway nodes that the is reserved for all instances that * has the specified environment. If not specified, the default is determined by the * recommended maximum number of nodes for that gateway. */ maxNodeCount?: string; /** * The minimum total number of gateway nodes that the is reserved for all instances that * has the specified environment. If not specified, the default is determined by the * recommended minimum number of nodes for that gateway. */ minNodeCount?: string; } interface EnvironmentProperties { /** * List of all properties in the object. * Structure is documented below. */ properties?: outputs.apigee.EnvironmentPropertiesProperty[]; } interface EnvironmentPropertiesProperty { /** * The property key. */ name?: string; /** * The property value. 
*/ value?: string; } interface InstanceAccessLoggingConfig { /** * Boolean flag that specifies whether the customer access log feature is enabled. */ enabled: boolean; /** * Ship the access log entries that match the statusCode defined in the filter. * The statusCode is the only expected/supported filter field. (Ex: statusCode) * The filter will parse it to the Common Expression Language semantics for expression * evaluation to build the filter condition. (Ex: "filter": statusCode >= 200 && statusCode < 300 ) */ filter?: string; } interface KeystoresAliasesKeyCertFileCertsInfo { /** * (Output) * X.509 basic constraints extension. */ basicConstraints: string; /** * (Output) * X.509 notAfter validity period in milliseconds since epoch. */ expiryDate: string; /** * (Output) * Flag that specifies whether the certificate is valid. * Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid. */ isValid: string; /** * (Output) * X.509 issuer. */ issuer: string; /** * (Output) * Public key component of the X.509 subject public key info. */ publicKey: string; /** * (Output) * X.509 serial number. */ serialNumber: string; /** * (Output) * X.509 signatureAlgorithm. */ sigAlgName: string; /** * (Output) * X.509 subject. */ subject: string; /** * (Output) * X.509 subject alternative names (SANs) extension. */ subjectAlternativeNames: string[]; /** * (Output) * X.509 notBefore validity period in milliseconds since epoch. */ validFrom: string; /** * (Output) * X.509 version. */ version: number; } interface KeystoresAliasesKeyCertFileTimeouts { /** * A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). */ create?: string; /** * A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. */ delete?: string; /** * A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. */ read?: string; /** * A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). */ update?: string; } interface KeystoresAliasesPkcs12CertsInfo { /** * (Output) * List of all properties in the object. * Structure is documented below. */ certInfos: outputs.apigee.KeystoresAliasesPkcs12CertsInfoCertInfo[]; } interface KeystoresAliasesPkcs12CertsInfoCertInfo { /** * (Output) * X.509 basic constraints extension. */ basicConstraints: string; /** * (Output) * X.509 notAfter validity period in milliseconds since epoch. */ expiryDate: string; /** * (Output) * Flag that specifies whether the certificate is valid. * Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid. */ isValid: string; /** * (Output) * X.509 issuer. */ issuer: string; /** * (Output) * Public key component of the X.509 subject public key info. */ publicKey: string; /** * (Output) * X.509 serial number. */ serialNumber: string; /** * (Output) * X.509 signatureAlgorithm. */ sigAlgName: string; /** * (Output) * X.509 subject. */ subject: string; /** * (Output) * X.509 subject alternative names (SANs) extension. */ subjectAlternativeNames: string[]; /** * (Output) * X.509 notBefore validity period in milliseconds since epoch. */ validFrom: string; /** * (Output) * X.509 version. 
*/ version: number; } interface KeystoresAliasesSelfSignedCertCertsInfo { /** * (Output) * List of all properties in the object. * Structure is documented below. */ certInfos: outputs.apigee.KeystoresAliasesSelfSignedCertCertsInfoCertInfo[]; } interface KeystoresAliasesSelfSignedCertCertsInfoCertInfo { /** * (Output) * X.509 basic constraints extension. */ basicConstraints: string; /** * (Output) * X.509 notAfter validity period in milliseconds since epoch. */ expiryDate: string; /** * (Output) * Flag that specifies whether the certificate is valid. * Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid. */ isValid: string; /** * (Output) * X.509 issuer. */ issuer: string; /** * (Output) * Public key component of the X.509 subject public key info. */ publicKey: string; /** * (Output) * X.509 serial number. */ serialNumber: string; /** * (Output) * X.509 signatureAlgorithm. */ sigAlgName: string; /** * Subject details. * Structure is documented below. */ subject: string; /** * (Output) * X.509 subject alternative names (SANs) extension. */ subjectAlternativeNames: string[]; /** * (Output) * X.509 notBefore validity period in milliseconds since epoch. */ validFrom: string; /** * (Output) * X.509 version. */ version: number; } interface KeystoresAliasesSelfSignedCertSubject { /** * Common name of the organization. Maximum length is 64 characters. */ commonName?: string; /** * Two-letter country code. Example, IN for India, US for United States of America. */ countryCode?: string; /** * Email address. Max 255 characters. */ email?: string; /** * City or town name. Maximum length is 128 characters. */ locality?: string; /** * Organization name. Maximum length is 64 characters. */ org?: string; /** * Organization team name. Maximum length is 64 characters. */ orgUnit?: string; /** * State or district name. Maximum length is 128 characters. 
*/ state?: string; } interface KeystoresAliasesSelfSignedCertSubjectAlternativeDnsNames { /** * Subject Alternative Name */ subjectAlternativeName?: string; } interface OrganizationProperties { /** * List of all properties in the object. * Structure is documented below. */ properties?: outputs.apigee.OrganizationPropertiesProperty[]; } interface OrganizationPropertiesProperty { /** * Name of the property. */ name?: string; /** * Value of the property. */ value?: string; } interface SecurityActionAllow { } interface SecurityActionConditionConfig { /** * A list of accessTokens. Limit 1000 per action. */ accessTokens?: string[]; /** * A list of API keys. Limit 1000 per action. */ apiKeys?: string[]; /** * A list of API Products. Limit 1000 per action. */ apiProducts?: string[]; /** * A list of ASN numbers to act on, e.g. 23. https://en.wikipedia.org/wiki/Autonomous_system_(Internet) * This uses int64 instead of uint32 because of https://linter.aip.dev/141/forbidden-types. */ asns?: string[]; /** * A list of Bot Reasons. Current options: Flooder, Brute Guessor, Static Content Scraper, * OAuth Abuser, Robot Abuser, TorListRule, Advanced Anomaly Detection, Advanced API Scraper, * Search Engine Crawlers, Public Clouds, Public Cloud AWS, Public Cloud Azure, and Public Cloud Google. */ botReasons?: string[]; /** * A list of developer apps. Limit 1000 per action. */ developerApps?: string[]; /** * A list of developers. Limit 1000 per action. */ developers?: string[]; /** * Act only on particular HTTP methods. E.g. A read-only API can block POST/PUT/DELETE methods. * Accepted values are: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE and PATCH. */ httpMethods?: string[]; /** * A list of IP addresses. This could be either IPv4 or IPv6. Limited to 100 per action. */ ipAddressRanges?: string[]; /** * A list of countries/region codes to act on, e.g. US. This follows https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2. 
*/ regionCodes?: string[]; /** * A list of user agents to deny. We look for exact matches. Limit 50 per action. */ userAgents?: string[]; } interface SecurityActionDeny { /** * The HTTP response code if the Action = DENY. */ responseCode?: number; } interface SecurityActionFlag { /** * A list of HTTP headers to be sent to the target in case of a FLAG SecurityAction. * Limit 5 headers per SecurityAction. * At least one is mandatory. * Structure is documented below. */ headers?: outputs.apigee.SecurityActionFlagHeader[]; } interface SecurityActionFlagHeader { /** * The header name to be sent to the target. */ name?: string; /** * The header value to be sent to the target. */ value?: string; } interface SecurityFeedbackFeedbackContext { /** * The attribute the user is providing feedback about. * Possible values are: `ATTRIBUTE_ENVIRONMENTS`, `ATTRIBUTE_IP_ADDRESS_RANGES`. */ attribute: string; /** * The values of the attribute the user is providing feedback about, separated by commas. */ values: string[]; } interface SecurityMonitoringConditionIncludeAllResources { } interface SecurityProfileV2ProfileAssessmentConfig { /** * The identifier for this object. Format specified above. */ assessment: string; /** * The weight of the assessment. * Possible values are: `MINOR`, `MODERATE`, `MAJOR`. */ weight: string; } interface SharedflowMetaData { /** * Time at which the API proxy was created, in milliseconds since epoch. */ createdAt: string; /** * Time at which the API proxy was most recently modified, in milliseconds since epoch. */ lastModifiedAt: string; /** * The type of entity described */ subType: string; } interface TargetServerSSlInfo { /** * The SSL/TLS cipher suites to be used. For programmable proxies, it must be one of the cipher suite names listed in: http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites. 
For configurable proxies, it must follow the configuration specified in: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration. This setting has no effect for configurable proxies when negotiating TLS 1.3. */ ciphers?: string[]; /** * Enables two-way TLS. */ clientAuthEnabled?: boolean; /** * The TLS Common Name of the certificate. * Structure is documented below. */ commonName?: outputs.apigee.TargetServerSSlInfoCommonName; /** * Enables TLS. If false, neither one-way nor two-way TLS will be enabled. */ enabled: boolean; /** * If true, TLS is strictly enforced. */ enforce?: boolean; /** * If true, Edge ignores TLS certificate errors. Valid when configuring TLS for target servers and target endpoints, and when configuring virtual hosts that use 2-way TLS. When used with a target endpoint/target server, if the backend system uses SNI and returns a cert with a subject Distinguished Name (DN) that does not match the hostname, there is no way to ignore the error and the connection fails. */ ignoreValidationErrors?: boolean; /** * Required if clientAuthEnabled is true. The resource ID for the alias containing the private key and cert. */ keyAlias?: string; /** * Required if clientAuthEnabled is true. The resource ID of the keystore. */ keyStore?: string; /** * The TLS versions to be used. */ protocols?: string[]; /** * The resource ID of the truststore. */ trustStore?: string; } interface TargetServerSSlInfoCommonName { /** * The TLS Common Name string of the certificate. */ value?: string; /** * Indicates whether the cert should be matched against as a wildcard cert. */ wildcardMatch?: boolean; } } export declare namespace apihub { interface ApiHubInstanceConfig { /** * Optional. The Customer Managed Encryption Key (CMEK) used for data encryption. * The CMEK name should follow the format of * `projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)`, * where the location must match the instance location. 
* If the CMEK is not provided, a GMEK will be created for the instance. */ cmekKeyName?: string; /** * Optional. If true, the search will be disabled for the instance. The default value * is false. */ disableSearch?: boolean; /** * Optional. Encryption type for the region. If the encryption type is CMEK, the * cmekKeyName must be provided. If no encryption type is provided, * GMEK will be used. * Possible values: * ENCRYPTION_TYPE_UNSPECIFIED * GMEK * CMEK */ encryptionType: string; /** * Optional. The name of the Vertex AI location where the data store is stored. */ vertexLocation?: string; } interface CurationEndpoint { /** * The details of the Application Integration endpoint to be triggered for * curation. * Structure is documented below. */ applicationIntegrationEndpointDetails: outputs.apihub.CurationEndpointApplicationIntegrationEndpointDetails; } interface CurationEndpointApplicationIntegrationEndpointDetails { /** * The API trigger ID of the Application Integration workflow. */ triggerId: string; /** * The endpoint URI should be a valid REST URI for triggering an Application * Integration. * Format: * `https://integrations.googleapis.com/v1/{name=projects/*/locations/*/integrations/*}:execute` * or * `https://{location}-integrations.googleapis.com/v1/{name=projects/*/locations/*/integrations/*}:execute` */ uri: string; } interface CurationPluginInstanceAction { /** * (Output) * The action ID that is using the curation. * This should map to one of the action IDs specified * in action configs in the plugin. */ actionId: string; /** * (Output) * Plugin instance that is using the curation. * Format is * `projects/{project}/locations/{location}/plugins/{plugin}/instances/{instance}` */ pluginInstance: string; } interface PluginActionsConfig { /** * The description of the operation performed by the action. */ description: string; /** * The display name of the action. */ displayName: string; /** * The id of the action. 
*/ id: string; /** * The trigger mode supported by the action. * Possible values: * TRIGGER_MODE_UNSPECIFIED * API_HUB_ON_DEMAND_TRIGGER * API_HUB_SCHEDULE_TRIGGER * NON_API_HUB_MANAGED */ triggerMode: string; } interface PluginConfigTemplate { /** * The list of additional configuration variables for the plugin's * configuration. * Structure is documented below. */ additionalConfigTemplates?: outputs.apihub.PluginConfigTemplateAdditionalConfigTemplate[]; /** * AuthConfigTemplate represents the authentication template for a plugin. * Structure is documented below. */ authConfigTemplate?: outputs.apihub.PluginConfigTemplateAuthConfigTemplate; } interface PluginConfigTemplateAdditionalConfigTemplate { /** * Description. */ description?: string; /** * Enum options. To be populated if `ValueType` is `ENUM`. * Structure is documented below. */ enumOptions?: outputs.apihub.PluginConfigTemplateAdditionalConfigTemplateEnumOption[]; /** * ID of the config variable. Must be unique within the configuration. */ id: string; /** * Multi select options. To be populated if `ValueType` is `MULTI_SELECT`. * Structure is documented below. */ multiSelectOptions?: outputs.apihub.PluginConfigTemplateAdditionalConfigTemplateMultiSelectOption[]; /** * Flag represents that this `ConfigVariable` must be provided for a * PluginInstance. */ required?: boolean; /** * Regular expression in RE2 syntax used for validating the `value` of a * `ConfigVariable`. */ validationRegex?: string; /** * Type of the parameter: string, int, bool etc. * Possible values: * VALUE_TYPE_UNSPECIFIED * STRING * INT * BOOL * SECRET * ENUM * MULTI_SELECT * MULTI_STRING * MULTI_INT */ valueType: string; } interface PluginConfigTemplateAdditionalConfigTemplateEnumOption { /** * Description of the option. */ description?: string; /** * Display name of the option. */ displayName: string; /** * Id of the option. 
*/ id: string; } interface PluginConfigTemplateAdditionalConfigTemplateMultiSelectOption { /** * Description of the option. */ description?: string; /** * Display name of the option. */ displayName: string; /** * Id of the option. */ id: string; } interface PluginConfigTemplateAuthConfigTemplate { /** * Config for Google service account authentication. * Structure is documented below. */ serviceAccount?: outputs.apihub.PluginConfigTemplateAuthConfigTemplateServiceAccount; /** * The list of authentication types supported by the plugin. */ supportedAuthTypes: string[]; } interface PluginConfigTemplateAuthConfigTemplateServiceAccount { /** * The service account to be used for authenticating request. * The `iam.serviceAccounts.getAccessToken` permission should be granted on * this service account to the impersonator service account. */ serviceAccount: string; } interface PluginDocumentation { /** * The uri of the externally hosted documentation. */ externalUri?: string; } interface PluginHostingService { /** * The URI of the service implemented by the plugin developer, used to * invoke the plugin's functionality. This information is only required for * user defined plugins. */ serviceUri?: string; } interface PluginInstanceAction { /** * This should map to one of the action id specified * in actionsConfig in the plugin. */ actionId: string; /** * The curation information for this plugin instance. * Structure is documented below. */ curationConfig: outputs.apihub.PluginInstanceActionCurationConfig; /** * (Output) * The execution status for the plugin instance. * Structure is documented below. */ hubInstanceActions: outputs.apihub.PluginInstanceActionHubInstanceAction[]; /** * The schedule for this plugin instance action. This can only be set if the * plugin supports API_HUB_SCHEDULE_TRIGGER mode for this action. */ scheduleCronExpression: string; /** * The time zone for the schedule cron expression. If not provided, UTC will * be used. 
* * * The `hubInstanceAction` block contains: */ scheduleTimeZone: string; /** * (Output) * The current state of the plugin action in the plugin instance. * Possible values: * STATE_UNSPECIFIED * ENABLED * DISABLED * ENABLING * DISABLING * ERROR */ state: string; } interface PluginInstanceActionCurationConfig { /** * Possible values: * CURATION_TYPE_UNSPECIFIED * DEFAULT_CURATION_FOR_API_METADATA * CUSTOM_CURATION_FOR_API_METADATA */ curationType: string; /** * Custom curation information for this plugin instance. * Structure is documented below. */ customCuration?: outputs.apihub.PluginInstanceActionCurationConfigCustomCuration; } interface PluginInstanceActionCurationConfigCustomCuration { /** * The unique name of the curation resource. This will be the name of the * curation resource in the format: * `projects/{project}/locations/{location}/curations/{curation}` */ curation: string; } interface PluginInstanceActionHubInstanceAction { /** * The current state of the execution. * Possible values: * CURRENT_EXECUTION_STATE_UNSPECIFIED * RUNNING * NOT_RUNNING */ currentExecutionState: string; /** * The result of the last execution of the plugin instance. */ lastExecutions: outputs.apihub.PluginInstanceActionHubInstanceActionLastExecution[]; } interface PluginInstanceActionHubInstanceActionLastExecution { /** * The last execution end time of the plugin instance. */ endTime: string; /** * Error message describing the failure, if any, during Create, Delete or * ApplyConfig operation corresponding to the plugin instance.This field will * only be populated if the plugin instance is in the ERROR or FAILED state. */ errorMessage: string; /** * The result of the last execution of the plugin instance. * Possible values: * RESULT_UNSPECIFIED * SUCCEEDED * FAILED */ result: string; /** * The last execution start time of the plugin instance. */ startTime: string; } interface PluginInstanceAuthConfig { /** * Config for authentication with API key. * Structure is documented below. 
*/ apiKeyConfig?: outputs.apihub.PluginInstanceAuthConfigApiKeyConfig; /** * Possible values: * AUTH_TYPE_UNSPECIFIED * NO_AUTH * GOOGLE_SERVICE_ACCOUNT * USER_PASSWORD * API_KEY * OAUTH2_CLIENT_CREDENTIALS */ authType: string; /** * Config for Google service account authentication. * Structure is documented below. */ googleServiceAccountConfig?: outputs.apihub.PluginInstanceAuthConfigGoogleServiceAccountConfig; /** * Parameters to support Oauth 2.0 client credentials grant authentication. * See https://tools.ietf.org/html/rfc6749#section-1.3.4 for more details. * Structure is documented below. */ oauth2ClientCredentialsConfig?: outputs.apihub.PluginInstanceAuthConfigOauth2ClientCredentialsConfig; /** * Parameters to support Username and Password Authentication. * Structure is documented below. */ userPasswordConfig?: outputs.apihub.PluginInstanceAuthConfigUserPasswordConfig; } interface PluginInstanceAuthConfigApiKeyConfig { /** * Secret provides a reference to entries in Secret Manager. * Structure is documented below. */ apiKey: outputs.apihub.PluginInstanceAuthConfigApiKeyConfigApiKey; /** * The location of the API key. * The default value is QUERY. * Possible values: * HTTP_ELEMENT_LOCATION_UNSPECIFIED * QUERY * HEADER * PATH * BODY * COOKIE */ httpElementLocation: string; /** * The parameter name of the API key. * E.g. If the API request is "https://example.com/act?api_key=", * "apiKey" would be the parameter name. */ name: string; } interface PluginInstanceAuthConfigApiKeyConfigApiKey { /** * The resource name of the secret version in the format, * format as: `projects/*/secrets/*/versions/*`. * * The `oauth2ClientCredentialsConfig` block supports: */ secretVersion: string; } interface PluginInstanceAuthConfigGoogleServiceAccountConfig { /** * The service account to be used for authenticating request. * The `iam.serviceAccounts.getAccessToken` permission should be granted on * this service account to the impersonator service account. 
*/ serviceAccount: string; } interface PluginInstanceAuthConfigOauth2ClientCredentialsConfig { /** * The client identifier. */ clientId: string; /** * Secret provides a reference to entries in Secret Manager. */ clientSecret: outputs.apihub.PluginInstanceAuthConfigOauth2ClientCredentialsConfigClientSecret; } interface PluginInstanceAuthConfigOauth2ClientCredentialsConfigClientSecret { /** * The resource name of the secret version in the format, * format as: `projects/*/secrets/*/versions/*`. */ secretVersion: string; } interface PluginInstanceAuthConfigUserPasswordConfig { /** * Secret provides a reference to entries in Secret Manager. * Structure is documented below. */ password: outputs.apihub.PluginInstanceAuthConfigUserPasswordConfigPassword; /** * Username. */ username: string; } interface PluginInstanceAuthConfigUserPasswordConfigPassword { /** * The resource name of the secret version in the format, * format as: `projects/*/secrets/*/versions/*`. */ secretVersion: string; } } export declare namespace appengine { interface ApplicationFeatureSettings { /** * Set to false to use the legacy health check instead of the readiness * and liveness checks. */ splitHealthChecks: boolean; } interface ApplicationIap { /** * (Optional) Whether the serving infrastructure will authenticate and authorize all incoming requests. * (default is false) */ enabled?: boolean; /** * OAuth2 client ID to use for the authentication flow. */ oauth2ClientId: string; /** * OAuth2 client secret to use for the authentication flow. * The SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. */ oauth2ClientSecret: string; /** * Hex-encoded SHA-256 hash of the client secret. */ oauth2ClientSecretSha256: string; } interface ApplicationUrlDispatchRule { domain: string; path: string; service: string; } interface ApplicationUrlDispatchRulesDispatchRule { /** * Domain name to match against. The wildcard "*" is supported if specified before a period: "*.". 
* Defaults to matching all domains: "*". */ domain?: string; /** * Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. * The sum of the lengths of the domain and path may not exceed 100 characters. */ path: string; /** * Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. * The sum of the lengths of the domain and path may not exceed 100 characters. */ service: string; } interface DomainMappingResourceRecord { /** * Relative name of the object affected by this record. Only applicable for CNAME records. Example: 'www'. */ name?: string; /** * Data for this record. Values vary by record type, as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1). */ rrdata?: string; /** * Resource record type. Example: `AAAA`. * Possible values are: `A`, `AAAA`, `CNAME`. */ type?: string; } interface DomainMappingSslSettings { /** * ID of the AuthorizedCertificate resource configuring SSL for the application. Clearing this field will * remove SSL support. * By default, a managed certificate is automatically created for every domain mapping. To omit SSL support * or to configure SSL manually, specify `SslManagementType.MANUAL` on a `CREATE` or `UPDATE` request. You must be * authorized to administer the `AuthorizedCertificate` resource to manually map it to a DomainMapping resource. * Example: 12345. */ certificateId: string; /** * (Output) * ID of the managed `AuthorizedCertificate` resource currently being provisioned, if applicable. Until the new * managed certificate has been successfully provisioned, the previous SSL state will be preserved. Once the * provisioning process completes, the `certificateId` field will reflect the new managed certificate and this * field will be left empty. To remove SSL support while there is still a pending managed certificate, clear the * `certificateId` field with an update request. 
*/ pendingManagedCertificateId: string; /** * SSL management type for this domain. If `AUTOMATIC`, a managed certificate is automatically provisioned. * If `MANUAL`, `certificateId` must be manually specified in order to configure SSL for this domain. * Possible values are: `AUTOMATIC`, `MANUAL`. */ sslManagementType: string; } interface EngineSplitTrafficSplit { /** * Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits. */ allocations: { [key: string]: string; }; /** * Mechanism used to determine which version a request is sent to. The traffic selection algorithm will be stable for either type until allocations are changed. * Possible values are: `UNSPECIFIED`, `COOKIE`, `IP`, `RANDOM`. */ shardBy?: string; } interface FlexibleAppVersionApiConfig { /** * Action to take when users access resources that require authentication. * Default value is `AUTH_FAIL_ACTION_REDIRECT`. * Possible values are: `AUTH_FAIL_ACTION_REDIRECT`, `AUTH_FAIL_ACTION_UNAUTHORIZED`. */ authFailAction?: string; /** * Level of login required to access this resource. * Default value is `LOGIN_OPTIONAL`. * Possible values are: `LOGIN_OPTIONAL`, `LOGIN_ADMIN`, `LOGIN_REQUIRED`. */ login?: string; /** * Path to the script from the application root directory. */ script: string; /** * Security (HTTPS) enforcement for this URL. * Possible values are: `SECURE_DEFAULT`, `SECURE_NEVER`, `SECURE_OPTIONAL`, `SECURE_ALWAYS`. */ securityLevel?: string; /** * URL to serve the endpoint at. 
*/ url?: string; } interface FlexibleAppVersionAutomaticScaling { /** * The time period that the Autoscaler should wait before it starts collecting information from a new instance. * This prevents the autoscaler from collecting information when the instance is initializing, * during which the collected usage would not be reliable. Default: 120s */ coolDownPeriod?: string; /** * Target scaling by CPU usage. * Structure is documented below. */ cpuUtilization: outputs.appengine.FlexibleAppVersionAutomaticScalingCpuUtilization; /** * Target scaling by disk usage. * Structure is documented below. */ diskUtilization?: outputs.appengine.FlexibleAppVersionAutomaticScalingDiskUtilization; /** * Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. * Defaults to a runtime-specific value. */ maxConcurrentRequests: number; /** * Maximum number of idle instances that should be maintained for this version. */ maxIdleInstances?: number; /** * Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. */ maxPendingLatency?: string; /** * Maximum number of instances that should be started to handle requests for this version. Default: 20 */ maxTotalInstances?: number; /** * Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service. */ minIdleInstances?: number; /** * Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. */ minPendingLatency?: string; /** * Minimum number of running instances that should be maintained for this version. Default: 2 */ minTotalInstances?: number; /** * Target scaling by network usage. * Structure is documented below. */ networkUtilization?: outputs.appengine.FlexibleAppVersionAutomaticScalingNetworkUtilization; /** * Target scaling by request utilization. * Structure is documented below. 
*/ requestUtilization?: outputs.appengine.FlexibleAppVersionAutomaticScalingRequestUtilization; } interface FlexibleAppVersionAutomaticScalingCpuUtilization { /** * Period of time over which CPU utilization is calculated. */ aggregationWindowLength?: string; /** * Target CPU utilization ratio to maintain when scaling. Must be between 0 and 1. */ targetUtilization: number; } interface FlexibleAppVersionAutomaticScalingDiskUtilization { /** * Target bytes read per second. */ targetReadBytesPerSecond?: number; /** * Target ops read per seconds. */ targetReadOpsPerSecond?: number; /** * Target bytes written per second. */ targetWriteBytesPerSecond?: number; /** * Target ops written per second. */ targetWriteOpsPerSecond?: number; } interface FlexibleAppVersionAutomaticScalingNetworkUtilization { /** * Target bytes received per second. */ targetReceivedBytesPerSecond?: number; /** * Target packets received per second. */ targetReceivedPacketsPerSecond?: number; /** * Target bytes sent per second. */ targetSentBytesPerSecond?: number; /** * Target packets sent per second. */ targetSentPacketsPerSecond?: number; } interface FlexibleAppVersionAutomaticScalingRequestUtilization { /** * Target number of concurrent requests. */ targetConcurrentRequests?: number; /** * Target requests per second. */ targetRequestCountPerSecond?: string; } interface FlexibleAppVersionDeployment { /** * Options for the build operations performed as a part of the version deployment. Only applicable when creating a version using source code directly. * Structure is documented below. */ cloudBuildOptions?: outputs.appengine.FlexibleAppVersionDeploymentCloudBuildOptions; /** * The Docker image for the container that runs the version. * Structure is documented below. */ container: outputs.appengine.FlexibleAppVersionDeploymentContainer; /** * Manifest of the files stored in Google Cloud Storage that are included as part of this version. 
* All files must be readable using the credentials supplied with this call. * Structure is documented below. */ files?: outputs.appengine.FlexibleAppVersionDeploymentFile[]; /** * Zip File * Structure is documented below. */ zip?: outputs.appengine.FlexibleAppVersionDeploymentZip; } interface FlexibleAppVersionDeploymentCloudBuildOptions { /** * Path to the yaml file used in deployment, used to determine runtime configuration details. */ appYamlPath: string; /** * The Cloud Build timeout used as part of any dependent builds performed by version creation. Defaults to 10 minutes. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ cloudBuildTimeout?: string; } interface FlexibleAppVersionDeploymentContainer { /** * URI to the hosted container image in Google Container Registry. The URI must be fully qualified and include a tag or digest. * Examples: "gcr.io/my-project/image:tag" or "gcr.io/my-project/image@digest" */ image: string; } interface FlexibleAppVersionDeploymentFile { /** * The identifier for this object. Format specified above. */ name: string; /** * SHA1 checksum of the file */ sha1Sum?: string; /** * Source URL */ sourceUrl: string; } interface FlexibleAppVersionDeploymentZip { /** * files count */ filesCount?: number; /** * Source URL */ sourceUrl: string; } interface FlexibleAppVersionEndpointsApiService { /** * Endpoints service configuration ID as specified by the Service Management API. For example "2016-09-19r1". * By default, the rollout strategy for Endpoints is "FIXED". This means that Endpoints starts up with a particular configuration ID. * When a new configuration is rolled out, Endpoints must be given the new configuration ID. The configId field is used to give the configuration ID * and is required in this case. * Endpoints also has a rollout strategy called "MANAGED". When using this, Endpoints fetches the latest configuration and does not need * the configuration ID. 
In this case, configId must be omitted. */ configId?: string; /** * Enable or disable trace sampling. By default, this is set to false for enabled. */ disableTraceSampling?: boolean; /** * Endpoints service name which is the name of the "service" resource in the Service Management API. * For example "myapi.endpoints.myproject.cloud.goog" */ name: string; /** * Endpoints rollout strategy. If FIXED, configId must be specified. If MANAGED, configId must be omitted. * Default value is `FIXED`. * Possible values are: `FIXED`, `MANAGED`. */ rolloutStrategy?: string; } interface FlexibleAppVersionEntrypoint { /** * The format should be a shell command that can be fed to bash -c. */ shell: string; } interface FlexibleAppVersionFlexibleRuntimeSettings { /** * Operating System of the application runtime. */ operatingSystem?: string; /** * The runtime version of an App Engine flexible application. */ runtimeVersion?: string; } interface FlexibleAppVersionHandler { /** * Actions to take when the user is not logged in. * Possible values are: `AUTH_FAIL_ACTION_REDIRECT`, `AUTH_FAIL_ACTION_UNAUTHORIZED`. */ authFailAction?: string; /** * Methods to restrict access to a URL based on login status. * Possible values are: `LOGIN_OPTIONAL`, `LOGIN_ADMIN`, `LOGIN_REQUIRED`. */ login?: string; /** * 30x code to use when performing redirects for the secure field. * Possible values are: `REDIRECT_HTTP_RESPONSE_CODE_301`, `REDIRECT_HTTP_RESPONSE_CODE_302`, `REDIRECT_HTTP_RESPONSE_CODE_303`, `REDIRECT_HTTP_RESPONSE_CODE_307`. */ redirectHttpResponseCode?: string; /** * Executes a script to handle the requests that match this URL pattern. * Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto". * Structure is documented below. */ script?: outputs.appengine.FlexibleAppVersionHandlerScript; /** * Security (HTTPS) enforcement for this URL. * Possible values are: `SECURE_DEFAULT`, `SECURE_NEVER`, `SECURE_OPTIONAL`, `SECURE_ALWAYS`. 
*/ securityLevel?: string; /** * Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. * Static file handlers describe which files in the application directory are static files, and which URLs serve them. * Structure is documented below. */ staticFiles?: outputs.appengine.FlexibleAppVersionHandlerStaticFiles; /** * URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. * All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path. */ urlRegex?: string; } interface FlexibleAppVersionHandlerScript { /** * Path to the script from the application root directory. */ scriptPath: string; } interface FlexibleAppVersionHandlerStaticFiles { /** * Whether files should also be uploaded as code data. By default, files declared in static file handlers are * uploaded as static data and are only served to end users; they cannot be read by the application. If enabled, * uploads are charged against both your code and static data storage resource quotas. */ applicationReadable?: boolean; /** * Time a static file served by this handler should be cached by web proxies and browsers. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s". * Default is '0s' */ expiration?: string; /** * HTTP headers to use for all responses from these URLs. * An object containing a list of "key: value" pairs. */ httpHeaders?: { [key: string]: string; }; /** * MIME type used to serve all files served by this handler. * Defaults to file-specific MIME types, which are derived from each file's filename extension. */ mimeType?: string; /** * Path to the static files matched by the URL pattern, from the application root directory. * The path can refer to text matched in groupings in the URL pattern. 
*/ path?: string; /** * Whether this handler should match the request if the file referenced by the handler does not exist. */ requireMatchingFile?: boolean; /** * Regular expression that matches the file paths for all files that should be referenced by this handler. */ uploadPathRegex?: string; } interface FlexibleAppVersionLivenessCheck { /** * Interval between health checks. */ checkInterval?: string; /** * Number of consecutive failed checks required before considering the VM unhealthy. Default: 4. */ failureThreshold?: number; /** * Host header to send when performing a HTTP Readiness check. Example: "myapp.appspot.com" */ host?: string; /** * The initial delay before starting to execute the checks. Default: "300s" */ initialDelay?: string; /** * The request path. */ path: string; /** * Number of consecutive successful checks required before considering the VM healthy. Default: 2. */ successThreshold?: number; /** * Time before the check is considered failed. Default: "4s" */ timeout?: string; } interface FlexibleAppVersionManualScaling { /** * Number of instances to assign to the service at the start. * **Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 * Modules API set_num_instances() you must use `lifecycle.ignore_changes = ["manualScaling"[0].instances]` to prevent drift detection. */ instances: number; } interface FlexibleAppVersionNetwork { /** * List of ports, or port pairs, to forward from the virtual machine to the application container. */ forwardedPorts?: string[]; /** * (Optional, Beta) * Prevent instances from receiving an ephemeral external IP address. * Possible values are: `EXTERNAL`, `INTERNAL`. */ instanceIpMode?: string; /** * Tag to apply to the instance during creation. */ instanceTag?: string; /** * Google Compute Engine network where the virtual machines are created. Specify the short name, not the resource path. */ name: string; /** * Enable session affinity. 
*/ sessionAffinity?: boolean; /** * Google Cloud Platform sub-network where the virtual machines are created. Specify the short name, not the resource path. * If the network that the instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range. * If the network that the instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetworkName) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network. * If the network that the instance is being created in is a custom Subnet Mode Network, then the subnetworkName must be specified and the IP address is created from the IPCidrRange of the subnetwork. * If specified, the subnetwork must exist in the same region as the App Engine flexible environment application. */ subnetwork?: string; } interface FlexibleAppVersionReadinessCheck { /** * A maximum time limit on application initialization, measured from moment the application successfully * replies to a healthcheck until it is ready to serve traffic. Default: "300s" */ appStartTimeout?: string; /** * Interval between health checks. Default: "5s". */ checkInterval?: string; /** * Number of consecutive failed checks required before removing traffic. Default: 2. */ failureThreshold?: number; /** * Host header to send when performing a HTTP Readiness check. Example: "myapp.appspot.com" */ host?: string; /** * The request path. */ path: string; /** * Number of consecutive successful checks required before receiving traffic. Default: 2. */ successThreshold?: number; /** * Time before the check is considered failed. Default: "4s" */ timeout?: string; } interface FlexibleAppVersionResources { /** * Number of CPU cores needed. */ cpu?: number; /** * Disk size (GB) needed. */ diskGb?: number; /** * Memory (GB) needed. 
*/ memoryGb?: number; /** * List of volumes mounted within the instance (e.g. a 'tmpfs' volume; see the volume structure below for name, size and type). * Structure is documented below. */ volumes?: outputs.appengine.FlexibleAppVersionResourcesVolume[]; } interface FlexibleAppVersionResourcesVolume { /** * Unique name for the volume. */ name: string; /** * Volume size in gigabytes. */ sizeGb: number; /** * Underlying volume type, e.g. 'tmpfs'. */ volumeType: string; } interface FlexibleAppVersionVpcAccessConnector { /** * Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1. */ name: string; } interface ServiceNetworkSettingsNetworkSettings { /** * The ingress settings for version or service. * Default value is `INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED`. * Possible values are: `INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED`, `INGRESS_TRAFFIC_ALLOWED_ALL`, `INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY`, `INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB`. */ ingressTrafficAllowed?: string; } interface StandardAppVersionAutomaticScaling { /** * Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. * Defaults to a runtime-specific value. */ maxConcurrentRequests?: number; /** * Maximum number of idle instances that should be maintained for this version. */ maxIdleInstances?: number; /** * Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ maxPendingLatency?: string; /** * Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service. */ minIdleInstances?: number; /** * Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". 
*/ minPendingLatency?: string; /** * Scheduler settings for standard environment. * Structure is documented below. */ standardSchedulerSettings?: outputs.appengine.StandardAppVersionAutomaticScalingStandardSchedulerSettings; } interface StandardAppVersionAutomaticScalingStandardSchedulerSettings { /** * Maximum number of instances to run for this version. Set to zero to disable maxInstances configuration. * **Note:** Starting from March 2025, App Engine sets the maxInstances default for standard environment deployments to 20. This change doesn't impact existing apps. To override the default, specify a new value between 0 and 2147483647, and deploy a new version or redeploy over an existing version. To disable the maxInstances default configuration setting, specify the maximum permitted value 2147483647. */ maxInstances: number; /** * Minimum number of instances to run for this version. Set to zero to disable minInstances configuration. */ minInstances?: number; /** * Target CPU utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value. */ targetCpuUtilization?: number; /** * Target throughput utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value. */ targetThroughputUtilization?: number; } interface StandardAppVersionBasicScaling { /** * Duration of time after the last request that an instance must wait before the instance is shut down. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s. */ idleTimeout?: string; /** * Maximum number of instances to create for this version. Must be in the range [1.0, 200.0]. */ maxInstances: number; } interface StandardAppVersionDeployment { /** * Manifest of the files stored in Google Cloud Storage that are included as part of this version. * All files must be readable using the credentials supplied with this call. * Structure is documented below. 
*/ files?: outputs.appengine.StandardAppVersionDeploymentFile[]; /** * Zip File * Structure is documented below. */ zip?: outputs.appengine.StandardAppVersionDeploymentZip; } interface StandardAppVersionDeploymentFile { /** * The identifier for this object. Format specified above. */ name: string; /** * SHA1 checksum of the file */ sha1Sum?: string; /** * Source URL */ sourceUrl: string; } interface StandardAppVersionDeploymentZip { /** * files count */ filesCount?: number; /** * Source URL */ sourceUrl: string; } interface StandardAppVersionEntrypoint { /** * The format should be a shell command that can be fed to bash -c. */ shell: string; } interface StandardAppVersionHandler { /** * Actions to take when the user is not logged in. * Possible values are: `AUTH_FAIL_ACTION_REDIRECT`, `AUTH_FAIL_ACTION_UNAUTHORIZED`. */ authFailAction?: string; /** * Methods to restrict access to a URL based on login status. * Possible values are: `LOGIN_OPTIONAL`, `LOGIN_ADMIN`, `LOGIN_REQUIRED`. */ login?: string; /** * 30x code to use when performing redirects for the secure field. * Possible values are: `REDIRECT_HTTP_RESPONSE_CODE_301`, `REDIRECT_HTTP_RESPONSE_CODE_302`, `REDIRECT_HTTP_RESPONSE_CODE_303`, `REDIRECT_HTTP_RESPONSE_CODE_307`. */ redirectHttpResponseCode?: string; /** * Executes a script to handle the requests that match this URL pattern. * Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto". * Structure is documented below. */ script?: outputs.appengine.StandardAppVersionHandlerScript; /** * Security (HTTPS) enforcement for this URL. * Possible values are: `SECURE_DEFAULT`, `SECURE_NEVER`, `SECURE_OPTIONAL`, `SECURE_ALWAYS`. */ securityLevel?: string; /** * Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them. 
* Structure is documented below. */ staticFiles?: outputs.appengine.StandardAppVersionHandlerStaticFiles; /** * URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. * All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path. */ urlRegex?: string; } interface StandardAppVersionHandlerScript { /** * Path to the script from the application root directory. */ scriptPath: string; } interface StandardAppVersionHandlerStaticFiles { /** * Whether files should also be uploaded as code data. By default, files declared in static file handlers are uploaded as * static data and are only served to end users; they cannot be read by the application. If enabled, uploads are charged * against both your code and static data storage resource quotas. */ applicationReadable?: boolean; /** * Time a static file served by this handler should be cached by web proxies and browsers. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s". */ expiration?: string; /** * HTTP headers to use for all responses from these URLs. * An object containing a list of "key: value" pairs. */ httpHeaders?: { [key: string]: string; }; /** * MIME type used to serve all files served by this handler. * Defaults to file-specific MIME types, which are derived from each file's filename extension. */ mimeType?: string; /** * Path to the static files matched by the URL pattern, from the application root directory. The path can refer to text matched in groupings in the URL pattern. */ path?: string; /** * Whether this handler should match the request if the file referenced by the handler does not exist. */ requireMatchingFile?: boolean; /** * Regular expression that matches the file paths for all files that should be referenced by this handler. 
*/ uploadPathRegex?: string; } interface StandardAppVersionLibrary { /** * Name of the library. Example "django". */ name?: string; /** * Version of the library to select, or "latest". */ version?: string; } interface StandardAppVersionManualScaling { /** * Number of instances to assign to the service at the start. * **Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 * Modules API set_num_instances() you must use `lifecycle.ignore_changes = ["manualScaling"[0].instances]` to prevent drift detection. */ instances: number; } interface StandardAppVersionVpcAccessConnector { /** * The egress setting for the connector, controlling what traffic is diverted through it. */ egressSetting?: string; /** * Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1. */ name: string; } } export declare namespace apphub { interface ApplicationAttributes { /** * Optional. Business team that ensures user needs are met and value is delivered * Structure is documented below. */ businessOwners?: outputs.apphub.ApplicationAttributesBusinessOwner[]; /** * Criticality of the Application, Service, or Workload * Structure is documented below. */ criticality?: outputs.apphub.ApplicationAttributesCriticality; /** * Optional. Developer team that owns development and coding. * Structure is documented below. */ developerOwners?: outputs.apphub.ApplicationAttributesDeveloperOwner[]; /** * Environment of the Application, Service, or Workload * Structure is documented below. */ environment?: outputs.apphub.ApplicationAttributesEnvironment; /** * Optional. Operator team that ensures runtime and operations. * Structure is documented below. */ operatorOwners?: outputs.apphub.ApplicationAttributesOperatorOwner[]; } interface ApplicationAttributesBusinessOwner { /** * Optional. Contact's name. */ displayName?: string; /** * Required. Email address of the contacts. 
*/ email: string; } interface ApplicationAttributesCriticality { /** * Criticality type. * Possible values are: `MISSION_CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ type: string; } interface ApplicationAttributesDeveloperOwner { /** * Optional. Contact's name. */ displayName?: string; /** * Required. Email address of the contacts. */ email: string; } interface ApplicationAttributesEnvironment { /** * Environment type. * Possible values are: `PRODUCTION`, `STAGING`, `TEST`, `DEVELOPMENT`. */ type: string; } interface ApplicationAttributesOperatorOwner { /** * Optional. Contact's name. */ displayName?: string; /** * Required. Email address of the contacts. */ email: string; } interface ApplicationScope { /** * Required. Scope Type. * Possible values: * REGIONAL * GLOBAL * Possible values are: `REGIONAL`, `GLOBAL`. */ type: string; } interface GetApplicationAttribute { /** * Optional. Business team that ensures user needs are met and value is delivered */ businessOwners: outputs.apphub.GetApplicationAttributeBusinessOwner[]; /** * Criticality of the Application, Service, or Workload */ criticalities: outputs.apphub.GetApplicationAttributeCriticality[]; /** * Optional. Developer team that owns development and coding. */ developerOwners: outputs.apphub.GetApplicationAttributeDeveloperOwner[]; /** * Environment of the Application, Service, or Workload */ environments: outputs.apphub.GetApplicationAttributeEnvironment[]; /** * Optional. Operator team that ensures runtime and operations. */ operatorOwners: outputs.apphub.GetApplicationAttributeOperatorOwner[]; } interface GetApplicationAttributeBusinessOwner { /** * Optional. Contact's name. */ displayName: string; /** * Required. Email address of the contacts. */ email: string; } interface GetApplicationAttributeCriticality { /** * Criticality type. Possible values: ["MISSION_CRITICAL", "HIGH", "MEDIUM", "LOW"] */ type: string; } interface GetApplicationAttributeDeveloperOwner { /** * Optional. Contact's name. 
*/ displayName: string; /** * Required. Email address of the contacts. */ email: string; } interface GetApplicationAttributeEnvironment { /** * Environment type. Possible values: ["PRODUCTION", "STAGING", "TEST", "DEVELOPMENT"] */ type: string; } interface GetApplicationAttributeOperatorOwner { /** * Optional. Contact's name. */ displayName: string; /** * Required. Email address of the contacts. */ email: string; } interface GetApplicationScope { /** * Required. Scope Type. * Possible values: * REGIONAL * GLOBAL Possible values: ["REGIONAL", "GLOBAL"] */ type: string; } interface GetDiscoveredServiceServiceProperty { /** * The service project identifier that the underlying cloud resource resides in. */ gcpProject: string; /** * The location of the discovered service. */ location: string; /** * The location that the underlying resource resides in if it is zonal. */ zone: string; } interface GetDiscoveredServiceServiceReference { /** * Additional path under the resource URI. */ path: string; /** * The underlying resource URI. */ uri: string; } interface GetDiscoveredWorkloadWorkloadProperty { /** * The service project identifier that the underlying cloud resource resides in. */ gcpProject: string; /** * The location of the discovered workload. */ location: string; /** * The location that the underlying resource resides in if it is zonal. */ zone: string; } interface GetDiscoveredWorkloadWorkloadReference { /** * The underlying resource URI. */ uri: string; } interface ServiceAttributes { /** * Business team that ensures user needs are met and value is delivered * Structure is documented below. */ businessOwners?: outputs.apphub.ServiceAttributesBusinessOwner[]; /** * Criticality of the Application, Service, or Workload * Structure is documented below. */ criticality?: outputs.apphub.ServiceAttributesCriticality; /** * Developer team that owns development and coding. * Structure is documented below. 
*/ developerOwners?: outputs.apphub.ServiceAttributesDeveloperOwner[]; /** * Environment of the Application, Service, or Workload * Structure is documented below. */ environment?: outputs.apphub.ServiceAttributesEnvironment; /** * Operator team that ensures runtime and operations. * Structure is documented below. */ operatorOwners?: outputs.apphub.ServiceAttributesOperatorOwner[]; } interface ServiceAttributesBusinessOwner { /** * Contact's name. */ displayName?: string; /** * Required. Email address of the contacts. */ email: string; } interface ServiceAttributesCriticality { /** * Criticality type. * Possible values are: `MISSION_CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ type: string; } interface ServiceAttributesDeveloperOwner { /** * Contact's name. */ displayName?: string; /** * Required. Email address of the contacts. */ email: string; } interface ServiceAttributesEnvironment { /** * Environment type. * Possible values are: `PRODUCTION`, `STAGING`, `TEST`, `DEVELOPMENT`. */ type: string; } interface ServiceAttributesOperatorOwner { /** * Contact's name. */ displayName?: string; /** * Required. Email address of the contacts. */ email: string; } interface ServiceServiceProperty { /** * (Output) * Output only. Additional metadata specific to the resource type. * Structure is documented below. */ extendedMetadatas: outputs.apphub.ServiceServicePropertyExtendedMetadata[]; /** * (Output) * Output only. The type of the service. * Structure is documented below. */ functionalTypes: outputs.apphub.ServiceServicePropertyFunctionalType[]; /** * (Output) * Output only. The service project identifier that the underlying cloud resource resides in. */ gcpProject: string; /** * (Output) * The identity associated with the service. * Structure is documented below. */ identities: outputs.apphub.ServiceServicePropertyIdentity[]; /** * Part of `parent`. Full resource name of a parent Application. 
Example: projects/{HOST_PROJECT_ID}/locations/{LOCATION}/applications/{APPLICATION_ID} */ location: string; /** * (Output) * Output only. The registration type of the service. * Structure is documented below. */ registrationTypes: outputs.apphub.ServiceServicePropertyRegistrationType[]; /** * (Output) * Output only. The location that the underlying resource resides in if it is zonal (for example, us-west1-a). */ zone: string; } interface ServiceServicePropertyExtendedMetadata { /** * (Output) * The key of the extended metadata. */ key: string; /** * (Output) * The value of the extended metadata. * Structure is documented below. */ values: outputs.apphub.ServiceServicePropertyExtendedMetadataValue[]; } interface ServiceServicePropertyExtendedMetadataValue { /** * (Output) * The resource name for the Extended Metadata Schema. */ extendedMetadataSchema: string; /** * (Output) * The metadata contents as a JSON string. */ metadataStruct: string; } interface ServiceServicePropertyFunctionalType { /** * (Output) * Output only. The functional type of a service or workload. */ type: string; } interface ServiceServicePropertyIdentity { /** * (Output) * The principal of the identity. */ principal: string; } interface ServiceServicePropertyRegistrationType { /** * (Output) * Output only. The registration type of a service. */ type: string; } interface ServiceServiceReference { /** * (Output) * Output only. The underlying resource URI (For example, URI of Forwarding Rule, URL Map, * and Backend Service). */ uri: string; } interface WorkloadAttributes { /** * Business team that ensures user needs are met and value is delivered * Structure is documented below. */ businessOwners?: outputs.apphub.WorkloadAttributesBusinessOwner[]; /** * Criticality of the Application, Service, or Workload * Structure is documented below. */ criticality?: outputs.apphub.WorkloadAttributesCriticality; /** * Developer team that owns development and coding. * Structure is documented below. 
*/ developerOwners?: outputs.apphub.WorkloadAttributesDeveloperOwner[]; /** * Environment of the Application, Service, or Workload * Structure is documented below. */ environment?: outputs.apphub.WorkloadAttributesEnvironment; /** * Operator team that ensures runtime and operations. * Structure is documented below. */ operatorOwners?: outputs.apphub.WorkloadAttributesOperatorOwner[]; } interface WorkloadAttributesBusinessOwner { /** * Contact's name. */ displayName?: string; /** * Email address of the contacts. */ email: string; } interface WorkloadAttributesCriticality { /** * Criticality type. * Possible values are: `MISSION_CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ type: string; } interface WorkloadAttributesDeveloperOwner { /** * Contact's name. */ displayName?: string; /** * Email address of the contacts. */ email: string; } interface WorkloadAttributesEnvironment { /** * Environment type. * Possible values are: `PRODUCTION`, `STAGING`, `TEST`, `DEVELOPMENT`. */ type: string; } interface WorkloadAttributesOperatorOwner { /** * Contact's name. */ displayName?: string; /** * Email address of the contacts. */ email: string; } interface WorkloadWorkloadProperty { /** * (Output) * Output only. Additional metadata specific to the resource type. * Structure is documented below. */ extendedMetadatas: outputs.apphub.WorkloadWorkloadPropertyExtendedMetadata[]; /** * (Output) * Output only. The functional type of a service or workload. * Structure is documented below. */ functionalTypes: outputs.apphub.WorkloadWorkloadPropertyFunctionalType[]; /** * (Output) * Output only. The service project identifier that the underlying cloud resource resides in. Empty for non cloud resources. */ gcpProject: string; /** * (Output) * The identity associated with the workload. * Structure is documented below. */ identities: outputs.apphub.WorkloadWorkloadPropertyIdentity[]; /** * Part of `parent`. Full resource name of a parent Application. 
Example: projects/{HOST_PROJECT_ID}/locations/{LOCATION}/applications/{APPLICATION_ID} */ location: string; /** * (Output) * Output only. The location that the underlying compute resource resides in if it is zonal (e.g. us-west1-a). */ zone: string; } interface WorkloadWorkloadPropertyExtendedMetadata { /** * (Output) * The key of the extended metadata. */ key: string; /** * (Output) * The value of the extended metadata. * Structure is documented below. */ values: outputs.apphub.WorkloadWorkloadPropertyExtendedMetadataValue[]; } interface WorkloadWorkloadPropertyExtendedMetadataValue { /** * (Output) * The resource name for the Extended Metadata Schema. */ extendedMetadataSchema: string; /** * (Output) * The metadata contents as a JSON string. */ metadataStruct: string; } interface WorkloadWorkloadPropertyFunctionalType { /** * (Output) * Output only. The functional type of a service or workload. */ type: string; } interface WorkloadWorkloadPropertyIdentity { /** * (Output) * The principal of the identity. */ principal: string; } interface WorkloadWorkloadReference { /** * (Output) * Output only. The underlying compute resource uri. */ uri: string; } } export declare namespace applicationintegration { interface AuthConfigClientCertificate { /** * The ssl private key encoded in PEM format. This string must include the begin header and end footer lines. */ encryptedPrivateKey: string; /** * 'passphrase' should be left unset if private key is not encrypted. * Note that 'passphrase' is not the password for web server, but an extra layer of security to protect the private key. */ passphrase?: string; /** * The ssl certificate encoded in PEM format. This string must include the begin header and end footer lines. */ sslCertificate: string; } interface AuthConfigDecryptedCredential { /** * Auth token credential. * Structure is documented below. 
*/ authToken?: outputs.applicationintegration.AuthConfigDecryptedCredentialAuthToken; /** * Credential type associated with auth configs. */ credentialType: string; /** * JWT credential. * Structure is documented below. */ jwt?: outputs.applicationintegration.AuthConfigDecryptedCredentialJwt; /** * OAuth2 authorization code credential. * Structure is documented below. */ oauth2AuthorizationCode?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2AuthorizationCode; /** * OAuth2 client credentials. * Structure is documented below. */ oauth2ClientCredentials?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentials; /** * Google OIDC ID Token. * Structure is documented below. */ oidcToken?: outputs.applicationintegration.AuthConfigDecryptedCredentialOidcToken; /** * Service account credential. * Structure is documented below. */ serviceAccountCredentials?: outputs.applicationintegration.AuthConfigDecryptedCredentialServiceAccountCredentials; /** * Username and password credential. * Structure is documented below. */ usernameAndPassword?: outputs.applicationintegration.AuthConfigDecryptedCredentialUsernameAndPassword; } interface AuthConfigDecryptedCredentialAuthToken { /** * The token for the auth type. */ token?: string; /** * Authentication type, e.g. "Basic", "Bearer", etc. */ type?: string; } interface AuthConfigDecryptedCredentialJwt { /** * (Output) * The token calculated by the header, payload and signature. */ jwt: string; /** * Identifies which algorithm is used to generate the signature. */ jwtHeader?: string; /** * Contains a set of claims. The JWT specification defines seven Registered Claim Names which are the standard fields commonly included in tokens. Custom claims are usually also included, depending on the purpose of the token. */ jwtPayload?: string; /** * User's pre-shared secret to sign the token. 
*/ secret?: string; } interface AuthConfigDecryptedCredentialOauth2AuthorizationCode { /** * The auth url endpoint to send the auth code request to. */ authEndpoint?: string; /** * The client's id. */ clientId?: string; /** * The client's secret. */ clientSecret?: string; /** * A space-delimited list of requested scope permissions. */ scope?: string; /** * The token url endpoint to send the token request to. */ tokenEndpoint?: string; } interface AuthConfigDecryptedCredentialOauth2ClientCredentials { /** * The client's ID. */ clientId?: string; /** * The client's secret. */ clientSecret?: string; /** * Represent how to pass parameters to fetch access token Possible values: ["REQUEST_TYPE_UNSPECIFIED", "REQUEST_BODY", "QUERY_PARAMETERS", "ENCODED_HEADER"] */ requestType?: string; /** * A space-delimited list of requested scope permissions. */ scope?: string; /** * The token endpoint is used by the client to obtain an access token by presenting its authorization grant or refresh token. */ tokenEndpoint?: string; /** * Token parameters for the auth request. */ tokenParams?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParams; } interface AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParams { /** * A list of parameter map entries. * Structure is documented below. */ entries?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntry[]; } interface AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntry { /** * Key of the map entry. * Structure is documented below. */ key?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryKey; /** * Value of the map entry. * Structure is documented below. 
*/ value?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryValue; } interface AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryKey { /** * Passing a literal value * Structure is documented below. */ literalValue?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryKeyLiteralValue; } interface AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryKeyLiteralValue { /** * String. */ stringValue?: string; } interface AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryValue { /** * Passing a literal value * Structure is documented below. */ literalValue?: outputs.applicationintegration.AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryValueLiteralValue; } interface AuthConfigDecryptedCredentialOauth2ClientCredentialsTokenParamsEntryValueLiteralValue { /** * String. */ stringValue?: string; } interface AuthConfigDecryptedCredentialOidcToken { /** * Audience to be used when generating OIDC token. The audience claim identifies the recipients that the JWT is intended for. */ audience?: string; /** * The service account email to be used as the identity for the token. */ serviceAccountEmail?: string; /** * (Output) * ID token obtained for the service account. */ token: string; /** * (Output) * The approximate time until the token retrieved is valid. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ tokenExpireTime: string; } interface AuthConfigDecryptedCredentialServiceAccountCredentials { /** * A space-delimited list of requested scope permissions. */ scope?: string; /** * Name of the service account that has the permission to make the request. */ serviceAccount?: string; } interface AuthConfigDecryptedCredentialUsernameAndPassword { /** * Password to be used. 
*/ password?: string; /** * Username to be used. */ username?: string; } interface ClientCloudKmsConfig { /** * A Cloud KMS key is a named object containing one or more key versions, along * with metadata for the key. A key exists on exactly one key ring tied to a * specific location. */ key: string; /** * Each version of a key contains key material used for encryption or signing. * A key's version is represented by an integer, starting at 1. To decrypt data * or verify a signature, you must use the same key version that was used to * encrypt or sign the data. */ keyVersion?: string; /** * Location name of the key ring, e.g. "us-west1". */ kmsLocation: string; /** * The Google Cloud project id of the project where the kms key is stored. If empty, * the kms key is stored at the same project as customer's project and encrypted * with CMEK, otherwise, the kms key is stored in the tenant project and * encrypted with GMEK. */ kmsProjectId?: string; /** * A key ring organizes keys in a specific Google Cloud location and allows you to * manage access control on groups of keys. A key ring's name does not need to be * unique across a Google Cloud project, but must be unique within a given location. */ kmsRing: string; } } export declare namespace artifactregistry { interface GetDockerImagesDockerImage { /** * The time, as a RFC 3339 string, this image was built. */ buildTime: string; /** * Extracted short name of the image (last part of `name`, without tag or digest). For example, from `.../nginx@sha256:...` → `nginx`. */ imageName: string; /** * Calculated size of the image in bytes. */ imageSizeBytes: string; /** * Media type of this image, e.g. `application/vnd.docker.distribution.manifest.v2+json`. */ mediaType: string; /** * The fully qualified name of the fetched image. This name has the form: `projects/{{project}}/locations/{{location}}/repository/{{repository_id}}/dockerImages/{{docker_image}}`. 
For example, `projects/test-project/locations/us-west4/repositories/test-repo/dockerImages/nginx@sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf` */ name: string; /** * The URI to access the image. For example, `us-west4-docker.pkg.dev/test-project/test-repo/nginx@sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf` */ selfLink: string; /** * A list of all tags associated with the image. */ tags: string[]; /** * The time, as a RFC 3339 string, this image was updated. */ updateTime: string; /** * The time, as a RFC 3339 string, the image was uploaded. For example, `2014-10-02T15:01:23.045123456Z`. */ uploadTime: string; } interface GetMavenArtifactsMavenArtifact { /** * The name of the artifact to fetch. */ artifactId: string; /** * The time the artifact was created. */ createTime: string; /** * Group ID for the artifact. */ groupId: string; /** * The fully qualified name of the fetched artifact. Format: * ``` * projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/mavenArtifacts/{{group_id}}:{{artifact_id}}:{{version}} * ``` */ name: string; /** * URL to access the pom file of the artifact. */ pomUri: string; /** * The time the artifact was last updated. */ updateTime: string; /** * The version of the Maven artifact. */ version: string; } interface GetNpmPackagesNpmPackage { /** * The time, as a RFC 3339 string, this package was created. */ createTime: string; /** * The fully qualified name of the fetched package. This name has the form: `projects/{{project}}/locations/{{location}}/repository/{{repository_id}}/npmPackages/{{npmPackage}}`. For example, `projects/example-project/locations/us-central1/repository/example-repo/npmPackages/my-test-package:0.0.1` */ name: string; /** * Extracted short name of the package (last part of `name`, without version). For example, from `.../my-test-package:0.0.1` → `my-test-package`. */ packageName: string; /** * The tags associated with the Npm package. 
*/ tags: string[]; /** * The time, as a RFC 3339 string, this package was updated. */ updateTime: string; /** * Version of this package. */ version: string; } interface GetPackagesPackage { /** * Client specified annotations. */ annotations: { [key: string]: string; }; /** * The time, as a RFC 3339 string, this package was created. */ createTime: string; /** * The display name of the package. */ displayName: string; /** * The name of the package, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1`. If the package ID part contains slashes, the slashes are escaped. */ name: string; /** * The time, as a RFC 3339 string, this package was last updated. This includes publishing a new version of the package. */ updateTime: string; } interface GetPythonPackagesPythonPackage { /** * The time, as a RFC 3339 string, this package was created. */ createTime: string; /** * The fully qualified name of the fetched package. This name has the form: `projects/{{project}}/locations/{{location}}/repository/{{repository_id}}/pythonPackages/{{pythonPackage}}`. For example, `projects/example-project/locations/us-central1/repository/example-repo/pythonPackages/my-test-package:0.0.1` */ name: string; /** * Extracted short name of the package (last part of `name`, without version). For example, from `.../my-test-package:0.0.1` → `my-test-package`. */ packageName: string; /** * The time, as a RFC 3339 string, this package was updated. */ updateTime: string; /** * Version of this package. */ version: string; } interface GetRepositoriesRepository { /** * The time when the repository was created. */ createTime: string; /** * The user-provided description of the repository. */ description: string; /** * The format of packages that are stored in the repository. Supported formats can be found [here](https://cloud.google.com/artifact-registry/docs/supported-formats). 
*/ format: string; /** * An identifier for the resource with format `projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}` */ id: string; /** * The last part of the repository name, for example: `"repo1"` */ repositoryId: string; /** * The time when the repository was last updated. */ updateTime: string; } interface GetRepositoryCleanupPolicy { /** * Policy action. Possible values: ["DELETE", "KEEP"] */ action: string; /** * Policy condition for matching versions. */ conditions: outputs.artifactregistry.GetRepositoryCleanupPolicyCondition[]; id: string; /** * Policy condition for retaining a minimum number of versions. May only be * specified with a Keep action. */ mostRecentVersions: outputs.artifactregistry.GetRepositoryCleanupPolicyMostRecentVersion[]; } interface GetRepositoryCleanupPolicyCondition { /** * Match versions newer than a duration. */ newerThan: string; /** * Match versions older than a duration. */ olderThan: string; /** * Match versions by package prefix. Applied on any prefix match. */ packageNamePrefixes: string[]; /** * Match versions by tag prefix. Applied on any prefix match. */ tagPrefixes: string[]; /** * Match versions by tag status. Default value: "ANY" Possible values: ["TAGGED", "UNTAGGED", "ANY"] */ tagState: string; /** * Match versions by version name prefix. Applied on any prefix match. */ versionNamePrefixes: string[]; } interface GetRepositoryCleanupPolicyMostRecentVersion { /** * Minimum number of versions to keep. */ keepCount: number; /** * Match versions by package prefix. Applied on any prefix match. */ packageNamePrefixes: string[]; } interface GetRepositoryDockerConfig { /** * The repository which enabled this flag prevents all tags from being modified, moved or deleted. This does not prevent tags from being created. */ immutableTags: boolean; } interface GetRepositoryMavenConfig { /** * The repository with this flag will allow publishing the same * snapshot versions. 
*/ allowSnapshotOverwrites: boolean; /** * Version policy defines the versions that the registry will accept. Default value: "VERSION_POLICY_UNSPECIFIED" Possible values: ["VERSION_POLICY_UNSPECIFIED", "RELEASE", "SNAPSHOT"] */ versionPolicy: string; } interface GetRepositoryRemoteRepositoryConfig { /** * Specific settings for an Apt remote repository. */ aptRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigAptRepository[]; /** * Specific settings for an Artifact Registry remote repository. */ commonRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigCommonRepository[]; /** * The description of the remote source. */ description: string; /** * If true, the remote repository upstream and upstream credentials will * not be validated. */ disableUpstreamValidation: boolean; /** * Specific settings for a Docker remote repository. */ dockerRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigDockerRepository[]; /** * Specific settings for a Maven remote repository. */ mavenRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigMavenRepository[]; /** * Specific settings for an Npm remote repository. */ npmRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigNpmRepository[]; /** * Specific settings for a Python remote repository. */ pythonRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigPythonRepository[]; /** * The credentials used to access the remote repository. */ upstreamCredentials: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigUpstreamCredential[]; /** * Specific settings for a Yum remote repository. */ yumRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigYumRepository[]; } interface GetRepositoryRemoteRepositoryConfigAptRepository { /** * One of the publicly available Apt repositories supported by Artifact Registry. 
*/ publicRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigAptRepositoryPublicRepository[]; } interface GetRepositoryRemoteRepositoryConfigAptRepositoryPublicRepository { /** * A common public repository base for Apt, e.g. '"debian/dists/stable"' Possible values: ["DEBIAN", "UBUNTU", "DEBIAN_SNAPSHOT"] */ repositoryBase: string; /** * Specific repository from the base. */ repositoryPath: string; } interface GetRepositoryRemoteRepositoryConfigCommonRepository { /** * One of: * a. Artifact Registry Repository resource, e.g. 'projects/UPSTREAM_PROJECT_ID/locations/REGION/repositories/UPSTREAM_REPOSITORY' * b. URI to the registry, e.g. '"https://registry-1.docker.io"' * c. URI to Artifact Registry Repository, e.g. '"https://REGION-docker.pkg.dev/UPSTREAM_PROJECT_ID/UPSTREAM_REPOSITORY"' */ uri: string; } interface GetRepositoryRemoteRepositoryConfigDockerRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. */ customRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigDockerRepositoryCustomRepository[]; /** * Address of the remote repository. Possible values: ["DOCKER_HUB"] */ publicRepository: string; } interface GetRepositoryRemoteRepositoryConfigDockerRepositoryCustomRepository { /** * Specific uri to the registry, e.g. '"https://registry-1.docker.io"' */ uri: string; } interface GetRepositoryRemoteRepositoryConfigMavenRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. */ customRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigMavenRepositoryCustomRepository[]; /** * Address of the remote repository. Possible values: ["MAVEN_CENTRAL"] */ publicRepository: string; } interface GetRepositoryRemoteRepositoryConfigMavenRepositoryCustomRepository { /** * Specific uri to the registry, e.g. 
'"https://repo.maven.apache.org/maven2"' */ uri: string; } interface GetRepositoryRemoteRepositoryConfigNpmRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. */ customRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigNpmRepositoryCustomRepository[]; /** * Address of the remote repository. Possible values: ["NPMJS"] */ publicRepository: string; } interface GetRepositoryRemoteRepositoryConfigNpmRepositoryCustomRepository { /** * Specific uri to the registry, e.g. '"https://registry.npmjs.org"' */ uri: string; } interface GetRepositoryRemoteRepositoryConfigPythonRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. */ customRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigPythonRepositoryCustomRepository[]; /** * Address of the remote repository. Possible values: ["PYPI"] */ publicRepository: string; } interface GetRepositoryRemoteRepositoryConfigPythonRepositoryCustomRepository { /** * Specific uri to the registry, e.g. '"https://pypi.io"' */ uri: string; } interface GetRepositoryRemoteRepositoryConfigUpstreamCredential { /** * Use username and password to access the remote repository. */ usernamePasswordCredentials: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigUpstreamCredentialUsernamePasswordCredential[]; } interface GetRepositoryRemoteRepositoryConfigUpstreamCredentialUsernamePasswordCredential { /** * The Secret Manager key version that holds the password to access the * remote repository. Must be in the format of * 'projects/{project}/secrets/{secret}/versions/{version}'. */ passwordSecretVersion: string; /** * The username to access the remote repository. */ username: string; } interface GetRepositoryRemoteRepositoryConfigYumRepository { /** * One of the publicly available Yum repositories supported by Artifact Registry. 
*/ publicRepositories: outputs.artifactregistry.GetRepositoryRemoteRepositoryConfigYumRepositoryPublicRepository[]; } interface GetRepositoryRemoteRepositoryConfigYumRepositoryPublicRepository { /** * A common public repository base for Yum. Possible values: ["CENTOS", "CENTOS_DEBUG", "CENTOS_VAULT", "CENTOS_STREAM", "ROCKY", "EPEL"] */ repositoryBase: string; /** * Specific repository from the base, e.g. '"pub/rocky/9/BaseOS/x86_64/os"' */ repositoryPath: string; } interface GetRepositoryVirtualRepositoryConfig { /** * Policies that configure the upstream artifacts distributed by the Virtual * Repository. Upstream policies cannot be set on a standard repository. */ upstreamPolicies: outputs.artifactregistry.GetRepositoryVirtualRepositoryConfigUpstreamPolicy[]; } interface GetRepositoryVirtualRepositoryConfigUpstreamPolicy { /** * The user-provided ID of the upstream policy. */ id: string; /** * Entries with a greater priority value take precedence in the pull order. */ priority: number; /** * A reference to the repository resource, for example: * "projects/p1/locations/us-central1/repository/repo1". */ repository: string; } interface GetRepositoryVulnerabilityScanningConfig { /** * This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. Possible values: ["INHERITED", "DISABLED"] */ enablementConfig: string; /** * This field returns whether scanning is active for this repository. */ enablementState: string; /** * This provides an explanation for the state of scanning on this repository. */ enablementStateReason: string; } interface GetTagsTag { /** * The name of the tag, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1`. If the package part contains slashes, the slashes are escaped. */ name: string; /** * The version of the tag. 
*/ version: string; } interface GetVersionRelatedTag { /** * The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/version1`. If the package part contains slashes, the slashes are escaped. */ name: string; version: string; } interface GetVersionsVersion { /** * Client specified annotations. */ annotations: { [key: string]: string; }; /** * The time, as a RFC 3339 string, this package was created. */ createTime: string; /** * Description of the version, as specified in its metadata. */ description: string; /** * The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/version1`. If the package part contains slashes, the slashes are escaped. */ name: string; /** * A list of related tags. Will contain up to 100 tags that reference this version. */ relatedTags: outputs.artifactregistry.GetVersionsVersionRelatedTag[]; /** * The time, as a RFC 3339 string, this package was last updated. This includes publishing a new version of the package. */ updateTime: string; } interface GetVersionsVersionRelatedTag { /** * The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/version1`. If the package part contains slashes, the slashes are escaped. */ name: string; version: string; } interface RepositoryCleanupPolicy { /** * Policy action. * Possible values are: `DELETE`, `KEEP`. */ action?: string; /** * Policy condition for matching versions. * Structure is documented below. */ condition?: outputs.artifactregistry.RepositoryCleanupPolicyCondition; /** * The identifier for this object. Format specified above. */ id: string; /** * Policy condition for retaining a minimum number of versions. May only be * specified with a Keep action. * Structure is documented below. 
*/ mostRecentVersions?: outputs.artifactregistry.RepositoryCleanupPolicyMostRecentVersions; } interface RepositoryCleanupPolicyCondition { /** * Match versions newer than a duration. */ newerThan?: string; /** * Match versions older than a duration. */ olderThan?: string; /** * Match versions by package prefix. Applied on any prefix match. */ packageNamePrefixes?: string[]; /** * Match versions by tag prefix. Applied on any prefix match. */ tagPrefixes?: string[]; /** * Match versions by tag status. * Default value is `ANY`. * Possible values are: `TAGGED`, `UNTAGGED`, `ANY`. */ tagState?: string; /** * Match versions by version name prefix. Applied on any prefix match. */ versionNamePrefixes?: string[]; } interface RepositoryCleanupPolicyMostRecentVersions { /** * Minimum number of versions to keep. */ keepCount?: number; /** * Match versions by package prefix. Applied on any prefix match. */ packageNamePrefixes?: string[]; } interface RepositoryDockerConfig { /** * The repository which enabled this flag prevents all tags from being modified, moved or deleted. This does not prevent tags from being created. */ immutableTags?: boolean; } interface RepositoryIamBindingCondition { description?: string; expression: string; title: string; } interface RepositoryIamMemberCondition { description?: string; expression: string; title: string; } interface RepositoryMavenConfig { /** * The repository with this flag will allow publishing the same * snapshot versions. */ allowSnapshotOverwrites?: boolean; /** * Version policy defines the versions that the registry will accept. * Default value is `VERSION_POLICY_UNSPECIFIED`. * Possible values are: `VERSION_POLICY_UNSPECIFIED`, `RELEASE`, `SNAPSHOT`. */ versionPolicy?: string; } interface RepositoryRemoteRepositoryConfig { /** * Specific settings for an Apt remote repository. * Structure is documented below. 
*/ aptRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigAptRepository; /** * Specific settings for an Artifact Registry remote repository. * Structure is documented below. */ commonRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigCommonRepository; /** * The description of the remote source. */ description?: string; /** * If true, the remote repository upstream and upstream credentials will * not be validated. */ disableUpstreamValidation?: boolean; /** * Specific settings for a Docker remote repository. * Structure is documented below. */ dockerRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigDockerRepository; /** * Specific settings for a Maven remote repository. * Structure is documented below. */ mavenRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigMavenRepository; /** * Specific settings for an Npm remote repository. * Structure is documented below. */ npmRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigNpmRepository; /** * Specific settings for a Python remote repository. * Structure is documented below. */ pythonRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigPythonRepository; /** * The credentials used to access the remote repository. * Structure is documented below. */ upstreamCredentials?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigUpstreamCredentials; /** * Specific settings for a Yum remote repository. * Structure is documented below. */ yumRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigYumRepository; } interface RepositoryRemoteRepositoryConfigAptRepository { /** * One of the publicly available Apt repositories supported by Artifact Registry. * Structure is documented below. 
*/ publicRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigAptRepositoryPublicRepository; } interface RepositoryRemoteRepositoryConfigAptRepositoryPublicRepository { /** * A common public repository base for Yum. * Possible values are: `CENTOS`, `CENTOS_DEBUG`, `CENTOS_VAULT`, `CENTOS_STREAM`, `ROCKY`, `EPEL`. */ repositoryBase: string; /** * Specific repository from the base, e.g. `"pub/rocky/9/BaseOS/x86_64/os"` */ repositoryPath: string; } interface RepositoryRemoteRepositoryConfigCommonRepository { /** * One of: * a. Artifact Registry Repository resource, e.g. `projects/UPSTREAM_PROJECT_ID/locations/REGION/repositories/UPSTREAM_REPOSITORY` * b. URI to the registry, e.g. `"https://registry-1.docker.io"` * c. URI to Artifact Registry Repository, e.g. `"https://REGION-docker.pkg.dev/UPSTREAM_PROJECT_ID/UPSTREAM_REPOSITORY"` */ uri: string; } interface RepositoryRemoteRepositoryConfigDockerRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. * Structure is documented below. */ customRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigDockerRepositoryCustomRepository; /** * Address of the remote repository. * Possible values are: `DOCKER_HUB`. */ publicRepository?: string; } interface RepositoryRemoteRepositoryConfigDockerRepositoryCustomRepository { /** * Specific uri to the registry, e.g. `"https://pypi.io"` */ uri?: string; } interface RepositoryRemoteRepositoryConfigMavenRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. * Structure is documented below. */ customRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigMavenRepositoryCustomRepository; /** * Address of the remote repository. * Possible values are: `MAVEN_CENTRAL`. */ publicRepository?: string; } interface RepositoryRemoteRepositoryConfigMavenRepositoryCustomRepository { /** * Specific uri to the registry, e.g. 
`"https://pypi.io"` */ uri?: string; } interface RepositoryRemoteRepositoryConfigNpmRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. * Structure is documented below. */ customRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigNpmRepositoryCustomRepository; /** * Address of the remote repository. * Possible values are: `NPMJS`. */ publicRepository?: string; } interface RepositoryRemoteRepositoryConfigNpmRepositoryCustomRepository { /** * Specific uri to the registry, e.g. `"https://pypi.io"` */ uri?: string; } interface RepositoryRemoteRepositoryConfigPythonRepository { /** * [Deprecated, please use commonRepository instead] Settings for a remote repository with a custom uri. * Structure is documented below. */ customRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigPythonRepositoryCustomRepository; /** * Address of the remote repository. * Possible values are: `PYPI`. */ publicRepository?: string; } interface RepositoryRemoteRepositoryConfigPythonRepositoryCustomRepository { /** * Specific uri to the registry, e.g. `"https://pypi.io"` */ uri?: string; } interface RepositoryRemoteRepositoryConfigUpstreamCredentials { /** * Use username and password to access the remote repository. * Structure is documented below. */ usernamePasswordCredentials?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigUpstreamCredentialsUsernamePasswordCredentials; } interface RepositoryRemoteRepositoryConfigUpstreamCredentialsUsernamePasswordCredentials { /** * The Secret Manager key version that holds the password to access the * remote repository. Must be in the format of * `projects/{project}/secrets/{secret}/versions/{version}`. */ passwordSecretVersion?: string; /** * The username to access the remote repository. 
*/ username?: string; } interface RepositoryRemoteRepositoryConfigYumRepository { /** * One of the publicly available Yum repositories supported by Artifact Registry. * Structure is documented below. */ publicRepository?: outputs.artifactregistry.RepositoryRemoteRepositoryConfigYumRepositoryPublicRepository; } interface RepositoryRemoteRepositoryConfigYumRepositoryPublicRepository { /** * A common public repository base for Yum. * Possible values are: `CENTOS`, `CENTOS_DEBUG`, `CENTOS_VAULT`, `CENTOS_STREAM`, `ROCKY`, `EPEL`. */ repositoryBase: string; /** * Specific repository from the base, e.g. `"pub/rocky/9/BaseOS/x86_64/os"` */ repositoryPath: string; } interface RepositoryVirtualRepositoryConfig { /** * Policies that configure the upstream artifacts distributed by the Virtual * Repository. Upstream policies cannot be set on a standard repository. * Structure is documented below. */ upstreamPolicies?: outputs.artifactregistry.RepositoryVirtualRepositoryConfigUpstreamPolicy[]; } interface RepositoryVirtualRepositoryConfigUpstreamPolicy { /** * The user-provided ID of the upstream policy. */ id?: string; /** * Entries with a greater priority value take precedence in the pull order. */ priority?: number; /** * A reference to the repository resource, for example: * "projects/p1/locations/us-central1/repository/repo1". */ repository?: string; } interface RepositoryVulnerabilityScanningConfig { /** * This configures whether vulnerability scanning is automatically performed for artifacts pushed to this repository. * Possible values are: `INHERITED`, `DISABLED`. */ enablementConfig?: string; /** * (Output) * This field returns whether scanning is active for this repository. */ enablementState: string; /** * (Output) * This provides an explanation for the state of scanning on this repository. 
*/ enablementStateReason: string; } } export declare namespace assuredworkloads { interface WorkloadComplianceStatus { /** * Number of current orgPolicy violations which are acknowledged. */ acknowledgedViolationCounts: number[]; /** * Number of current orgPolicy violations which are not acknowledged. */ activeViolationCounts: number[]; } interface WorkloadEkmProvisioningResponse { /** * Indicates Ekm provisioning error if any. Possible values: EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR, EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR */ ekmProvisioningErrorDomain: string; /** * Detailed error message if Ekm provisioning fails Possible values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT, MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION */ ekmProvisioningErrorMapping: string; /** * Indicates Ekm enrollment Provisioning of a given workload. Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING, EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED */ ekmProvisioningState: string; } interface WorkloadKmsSettings { /** * Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary. */ nextRotationTime: string; /** * Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours. */ rotationPeriod: string; } interface WorkloadPartnerPermissions { /** * Optional. Allow partner to view violation alerts. */ assuredWorkloadsMonitoring?: boolean; /** * Allow the partner to view inspectability logs and monitoring violations. */ dataLogsViewer?: boolean; /** * Optional. Allow partner to view access approval logs. */ serviceAccessApprover?: boolean; } interface WorkloadResource { /** * Resource identifier. 
For a project this represents project_number. */ resourceId: number; /** * Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER */ resourceType: string; } interface WorkloadResourceSetting { /** * User-assigned resource display name. If not empty it will be used to create a resource with the specified name. */ displayName?: string; /** * Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folderId is assigned by Google. */ resourceId?: string; /** * Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER */ resourceType?: string; } interface WorkloadSaaEnrollmentResponse { /** * Indicates SAA enrollment setup error if any. */ setupErrors: string[]; /** * Indicates SAA enrollment status of a given workload. Possible values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE */ setupStatus: string; } interface WorkloadWorkloadOptions { /** * Indicates type of KAJ enrollment for the workload. Currently, only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF */ kajEnrollmentType?: string; } } export declare namespace backupdisasterrecovery { interface BackupPlanAssociationRulesConfigInfo { /** * (Output) * google.rpc.Status object to store the last backup error * Structure is documented below. 
*/ lastBackupErrors: outputs.backupdisasterrecovery.BackupPlanAssociationRulesConfigInfoLastBackupError[]; /** * (Output) * State of last backup taken. */ lastBackupState: string; /** * (Output) * The point in time when the last successful backup was captured from the source */ lastSuccessfulBackupConsistencyTime: string; /** * (Output) * Backup Rule id fetched from backup plan. */ ruleId: string; } interface BackupPlanAssociationRulesConfigInfoLastBackupError { /** * (Output) * The status code, which should be an enum value of [google.rpc.Code] */ code: number; /** * (Output) * A developer-facing error message, which should be in English. */ message: string; } interface BackupPlanBackupRule { /** * Configures the duration for which backup data will be kept. The value should be greater than or equal to minimum enforced retention of the backup vault. */ backupRetentionDays: number; /** * The unique ID of this `BackupRule`. The `ruleId` is unique per `BackupPlan`. */ ruleId: string; /** * StandardSchedule defines a schedule that runs within the confines of a defined window of days. * Structure is documented below. */ standardSchedule: outputs.backupdisasterrecovery.BackupPlanBackupRuleStandardSchedule; } interface BackupPlanBackupRuleStandardSchedule { /** * A BackupWindow defines the window of the day during which backup jobs will run. Jobs are queued at the beginning of the window and will be marked as * `NOT_RUN` if they do not start by the end of the window. * Structure is documented below. */ backupWindow?: outputs.backupdisasterrecovery.BackupPlanBackupRuleStandardScheduleBackupWindow; /** * Specifies days of months like 1, 5, or 14 on which jobs will run. */ daysOfMonths?: number[]; /** * Specifies days of week like MONDAY or TUESDAY, on which jobs will run. This is required for `recurrenceType`, `WEEKLY` and is not applicable otherwise. 
* Each value may be one of: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ daysOfWeeks?: string[]; /** * Specifies frequency for hourly backups. An hourly frequency of 2 means jobs will run every 2 hours from start time till end time defined. * This is required for `recurrenceType`, `HOURLY` and is not applicable otherwise. */ hourlyFrequency?: number; /** * Specifies values of months * Each value may be one of: `MONTH_UNSPECIFIED`, `JANUARY`, `FEBRUARY`, `MARCH`, `APRIL`, `MAY`, `JUNE`, `JULY`, `AUGUST`, `SEPTEMBER`, `OCTOBER`, `NOVEMBER`, `DECEMBER`. */ months?: string[]; /** * RecurrenceType enumerates the applicable periodicity for the schedule. * Possible values are: `HOURLY`, `DAILY`, `WEEKLY`, `MONTHLY`, `YEARLY`. */ recurrenceType: string; /** * The time zone to be used when interpreting the schedule. */ timeZone: string; /** * Specifies a week day of the month like FIRST SUNDAY or LAST MONDAY, on which jobs will run. * Structure is documented below. */ weekDayOfMonth?: outputs.backupdisasterrecovery.BackupPlanBackupRuleStandardScheduleWeekDayOfMonth; } interface BackupPlanBackupRuleStandardScheduleBackupWindow { /** * The hour of the day (1-24) when the window ends, for example, if the value of end hour of the day is 10, that means the backup window end time is 10:00. * The end hour of the day should be greater than the start */ endHourOfDay?: number; /** * The hour of the day (0-23) when the window starts, for example, if the value of the start hour of the day is 6, that means the backup window starts at 6:00. */ startHourOfDay: number; } interface BackupPlanBackupRuleStandardScheduleWeekDayOfMonth { /** * Specifies the day of the week. * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeek: string; /** * WeekOfMonth enumerates possible weeks in the month, e.g. the first, third, or last week of the month. 
* Possible values are: `WEEK_OF_MONTH_UNSPECIFIED`, `FIRST`, `SECOND`, `THIRD`, `FOURTH`, `LAST`. */ weekOfMonth: string; } interface BackupVaultEncryptionConfig { /** * The Resource name of the Cloud KMS key to be used to encrypt new backups. The key must be in the same location as the backup vault. The key must be a Cloud KMS CryptoKey. */ kmsKeyName?: string; } interface GetBackupBackup { /** * Id of the requesting object, Backup. */ backupId: string; /** * The ID of the Backup Vault of the Data Source in which the Backup belongs. */ backupVaultId: string; /** * The time when the backup was created. */ createTime: string; /** * The ID of the Data Source in which the Backup belongs. */ dataSourceId: string; /** * The location in which the Backup belongs. */ location: string; /** * Name of the resource. */ name: string; } interface GetBackupPlanAssociationRulesConfigInfo { /** * google.rpc.Status object to store the last backup error */ lastBackupErrors: outputs.backupdisasterrecovery.GetBackupPlanAssociationRulesConfigInfoLastBackupError[]; /** * State of last backup taken. */ lastBackupState: string; /** * The point in time when the last successful backup was captured from the source */ lastSuccessfulBackupConsistencyTime: string; /** * Backup Rule id fetched from backup plan. */ ruleId: string; } interface GetBackupPlanAssociationRulesConfigInfoLastBackupError { /** * The status code, which should be an enum value of [google.rpc.Code] */ code: number; /** * A developer-facing error message, which should be in English. */ message: string; } interface GetBackupPlanAssociationsAssociation { /** * The backup plan to which the resource is attached. */ backupPlan: string; createTime: string; /** * The resource name of data source which will be used as storage location for backups taken. */ dataSource: string; /** * The full name of the backup plan association resource. */ name: string; /** * The resource to which the backup plan is applied. 
*/ resource: string; /** * A list containing information about the backup rules. Each object in the list contains: */ rulesConfigInfos: outputs.backupdisasterrecovery.GetBackupPlanAssociationsAssociationRulesConfigInfo[]; } interface GetBackupPlanAssociationsAssociationRulesConfigInfo { /** * A block containing details of the last backup error, if any. */ lastBackupErrors: outputs.backupdisasterrecovery.GetBackupPlanAssociationsAssociationRulesConfigInfoLastBackupError[]; /** * State of last backup taken. */ lastBackupState: string; /** * The point in time when the last successful backup was captured from the source. */ lastSuccessfulBackupConsistencyTime: string; /** * Backup Rule id fetched from backup plan. */ ruleId: string; } interface GetBackupPlanAssociationsAssociationRulesConfigInfoLastBackupError { /** * The status code, which should be an enum value of [google.rpc.Code]. */ code: number; /** * A developer-facing error message. */ message: string; } interface GetBackupPlanBackupRule { /** * Configures the duration for which backup data will be kept. The value should be greater than or equal to minimum enforced retention of the backup vault. */ backupRetentionDays: number; /** * The unique ID of this 'BackupRule'. The 'rule_id' is unique per 'BackupPlan'. */ ruleId: string; /** * StandardSchedule defines a schedule that runs within the confines of a defined window of days. */ standardSchedules: outputs.backupdisasterrecovery.GetBackupPlanBackupRuleStandardSchedule[]; } interface GetBackupPlanBackupRuleStandardSchedule { /** * A BackupWindow defines the window of the day during which backup jobs will run. Jobs are queued at the beginning of the window and will be marked as * 'NOT_RUN' if they do not start by the end of the window. */ backupWindows: outputs.backupdisasterrecovery.GetBackupPlanBackupRuleStandardScheduleBackupWindow[]; /** * Specifies days of months like 1, 5, or 14 on which jobs will run. 
*/ daysOfMonths: number[]; /** * Specifies days of week like MONDAY or TUESDAY, on which jobs will run. This is required for 'recurrence_type', 'WEEKLY' and is not applicable otherwise. Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ daysOfWeeks: string[]; /** * Specifies frequency for hourly backups. An hourly frequency of 2 means jobs will run every 2 hours from start time till end time defined. * This is required for 'recurrence_type', 'HOURLY' and is not applicable otherwise. */ hourlyFrequency: number; /** * Specifies values of months Possible values: ["MONTH_UNSPECIFIED", "JANUARY", "FEBRUARY", "MARCH", "APRIL", "MAY", "JUNE", "JULY", "AUGUST", "SEPTEMBER", "OCTOBER", "NOVEMBER", "DECEMBER"] */ months: string[]; /** * RecurrenceType enumerates the applicable periodicity for the schedule. Possible values: ["HOURLY", "DAILY", "WEEKLY", "MONTHLY", "YEARLY"] */ recurrenceType: string; /** * The time zone to be used when interpreting the schedule. */ timeZone: string; /** * Specifies a week day of the month like FIRST SUNDAY or LAST MONDAY, on which jobs will run. */ weekDayOfMonths: outputs.backupdisasterrecovery.GetBackupPlanBackupRuleStandardScheduleWeekDayOfMonth[]; } interface GetBackupPlanBackupRuleStandardScheduleBackupWindow { /** * The hour of the day (1-24) when the window ends, for example, if the value of end hour of the day is 10, that means the backup window end time is 10:00. * The end hour of the day should be greater than the start */ endHourOfDay: number; /** * The hour of the day (0-23) when the window starts, for example, if the value of the start hour of the day is 6, that means the backup window starts at 6:00. */ startHourOfDay: number; } interface GetBackupPlanBackupRuleStandardScheduleWeekDayOfMonth { /** * Specifies the day of the week. 
Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ dayOfWeek: string; /** * WeekOfMonth enumerates possible weeks in the month, e.g. the first, third, or last week of the month. Possible values: ["WEEK_OF_MONTH_UNSPECIFIED", "FIRST", "SECOND", "THIRD", "FOURTH", "LAST"] */ weekOfMonth: string; } interface GetBackupVaultEncryptionConfig { /** * The Resource name of the Cloud KMS key to be used to encrypt new backups. The key must be in the same location as the backup vault. The key must be a Cloud KMS CryptoKey. */ kmsKeyName: string; } interface GetDataSourceBackupConfigInfo { /** * Configuration for an application backed up by a Backup Appliance. */ backupApplianceBackupConfigs: outputs.backupdisasterrecovery.GetDataSourceBackupConfigInfoBackupApplianceBackupConfig[]; /** * Configuration for a Google Cloud resource. */ gcpBackupConfigs: outputs.backupdisasterrecovery.GetDataSourceBackupConfigInfoGcpBackupConfig[]; /** * If the last backup failed, this field has the error message. */ lastBackupError: { [key: string]: string; }; /** * LastBackupstate tracks whether the last backup was not yet started, successful, failed, or could not be run because of the lack of permissions. */ lastBackupState: string; /** * If the last backup were successful, this field has the consistency date. */ lastSuccessfulBackupConsistencyTime: string; } interface GetDataSourceBackupConfigInfoBackupApplianceBackupConfig { /** * The name of the application. */ applicationName: string; /** * The ID of the backup appliance. */ backupApplianceId: string; /** * The name of the backup appliance. */ backupApplianceName: string; /** * The name of the host where the application is running. */ hostName: string; /** * The ID of the SLA of this application. */ slaId: string; /** * The name of the SLP associated with the application. */ slpName: string; /** * The name of the SLT associated with the application. 
*/ sltName: string; } interface GetDataSourceBackupConfigInfoGcpBackupConfig { /** * The name of the backup plan. */ backupPlan: string; /** * The name of the backup plan association. */ backupPlanAssociation: string; /** * The description of the backup plan. */ backupPlanDescription: string; /** * The names of the backup plan rules which point to this backupvault */ backupPlanRules: string[]; } interface GetDataSourceDataSourceBackupApplianceApplication { /** * Appliance Id of the Backup Appliance. */ applianceId: string; /** * The appid field of the application within the Backup Appliance. */ applicationId: string; /** * The name of the Application as known to the Backup Appliance. */ applicationName: string; /** * Appliance name. */ backupAppliance: string; /** * Hostid of the application host. */ hostId: string; /** * Hostname of the host where the application is running. */ hostname: string; /** * The type of the application. e.g. VMBackup */ type: string; } interface GetDataSourceDataSourceGcpResource { /** * ComputeInstanceDataSourceProperties has a subset of Compute Instance properties that are useful at the Datasource level. */ computeInstanceDataSourceProperties: outputs.backupdisasterrecovery.GetDataSourceDataSourceGcpResourceComputeInstanceDataSourceProperty[]; /** * Full resource pathname URL of the source Google Cloud resource. */ gcpResourcename: string; /** * The location in which the Data Source belongs. */ location: string; /** * The type of the Google Cloud resource. Use the Unified Resource Type, * eg. compute.googleapis.com/Instance. */ type: string; } interface GetDataSourceDataSourceGcpResourceComputeInstanceDataSourceProperty { /** * The description of the Compute Engine instance. */ description: string; /** * The machine type of the instance. */ machineType: string; /** * Name of the compute instance backed up by the datasource. */ name: string; /** * The total number of disks attached to the Instance. 
*/ totalDiskCount: string; /** * The sum of all the disk sizes. */ totalDiskSizeGb: string; } interface GetDataSourceReferencesDataSourceReference { /** * The state of the backup config for the data source. */ backupConfigState: string; /** * The number of backups for the data source. */ backupCount: number; /** * The underlying data source resource. */ dataSource: string; /** * The GCP resource name for the data source. */ gcpResourceName: string; /** * The state of the last backup. */ lastBackupState: string; /** * The last time a successful backup was made. */ lastSuccessfulBackupTime: string; name: string; /** * The resource type to get the data source references for. Examples include, "sqladmin.googleapis.com/Instance", "compute.googleapis.com/Instance". `resourceType` is deprecated and will be removed in a future major release. */ resourceType: string; } interface GetDataSourcesDataSource { /** * Details of how the resource is configured for backup. */ backupConfigInfos: outputs.backupdisasterrecovery.GetDataSourcesDataSourceBackupConfigInfo[]; /** * Number of backups in the data source. */ backupCount: string; /** * The backup configuration state. */ configState: string; /** * The time when the instance was created. */ createTime: string; /** * The backed up resource is a backup appliance application. */ dataSourceBackupApplianceApplications: outputs.backupdisasterrecovery.GetDataSourcesDataSourceDataSourceBackupApplianceApplication[]; /** * The backed up resource is a Google Cloud resource. * The word 'DataSource' was included in the names to indicate that this is * the representation of the Google Cloud resource used within the * DataSource object. */ dataSourceGcpResources: outputs.backupdisasterrecovery.GetDataSourcesDataSourceDataSourceGcpResource[]; /** * Server specified ETag for the ManagementServer resource to prevent simultaneous updates from overwriting each other. */ etag: string; /** * Resource labels to represent user provided metadata. 
*/ labels: { [key: string]: string; }; /** * Name of the datasource to create. * It must have the format "projects/{project}/locations/{location}/backupVaults/{backupvault}/dataSources/{datasource}". * '{datasource}' cannot be changed after creation. It must be between 3-63 characters long and must be unique within the backup vault. */ name: string; /** * The DataSource resource instance state. */ state: string; /** * The number of bytes (metadata and data) stored in this datasource. */ totalStoredBytes: string; /** * The time when the instance was updated. */ updateTime: string; } interface GetDataSourcesDataSourceBackupConfigInfo { /** * Configuration for an application backed up by a Backup Appliance. */ backupApplianceBackupConfigs: outputs.backupdisasterrecovery.GetDataSourcesDataSourceBackupConfigInfoBackupApplianceBackupConfig[]; /** * Configuration for a Google Cloud resource. */ gcpBackupConfigs: outputs.backupdisasterrecovery.GetDataSourcesDataSourceBackupConfigInfoGcpBackupConfig[]; /** * If the last backup failed, this field has the error message. */ lastBackupError: { [key: string]: string; }; /** * LastBackupstate tracks whether the last backup was not yet started, successful, failed, or could not be run because of the lack of permissions. */ lastBackupState: string; /** * If the last backup were successful, this field has the consistency date. */ lastSuccessfulBackupConsistencyTime: string; } interface GetDataSourcesDataSourceBackupConfigInfoBackupApplianceBackupConfig { /** * The name of the application. */ applicationName: string; /** * The ID of the backup appliance. */ backupApplianceId: string; /** * The name of the backup appliance. */ backupApplianceName: string; /** * The name of the host where the application is running. */ hostName: string; /** * The ID of the SLA of this application. */ slaId: string; /** * The name of the SLP associated with the application. */ slpName: string; /** * The name of the SLT associated with the application. 
*/ sltName: string; } interface GetDataSourcesDataSourceBackupConfigInfoGcpBackupConfig { /** * The name of the backup plan. */ backupPlan: string; /** * The name of the backup plan association. */ backupPlanAssociation: string; /** * The description of the backup plan. */ backupPlanDescription: string; /** * The names of the backup plan rules which point to this backupvault */ backupPlanRules: string[]; } interface GetDataSourcesDataSourceDataSourceBackupApplianceApplication { /** * Appliance Id of the Backup Appliance. */ applianceId: string; /** * The appid field of the application within the Backup Appliance. */ applicationId: string; /** * The name of the Application as known to the Backup Appliance. */ applicationName: string; /** * Appliance name. */ backupAppliance: string; /** * Hostid of the application host. */ hostId: string; /** * Hostname of the host where the application is running. */ hostname: string; /** * The type of the application. e.g. VMBackup */ type: string; } interface GetDataSourcesDataSourceDataSourceGcpResource { /** * ComputeInstanceDataSourceProperties has a subset of Compute Instance properties that are useful at the Datasource level. */ computeInstanceDataSourceProperties: outputs.backupdisasterrecovery.GetDataSourcesDataSourceDataSourceGcpResourceComputeInstanceDataSourceProperty[]; /** * Full resource pathname URL of the source Google Cloud resource. */ gcpResourcename: string; /** * Location of the resource: //"global"/"unspecified". */ location: string; /** * The type of the Google Cloud resource. Use the Unified Resource Type, * eg. compute.googleapis.com/Instance. */ type: string; } interface GetDataSourcesDataSourceDataSourceGcpResourceComputeInstanceDataSourceProperty { /** * The description of the Compute Engine instance. */ description: string; /** * The machine type of the instance. */ machineType: string; /** * Name of the compute instance backed up by the datasource. 
*/ name: string; /** * The total number of disks attached to the Instance. */ totalDiskCount: string; /** * The sum of all the disk sizes. */ totalDiskSizeGb: string; } interface GetManagementServerManagementUri { /** * The management console api endpoint. */ api: string; /** * The management console webUi. */ webUi: string; } interface GetManagementServerNetwork { /** * Network with format 'projects/{{project_id}}/global/networks/{{network_id}}' */ network: string; /** * Type of Network peeringMode Default value: "PRIVATE_SERVICE_ACCESS" Possible values: ["PRIVATE_SERVICE_ACCESS"] */ peeringMode: string; } interface ManagementServerManagementUri { /** * (Output) * The management console api endpoint. */ api: string; /** * (Output) * The management console webUi. */ webUi: string; } interface ManagementServerNetwork { /** * Network with format `projects/{{project_id}}/global/networks/{{network_id}}` */ network: string; /** * Type of Network peeringMode * Default value is `PRIVATE_SERVICE_ACCESS`. * Possible values are: `PRIVATE_SERVICE_ACCESS`. */ peeringMode?: string; } interface RestoreWorkloadComputeInstanceRestoreProperties { /** * Optional. Controls for advanced machine-related behavior features. * Structure is documented below. */ advancedMachineFeatures?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesAdvancedMachineFeatures; /** * Optional. Specifies the reservations that this instance can consume from. * Structure is documented below. */ allocationAffinity?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesAllocationAffinity; /** * Optional. Allows this instance to send and receive packets with non-matching destination or source IPs. */ canIpForward?: boolean; /** * Optional. Controls Confidential compute options on the instance. * Structure is documented below. 
*/ confidentialInstanceConfig?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesConfidentialInstanceConfig; /** * Optional. Whether the resource should be protected against deletion. */ deletionProtection?: boolean; /** * Optional. An optional description of this resource. */ description?: string; /** * Optional. Array of disks associated with this instance. * Structure is documented below. */ disks?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesDisk[]; /** * Optional. Enables display device for the instance. * Structure is documented below. */ displayDevice?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesDisplayDevice; /** * Optional. A list of the type and count of accelerator cards attached to the instance. * Structure is documented below. */ guestAccelerators?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesGuestAccelerator[]; /** * Optional. Specifies the hostname of the instance. */ hostname?: string; /** * Optional. Encrypts suspended data for an instance with a customer-managed encryption key. * Structure is documented below. */ instanceEncryptionKey?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesInstanceEncryptionKey; /** * Optional. KeyRevocationActionType of the instance. * Possible values are: `KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED`, `NONE`, `STOP`. */ keyRevocationActionType?: string; /** * Optional. Labels to apply to this instance. * Structure is documented below. */ labels?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesLabel[]; /** * Optional. Full or partial URL of the machine type resource to use for this instance. */ machineType?: string; /** * Optional. This includes custom metadata and predefined keys. * Structure is documented below. */ metadata?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesMetadata; /** * Optional. 
Minimum CPU platform to use for this instance. */ minCpuPlatform?: string; /** * Required. Name of the compute instance. */ name: string; /** * Optional. An array of network configurations for this instance. * Structure is documented below. */ networkInterfaces?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterface[]; /** * Optional. Configure network performance such as egress bandwidth tier. * Structure is documented below. */ networkPerformanceConfig?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesNetworkPerformanceConfig; /** * Input only. Additional params passed with the request. * Structure is documented below. */ params?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesParams; /** * Optional. The private IPv6 google access type for the VM. * Possible values are: `INSTANCE_PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED`, `INHERIT_FROM_SUBNETWORK`, `ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE`, `ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE`. */ privateIpv6GoogleAccess?: string; /** * Optional. Resource policies applied to this instance. */ resourcePolicies?: string[]; /** * Optional. Sets the scheduling options for this instance. * Structure is documented below. */ scheduling?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesScheduling; /** * Optional. A list of service accounts, with their specified scopes, authorized for this instance. * Structure is documented below. */ serviceAccounts?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesServiceAccount[]; /** * Optional. Controls Shielded compute options on the instance. * Structure is documented below. */ shieldedInstanceConfig?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesShieldedInstanceConfig; /** * Optional. Tags to apply to this instance. * Structure is documented below. 
*/ tags?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesTags; } interface RestoreWorkloadComputeInstanceRestorePropertiesAdvancedMachineFeatures { /** * Optional. Whether to enable nested virtualization or not (default is false). */ enableNestedVirtualization?: boolean; /** * Optional. Whether to enable UEFI networking for instance creation. */ enableUefiNetworking?: boolean; /** * Optional. The number of threads per physical core. */ threadsPerCore?: number; /** * Optional. The number of physical cores to expose to an instance. */ visibleCoreCount?: number; } interface RestoreWorkloadComputeInstanceRestorePropertiesAllocationAffinity { /** * Possible values are: `TYPE_UNSPECIFIED`, `NO_RESERVATION`, `ANY_RESERVATION`, `SPECIFIC_RESERVATION`. */ consumeAllocationType?: string; /** * (Optional) */ key?: string; /** * (Optional) */ values?: string[]; } interface RestoreWorkloadComputeInstanceRestorePropertiesConfidentialInstanceConfig { /** * Optional. Defines whether the instance should have confidential compute enabled. */ enableConfidentialCompute?: boolean; } interface RestoreWorkloadComputeInstanceRestorePropertiesDisk { /** * Optional. Specifies whether the disk will be auto-deleted when the instance is deleted. */ autoDelete?: boolean; /** * Optional. Indicates that this is a boot disk. */ boot?: boolean; /** * Optional. This is used as an identifier for the disks. */ deviceName?: string; /** * Optional. Encrypts or decrypts a disk using a customer-supplied encryption key. * Structure is documented below. */ diskEncryptionKey?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesDiskDiskEncryptionKey; /** * Optional. Specifies the disk interface to use for attaching this disk. * Possible values are: `DISK_INTERFACE_UNSPECIFIED`, `SCSI`, `NVME`, `NVDIMM`, `ISCSI`. */ diskInterface?: string; /** * Optional. The size of the disk in GB. */ diskSizeGb?: number; /** * Output only. 
The URI of the disk type resource. */ diskType?: string; /** * Optional. A list of features to enable on the guest operating system. * Structure is documented below. */ guestOsFeatures?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesDiskGuestOsFeature[]; /** * Optional. A zero-based index to this disk, where 0 is reserved for the boot disk. */ index?: number; /** * Optional. Specifies the parameters to initialize this disk. * Structure is documented below. */ initializeParams?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesDiskInitializeParams; /** * Optional. Type of the resource. */ kind?: string; /** * Optional. Any valid publicly visible licenses. */ licenses?: string[]; /** * Optional. The mode in which to attach this disk. * Possible values are: `DISK_MODE_UNSPECIFIED`, `READ_WRITE`, `READ_ONLY`, `LOCKED`. */ mode?: string; /** * Optional. Specifies the saved state of the disk. * Possible values are: `DISK_SAVED_STATE_UNSPECIFIED`, `PRESERVED`. */ savedState?: string; /** * Optional. Specifies a valid partial or full URL to an existing Persistent Disk resource. */ source?: string; /** * Optional. Specifies the type of the disk. * Possible values are: `DISK_TYPE_UNSPECIFIED`, `SCRATCH`, `PERSISTENT`. */ type?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesDiskDiskEncryptionKey { /** * (Optional) */ kmsKeyName?: string; /** * (Optional) */ kmsKeyServiceAccount?: string; /** * (Optional) */ rawKey?: string; /** * (Optional) */ rsaEncryptedKey?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesDiskGuestOsFeature { /** * Possible values are: `FEATURE_TYPE_UNSPECIFIED`, `VIRTIO_SCSI_MULTIQUEUE`, `WINDOWS`, `MULTI_IP_SUBNET`, `UEFI_COMPATIBLE`, `SECURE_BOOT`, `GVNIC`, `SEV_CAPABLE`, `BARE_METAL_LINUX_COMPATIBLE`, `SUSPEND_RESUME_COMPATIBLE`, `SEV_LIVE_MIGRATABLE`, `SEV_SNP_CAPABLE`, `TDX_CAPABLE`, `IDPF`, `SEV_LIVE_MIGRATABLE_V2`. 
*/ type?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesDiskInitializeParams { /** * Optional. Specifies the disk name. */ diskName?: string; /** * Optional. URL of the zone where the disk should be created. */ replicaZones?: string[]; } interface RestoreWorkloadComputeInstanceRestorePropertiesDisplayDevice { /** * Optional. Enables display for the Compute Engine VM. */ enableDisplay?: boolean; } interface RestoreWorkloadComputeInstanceRestorePropertiesGuestAccelerator { /** * Optional. The number of the guest accelerator cards exposed to this instance. */ acceleratorCount?: number; /** * Optional. Full or partial URL of the accelerator type resource. */ acceleratorType?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesInstanceEncryptionKey { /** * (Optional) */ kmsKeyName?: string; /** * (Optional) */ kmsKeyServiceAccount?: string; /** * (Optional) */ rawKey?: string; /** * (Optional) */ rsaEncryptedKey?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesLabel { /** * The identifier for this object. Format specified above. */ key: string; /** * (Optional) */ value?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesMetadata { /** * Structure is documented below. */ items?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesMetadataItem[]; } interface RestoreWorkloadComputeInstanceRestorePropertiesMetadataItem { /** * (Optional) */ key?: string; /** * (Optional) */ value?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterface { /** * Structure is documented below. */ accessConfigs?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterfaceAccessConfig[]; /** * Structure is documented below. */ aliasIpRanges?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterfaceAliasIpRange[]; /** * Optional. The prefix length of the primary internal IPv6 range. 
*/ internalIpv6PrefixLength?: number; /** * Optional. An IPv4 internal IP address to assign to the instance. */ ipAddress?: string; /** * Structure is documented below. */ ipv6AccessConfigs?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterfaceIpv6AccessConfig[]; /** * Possible values are: `UNSPECIFIED_IPV6_ACCESS_TYPE`, `INTERNAL`, `EXTERNAL`. */ ipv6AccessType?: string; /** * Optional. An IPv6 internal network address for this network interface. */ ipv6Address?: string; /** * Optional. URL of the VPC network resource for this instance. */ network?: string; /** * (Optional) */ networkAttachment?: string; /** * Possible values are: `NIC_TYPE_UNSPECIFIED`, `VIRTIO_NET`, `GVNIC`. */ nicType?: string; /** * (Optional) */ queueCount?: number; /** * Possible values are: `STACK_TYPE_UNSPECIFIED`, `IPV4_ONLY`, `IPV4_IPV6`. */ stackType?: string; /** * Optional. The URL of the Subnetwork resource for this instance. */ subnetwork?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterfaceAccessConfig { /** * (Optional) */ externalIp?: string; /** * (Optional) */ externalIpv6?: string; /** * (Optional) */ externalIpv6PrefixLength?: number; /** * Optional. The name of this access configuration. */ name?: string; /** * Possible values are: `NETWORK_TIER_UNSPECIFIED`, `PREMIUM`, `STANDARD`. */ networkTier?: string; /** * (Optional) */ publicPtrDomainName?: string; /** * (Optional) */ setPublicPtr?: boolean; /** * Optional. The type of configuration. * Possible values are: `ACCESS_TYPE_UNSPECIFIED`, `ONE_TO_ONE_NAT`, `DIRECT_IPV6`. 
*/ type?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterfaceAliasIpRange { /** * (Optional) */ ipCidrRange?: string; /** * (Optional) */ subnetworkRangeName?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesNetworkInterfaceIpv6AccessConfig { /** * (Optional) */ externalIp?: string; /** * (Optional) */ externalIpv6?: string; /** * (Optional) */ externalIpv6PrefixLength?: number; /** * (Optional, Deprecated) * The resource name of the backup instance. * * > **Warning:** `name` is deprecated and will be removed in a future major release. The backup is identified by the parameters (location, backup_vault_id, data_source_id, backup_id). */ name?: string; /** * Possible values: ["NETWORK_TIER_UNSPECIFIED", "PREMIUM", "STANDARD"] */ networkTier?: string; /** * (Optional) */ publicPtrDomainName?: string; /** * (Optional) */ setPublicPtr?: boolean; /** * Output only. Type of the resource. */ type?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesNetworkPerformanceConfig { /** * Possible values are: `TIER_UNSPECIFIED`, `DEFAULT`, `TIER_1`. */ totalEgressBandwidthTier?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesParams { /** * Structure is documented below. */ resourceManagerTags?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesParamsResourceManagerTag[]; } interface RestoreWorkloadComputeInstanceRestorePropertiesParamsResourceManagerTag { /** * The identifier for this object. Format specified above. */ key: string; /** * (Optional) */ value?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesScheduling { /** * (Optional) */ automaticRestart?: boolean; /** * Possible values are: `INSTANCE_TERMINATION_ACTION_UNSPECIFIED`, `DELETE`, `STOP`. */ instanceTerminationAction?: string; /** * A nested object resource. * Structure is documented below. 
*/ localSsdRecoveryTimeout?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesSchedulingLocalSsdRecoveryTimeout; /** * A nested object resource. * Structure is documented below. */ maxRunDuration?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesSchedulingMaxRunDuration; /** * (Optional) */ minNodeCpus?: number; /** * Structure is documented below. */ nodeAffinities?: outputs.backupdisasterrecovery.RestoreWorkloadComputeInstanceRestorePropertiesSchedulingNodeAffinity[]; /** * Possible values are: `ON_HOST_MAINTENANCE_UNSPECIFIED`, `TERMINATE`, `MIGRATE`. */ onHostMaintenance?: string; /** * (Optional) */ preemptible?: boolean; /** * Possible values are: `PROVISIONING_MODEL_UNSPECIFIED`, `STANDARD`, `SPOT`. */ provisioningModel?: string; /** * (Optional) */ terminationTime?: string; } interface RestoreWorkloadComputeInstanceRestorePropertiesSchedulingLocalSsdRecoveryTimeout { /** * (Optional) */ nanos?: number; /** * (Optional) */ seconds?: number; } interface RestoreWorkloadComputeInstanceRestorePropertiesSchedulingMaxRunDuration { /** * (Optional) */ nanos?: number; /** * (Optional) */ seconds?: number; } interface RestoreWorkloadComputeInstanceRestorePropertiesSchedulingNodeAffinity { /** * (Optional) */ key?: string; /** * Possible values are: `OPERATOR_UNSPECIFIED`, `IN`, `NOT_IN`. */ operator?: string; /** * (Optional) */ values?: string[]; } interface RestoreWorkloadComputeInstanceRestorePropertiesServiceAccount { /** * (Optional) */ email?: string; /** * (Optional) */ scopes?: string[]; } interface RestoreWorkloadComputeInstanceRestorePropertiesShieldedInstanceConfig { /** * (Optional) */ enableIntegrityMonitoring?: boolean; /** * (Optional) */ enableSecureBoot?: boolean; /** * (Optional) */ enableVtpm?: boolean; } interface RestoreWorkloadComputeInstanceRestorePropertiesTags { /** * (Optional) */ items?: string[]; } interface RestoreWorkloadComputeInstanceTargetEnvironment { /** * Required. 
Target project for the Compute Engine instance. */ project: string; /** * Required. The zone of the Compute Engine instance. */ zone: string; } interface RestoreWorkloadDiskRestoreProperties { /** * Optional. The access mode of the disk. * Possible values are: `READ_WRITE_SINGLE`, `READ_WRITE_MANY`, `READ_ONLY_MANY`. */ accessMode?: string; /** * Optional. The architecture of the source disk. * Possible values are: `ARCHITECTURE_UNSPECIFIED`, `X86_64`, `ARM64`. */ architecture?: string; /** * Optional. An optional description of this resource. */ description?: string; /** * Optional. Encrypts the disk using a customer-supplied encryption key. * Structure is documented below. */ diskEncryptionKey?: outputs.backupdisasterrecovery.RestoreWorkloadDiskRestorePropertiesDiskEncryptionKey; /** * Optional. Indicates whether this disk is using confidential compute mode. */ enableConfidentialCompute?: boolean; /** * Optional. A list of features to enable in the guest operating system. * Structure is documented below. */ guestOsFeatures?: outputs.backupdisasterrecovery.RestoreWorkloadDiskRestorePropertiesGuestOsFeature[]; /** * Optional. Labels to apply to this disk. * Structure is documented below. */ labels?: outputs.backupdisasterrecovery.RestoreWorkloadDiskRestorePropertiesLabel[]; /** * Optional. A list of publicly available licenses that are applicable to this backup. */ licenses?: string[]; /** * Required. Name of the disk. */ name: string; /** * Optional. Physical block size of the persistent disk, in bytes. */ physicalBlockSizeBytes?: number; /** * Optional. Indicates how many IOPS to provision for the disk. */ provisionedIops?: number; /** * Optional. Indicates how much throughput to provision for the disk. */ provisionedThroughput?: number; /** * Optional. Resource manager tags to be bound to the disk. * Structure is documented below. */ resourceManagerTags?: outputs.backupdisasterrecovery.RestoreWorkloadDiskRestorePropertiesResourceManagerTag[]; /** * Optional. 
Resource policies applied to this disk. */ resourcePolicies?: string[]; /** * Required. The size of the disk in GB. */ sizeGb: number; /** * Optional. The storage pool in which the new disk is created. */ storagePool?: string; /** * Required. URL of the disk type resource describing which disk type to use. */ type: string; } interface RestoreWorkloadDiskRestorePropertiesDiskEncryptionKey { /** * (Optional) */ kmsKeyName?: string; /** * (Optional) */ kmsKeyServiceAccount?: string; /** * (Optional) */ rawKey?: string; /** * (Optional) */ rsaEncryptedKey?: string; } interface RestoreWorkloadDiskRestorePropertiesGuestOsFeature { /** * Possible values are: `FEATURE_TYPE_UNSPECIFIED`, `VIRTIO_SCSI_MULTIQUEUE`, `WINDOWS`, `MULTI_IP_SUBNET`, `UEFI_COMPATIBLE`, `SECURE_BOOT`, `GVNIC`, `SEV_CAPABLE`, `BARE_METAL_LINUX_COMPATIBLE`, `SUSPEND_RESUME_COMPATIBLE`, `SEV_LIVE_MIGRATABLE`, `SEV_SNP_CAPABLE`, `TDX_CAPABLE`, `IDPF`, `SEV_LIVE_MIGRATABLE_V2`. */ type?: string; } interface RestoreWorkloadDiskRestorePropertiesLabel { /** * The identifier for this object. Format specified above. */ key: string; /** * (Optional) */ value?: string; } interface RestoreWorkloadDiskRestorePropertiesResourceManagerTag { /** * The identifier for this object. Format specified above. */ key: string; /** * (Optional) */ value?: string; } interface RestoreWorkloadDiskTargetEnvironment { /** * Required. Target project for the disk. */ project: string; /** * Required. Target zone for the disk. */ zone: string; } interface RestoreWorkloadRegionDiskTargetEnvironment { /** * Required. Target project for the disk. */ project: string; /** * Required. Target region for the disk. */ region: string; /** * Required. Target URLs of the replica zones for the disk. */ replicaZones: string[]; } interface RestoreWorkloadTargetResource { /** * Output only. Details of the native Google Cloud resource created as part of restore. * Structure is documented below. 
*/ gcpResource?: outputs.backupdisasterrecovery.RestoreWorkloadTargetResourceGcpResource; } interface RestoreWorkloadTargetResourceGcpResource { /** * Output only. Name of the Google Cloud resource. */ gcpResourcename?: string; /** * Required. The location for the backup vault. */ location?: string; /** * Output only. Type of the resource. */ type?: string; } } export declare namespace beyondcorp { interface AppConnectionApplicationEndpoint { /** * Hostname or IP address of the remote application endpoint. */ host: string; /** * Port of the remote application endpoint. */ port: number; } interface AppConnectionGateway { /** * AppGateway name in following format: projects/{project_id}/locations/{locationId}/appgateways/{gateway_id}. */ appGateway: string; /** * (Output) * Ingress port reserved on the gateways for this AppConnection, if not specified or zero, the default port is 19443. */ ingressPort: number; /** * The type of hosting used by the gateway. Refer to * https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#Type_1 * for a list of possible values. */ type?: string; /** * (Output) * Server-defined URI for this resource. */ uri: string; } interface AppConnectorPrincipalInfo { /** * ServiceAccount represents a GCP service account. * Structure is documented below. */ serviceAccount: outputs.beyondcorp.AppConnectorPrincipalInfoServiceAccount; } interface AppConnectorPrincipalInfoServiceAccount { /** * Email address of the service account. */ email: string; } interface AppGatewayAllocatedConnection { /** * The ingress port of an allocated connection. */ ingressPort?: number; /** * The PSC uri of an allocated connection. */ pscUri?: string; } interface GetAppConnectionApplicationEndpoint { /** * Hostname or IP address of the remote application endpoint. */ host: string; /** * Port of the remote application endpoint. 
*/ port: number; } interface GetAppConnectionGateway { /** * AppGateway name in following format: projects/{project_id}/locations/{locationId}/appgateways/{gateway_id}. */ appGateway: string; /** * Ingress port reserved on the gateways for this AppConnection, if not specified or zero, the default port is 19443. */ ingressPort: number; /** * The type of hosting used by the gateway. Refer to * https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#Type_1 * for a list of possible values. */ type: string; /** * Server-defined URI for this resource. */ uri: string; } interface GetAppConnectorPrincipalInfo { /** * ServiceAccount represents a GCP service account. */ serviceAccounts: outputs.beyondcorp.GetAppConnectorPrincipalInfoServiceAccount[]; } interface GetAppConnectorPrincipalInfoServiceAccount { /** * Email address of the service account. */ email: string; } interface GetAppGatewayAllocatedConnection { /** * The ingress port of an allocated connection. */ ingressPort: number; /** * The PSC uri of an allocated connection. */ pscUri: string; } interface GetSecurityGatewayHub { /** * Internet Gateway configuration. */ internetGateways: outputs.beyondcorp.GetSecurityGatewayHubInternetGateway[]; region: string; } interface GetSecurityGatewayHubInternetGateway { /** * Output only. List of IP addresses assigned to the Cloud NAT. */ assignedIps: string[]; } interface GetSecurityGatewayProxyProtocolConfig { /** * The configuration for the proxy. */ allowedClientHeaders: string[]; /** * Client IP configuration. The client IP address is included if true. */ clientIp: boolean; /** * Configuration for the contextual headers. */ contextualHeaders: outputs.beyondcorp.GetSecurityGatewayProxyProtocolConfigContextualHeader[]; /** * Gateway identity configuration. Possible values: ["RESOURCE_NAME"] */ gatewayIdentity: string; /** * Custom resource specific headers along with the values. 
* The names should conform to RFC 9110: * > Field names SHOULD constrain themselves to alphanumeric characters, "-", * and ".", and SHOULD begin with a letter. * > Field values SHOULD contain only ASCII printable characters and tab. */ metadataHeaders: { [key: string]: string; }; } interface GetSecurityGatewayProxyProtocolConfigContextualHeader { /** * Device info configuration. */ deviceInfos: outputs.beyondcorp.GetSecurityGatewayProxyProtocolConfigContextualHeaderDeviceInfo[]; /** * Group info configuration. */ groupInfos: outputs.beyondcorp.GetSecurityGatewayProxyProtocolConfigContextualHeaderGroupInfo[]; /** * Default output type for all enabled headers. Possible values: ["PROTOBUF", "JSON", "NONE"] */ outputType: string; /** * User info configuration. */ userInfos: outputs.beyondcorp.GetSecurityGatewayProxyProtocolConfigContextualHeaderUserInfo[]; } interface GetSecurityGatewayProxyProtocolConfigContextualHeaderDeviceInfo { /** * The output type of the delegated device info. Possible values: ["PROTOBUF", "JSON", "NONE"] */ outputType: string; } interface GetSecurityGatewayProxyProtocolConfigContextualHeaderGroupInfo { /** * The output type of the delegated group info. Possible values: ["PROTOBUF", "JSON", "NONE"] */ outputType: string; } interface GetSecurityGatewayProxyProtocolConfigContextualHeaderUserInfo { /** * The output type of the delegated user info. Possible values: ["PROTOBUF", "JSON", "NONE"] */ outputType: string; } interface GetSecurityGatewayServiceDiscovery { /** * External API configuration. */ apiGateways: outputs.beyondcorp.GetSecurityGatewayServiceDiscoveryApiGateway[]; } interface GetSecurityGatewayServiceDiscoveryApiGateway { /** * Enables fetching resource model updates to alter service behavior per Chrome profile. 
*/ resourceOverrides: outputs.beyondcorp.GetSecurityGatewayServiceDiscoveryApiGatewayResourceOverride[]; } interface GetSecurityGatewayServiceDiscoveryApiGatewayResourceOverride { /** * Contains uri path fragment where HTTP request is sent. */ path: string; } interface SecurityGatewayApplicationEndpointMatcher { /** * Required. Hostname of the application. */ hostname: string; /** * Optional. Ports of the application. */ ports: number[]; } interface SecurityGatewayApplicationIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SecurityGatewayApplicationIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SecurityGatewayApplicationUpstream { /** * Optional. 
Routing policy information. * Structure is documented below. */ egressPolicy?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamEgressPolicy; /** * List of the external endpoints to forward traffic to. * Structure is documented below. */ external?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamExternal; /** * Network to forward traffic to. * Structure is documented below. */ network?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamNetwork; /** * Shared proxy configuration for all apps. * Structure is documented below. */ proxyProtocol?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamProxyProtocol; } interface SecurityGatewayApplicationUpstreamEgressPolicy { /** * Required. List of regions where the application sends traffic to. */ regions: string[]; } interface SecurityGatewayApplicationUpstreamExternal { /** * List of the endpoints to forward traffic to. * Structure is documented below. */ endpoints: outputs.beyondcorp.SecurityGatewayApplicationUpstreamExternalEndpoint[]; } interface SecurityGatewayApplicationUpstreamExternalEndpoint { /** * Hostname of the endpoint. */ hostname: string; /** * Port of the endpoint. */ port: number; } interface SecurityGatewayApplicationUpstreamNetwork { /** * Required. Network name is of the format: * `projects/{project}/global/networks/{network}` */ name: string; } interface SecurityGatewayApplicationUpstreamProxyProtocol { /** * The configuration for the proxy. */ allowedClientHeaders?: string[]; /** * Client IP configuration. The client IP address is included if true. */ clientIp?: boolean; /** * Configuration for the contextual headers. * Structure is documented below. */ contextualHeaders?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamProxyProtocolContextualHeaders; /** * Gateway identity configuration. * Possible values are: `RESOURCE_NAME`. */ gatewayIdentity?: string; /** * Custom resource specific headers along with the values. 
* The names should conform to RFC 9110: * > Field names SHOULD constrain themselves to alphanumeric characters, "-", * and ".", and SHOULD begin with a letter. * > Field values SHOULD contain only ASCII printable characters and tab. */ metadataHeaders?: { [key: string]: string; }; } interface SecurityGatewayApplicationUpstreamProxyProtocolContextualHeaders { /** * Device info configuration. * Structure is documented below. */ deviceInfo?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamProxyProtocolContextualHeadersDeviceInfo; /** * Group info configuration. * Structure is documented below. */ groupInfo?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamProxyProtocolContextualHeadersGroupInfo; /** * Default output type for all enabled headers. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; /** * User info configuration. * Structure is documented below. */ userInfo?: outputs.beyondcorp.SecurityGatewayApplicationUpstreamProxyProtocolContextualHeadersUserInfo; } interface SecurityGatewayApplicationUpstreamProxyProtocolContextualHeadersDeviceInfo { /** * The output type of the delegated device info. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; } interface SecurityGatewayApplicationUpstreamProxyProtocolContextualHeadersGroupInfo { /** * The output type of the delegated group info. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; } interface SecurityGatewayApplicationUpstreamProxyProtocolContextualHeadersUserInfo { /** * The output type of the delegated user info. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; } interface SecurityGatewayHub { /** * Internet Gateway configuration. * Structure is documented below. */ internetGateway: outputs.beyondcorp.SecurityGatewayHubInternetGateway; /** * The identifier for this object. Format specified above. */ region: string; } interface SecurityGatewayHubInternetGateway { /** * (Output) * Output only. 
List of IP addresses assigned to the Cloud NAT. */ assignedIps: string[]; } interface SecurityGatewayIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SecurityGatewayIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SecurityGatewayProxyProtocolConfig { /** * List of the allowed client headers. */ allowedClientHeaders?: string[]; /** * Client IP configuration. The client IP address is included if true. */ clientIp?: boolean; /** * Configuration for the contextual headers. * Structure is documented below. */ contextualHeaders?: outputs.beyondcorp.SecurityGatewayProxyProtocolConfigContextualHeaders; /** * Gateway identity configuration. * Possible values are: `RESOURCE_NAME`.
*/ gatewayIdentity?: string; /** * Custom resource specific headers along with the values. * The names should conform to RFC 9110: * > Field names SHOULD constrain themselves to alphanumeric characters, "-", * and ".", and SHOULD begin with a letter. * > Field values SHOULD contain only ASCII printable characters and tab. */ metadataHeaders?: { [key: string]: string; }; } interface SecurityGatewayProxyProtocolConfigContextualHeaders { /** * Device info configuration. * Structure is documented below. */ deviceInfo?: outputs.beyondcorp.SecurityGatewayProxyProtocolConfigContextualHeadersDeviceInfo; /** * Group info configuration. * Structure is documented below. */ groupInfo?: outputs.beyondcorp.SecurityGatewayProxyProtocolConfigContextualHeadersGroupInfo; /** * Default output type for all enabled headers. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; /** * User info configuration. * Structure is documented below. */ userInfo?: outputs.beyondcorp.SecurityGatewayProxyProtocolConfigContextualHeadersUserInfo; } interface SecurityGatewayProxyProtocolConfigContextualHeadersDeviceInfo { /** * The output type of the delegated device info. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; } interface SecurityGatewayProxyProtocolConfigContextualHeadersGroupInfo { /** * The output type of the delegated group info. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; } interface SecurityGatewayProxyProtocolConfigContextualHeadersUserInfo { /** * The output type of the delegated user info. * Possible values are: `PROTOBUF`, `JSON`, `NONE`. */ outputType?: string; } interface SecurityGatewayServiceDiscovery { /** * External API configuration. * Structure is documented below. */ apiGateway?: outputs.beyondcorp.SecurityGatewayServiceDiscoveryApiGateway; } interface SecurityGatewayServiceDiscoveryApiGateway { /** * Enables fetching resource model updates to alter service behavior per Chrome profile.
* Structure is documented below. */ resourceOverride?: outputs.beyondcorp.SecurityGatewayServiceDiscoveryApiGatewayResourceOverride; } interface SecurityGatewayServiceDiscoveryApiGatewayResourceOverride { /** * Contains uri path fragment where HTTP request is sent. */ path?: string; } } export declare namespace biglake { interface DatabaseHiveOptions { /** * Cloud Storage folder URI where the database data is stored, starting with "gs://". */ locationUri?: string; /** * Stores user supplied Hive database parameters. An object containing a * list of "key": value pairs. * Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ parameters?: { [key: string]: string; }; } interface IcebergCatalogIamBindingCondition { description?: string; expression: string; title: string; } interface IcebergCatalogIamMemberCondition { description?: string; expression: string; title: string; } interface IcebergCatalogReplica { /** * (Output) * The region of the replica, e.g., `us-east1`. */ region: string; /** * (Output) * If the IcebergCatalog is replicated to multiple regions, this describes the current state of the replica. STATE_UNKNOWN - The replica state is unknown. STATE_PRIMARY - The replica is the writable primary. STATE_PRIMARY_IN_PROGRESS - The replica has been recently assigned as the primary, but not all namespaces are writeable yet. STATE_SECONDARY - The replica is a read-only secondary replica. */ state: string; } interface IcebergNamespaceIamBindingCondition { description?: string; expression: string; title: string; } interface IcebergNamespaceIamMemberCondition { description?: string; expression: string; title: string; } interface TableHiveOptions { /** * Stores user supplied Hive table parameters. An object containing a * list of "key": value pairs. * Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ parameters?: { [key: string]: string; }; /** * Stores physical storage information on the data. * Structure is documented below.
*/ storageDescriptor?: outputs.biglake.TableHiveOptionsStorageDescriptor; /** * Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. */ tableType?: string; } interface TableHiveOptionsStorageDescriptor { /** * The fully qualified Java class name of the input format. */ inputFormat?: string; /** * Cloud Storage folder URI where the table data is stored, starting with "gs://". */ locationUri?: string; /** * The fully qualified Java class name of the output format. */ outputFormat?: string; } } export declare namespace bigquery { interface AppProfileDataBoostIsolationReadOnly { /** * The Compute Billing Owner for this Data Boost App Profile. * Possible values are: `HOST_PAYS`. */ computeBillingOwner: string; } interface AppProfileSingleClusterRouting { /** * If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. * It is unsafe to send these requests to the same table/row/column in multiple clusters. */ allowTransactionalWrites?: boolean; /** * The cluster to which read/write requests should be routed. */ clusterId: string; } interface AppProfileStandardIsolation { /** * The priority of requests sent using this app profile. * Possible values are: `PRIORITY_LOW`, `PRIORITY_MEDIUM`, `PRIORITY_HIGH`. */ priority: string; } interface BiReservationPreferredTable { /** * The ID of the dataset in the above project. */ datasetId?: string; /** * The assigned project ID of the project. */ projectId?: string; /** * The ID of the table in the above dataset. */ tableId?: string; } interface ConnectionAws { /** * Authentication using Google owned service account to assume into customer's AWS IAM Role. * Structure is documented below. */ accessRole: outputs.bigquery.ConnectionAwsAccessRole; } interface ConnectionAwsAccessRole { /** * The user’s AWS IAM Role that trusts the Google-owned AWS IAM user Connection. */ iamRoleId: string; /** * (Output) * A unique Google-owned and Google-generated identity for the Connection.
This identity will be used to access the user's AWS IAM Role. */ identity: string; } interface ConnectionAzure { /** * (Output) * The name of the Azure Active Directory Application. */ application: string; /** * (Output) * The client id of the Azure Active Directory Application. */ clientId: string; /** * The id of customer's directory that host the data. */ customerTenantId: string; /** * The Azure Application (client) ID where the federated credentials will be hosted. */ federatedApplicationClientId?: string; /** * (Output) * A unique Google-owned and Google-generated identity for the Connection. This identity will be used to access the user's Azure Active Directory Application. */ identity: string; /** * (Output) * The object id of the Azure Active Directory Application. */ objectId: string; /** * (Output) * The URL user will be redirected to after granting consent during connection setup. */ redirectUri: string; } interface ConnectionCloudResource { /** * (Output) * The account ID of the service created for the purpose of this connection. */ serviceAccountId: string; } interface ConnectionCloudSpanner { /** * Cloud Spanner database in the form `project/instance/database`. */ database: string; /** * Cloud Spanner database role for fine-grained access control. The Cloud Spanner admin should have provisioned the database role with appropriate permissions, such as `SELECT` and `INSERT`. Other users should only use roles provided by their Cloud Spanner admins. The database role name must start with a letter, and can only contain letters, numbers, and underscores. For more details, see https://cloud.google.com/spanner/docs/fgac-about. */ databaseRole?: string; /** * Allows setting max parallelism per query when executing on Spanner independent compute resources. If unspecified, default values of parallelism are chosen that are dependent on the Cloud Spanner instance configuration. `useParallelism` and `useDataBoost` must be set when setting max parallelism.
*/ maxParallelism?: number; /** * If set, the request will be executed via Spanner independent compute resources. `useParallelism` must be set when using data boost. */ useDataBoost?: boolean; /** * If parallelism should be used when reading from Cloud Spanner. */ useParallelism?: boolean; /** * (Optional, Deprecated) * If the serverless analytics service should be used to read data from Cloud Spanner. `useParallelism` must be set when using serverless analytics. * * > **Warning:** `useServerlessAnalytics` is deprecated and will be removed in a future major release. Use `useDataBoost` instead. * * @deprecated `useServerlessAnalytics` is deprecated and will be removed in a future major release. Use `useDataBoost` instead. */ useServerlessAnalytics?: boolean; } interface ConnectionCloudSql { /** * Cloud SQL properties. * Structure is documented below. */ credential: outputs.bigquery.ConnectionCloudSqlCredential; /** * Database name. */ database: string; /** * Cloud SQL instance ID in the form project:location:instance. */ instanceId: string; /** * (Output) * When the connection is used in the context of an operation in BigQuery, this service account will serve as the identity being used for connecting to the CloudSQL instance specified in this connection. */ serviceAccountId: string; /** * Type of the Cloud SQL database. * Possible values are: `DATABASE_TYPE_UNSPECIFIED`, `POSTGRES`, `MYSQL`. */ type: string; } interface ConnectionCloudSqlCredential { /** * Password for database. * **Note**: This property is sensitive and will not be displayed in the plan. */ password: string; /** * Username for database. */ username: string; } interface ConnectionIamBindingCondition { description?: string; expression: string; title: string; } interface ConnectionIamMemberCondition { description?: string; expression: string; title: string; } interface ConnectionSpark { /** * Dataproc Metastore Service configuration for the connection. * Structure is documented below.
*/ metastoreServiceConfig?: outputs.bigquery.ConnectionSparkMetastoreServiceConfig; /** * (Output) * The account ID of the service created for the purpose of this connection. */ serviceAccountId: string; /** * Spark History Server configuration for the connection. * Structure is documented below. */ sparkHistoryServerConfig?: outputs.bigquery.ConnectionSparkSparkHistoryServerConfig; } interface ConnectionSparkMetastoreServiceConfig { /** * Resource name of an existing Dataproc Metastore service in the form of projects/[projectId]/locations/[region]/services/[serviceId]. */ metastoreService?: string; } interface ConnectionSparkSparkHistoryServerConfig { /** * Resource name of an existing Dataproc Cluster to act as a Spark History Server for the connection in the form of projects/[projectId]/regions/[region]/clusters/[clusterName]. */ dataprocCluster?: string; } interface DataTransferConfigEmailPreferences { /** * If true, email notifications will be sent on transfer run failures. */ enableFailureEmail: boolean; } interface DataTransferConfigEncryptionConfiguration { /** * The name of the KMS key used for encrypting BigQuery data. */ kmsKeyName: string; } interface DataTransferConfigScheduleOptions { /** * If true, automatic scheduling of data transfer runs for this * configuration will be disabled. The runs can be started on ad-hoc * basis using transferConfigs.startManualRuns API. When automatic * scheduling is disabled, the TransferConfig.schedule field will * be ignored. */ disableAutoScheduling?: boolean; /** * Defines time to stop scheduling transfer runs. A transfer run cannot be * scheduled at or after the end time. The end time can be changed at any * moment. The time when a data transfer can be triggered manually is not * limited by this option. */ endTime?: string; /** * Specifies time to start scheduling transfer runs. The first run will be * scheduled at or after the start time according to a recurrence pattern * defined in the schedule string.
The start time can be changed at any * moment. The time when a data transfer can be triggered manually is not * limited by this option. */ startTime?: string; } interface DataTransferConfigSensitiveParams { /** * The Secret Access Key of the AWS account transferring data from. * **Note**: This property is sensitive and will not be displayed in the plan. */ secretAccessKey?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * The Secret Access Key of the AWS account transferring data from. * **Note**: This property is write-only and will not be read from the API. * * > **Note:** Only one of `secretAccessKey` or `secretAccessKeyWo` may be set. */ secretAccessKeyWo?: string; /** * The version of the sensitive params - used to trigger updates of the write-only params. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ secretAccessKeyWoVersion?: number; } interface Datapolicyv2DataPolicyDataMaskingPolicy { /** * A predefined masking expression. * Possible values: * SHA256 * ALWAYS_NULL * DEFAULT_MASKING_VALUE * LAST_FOUR_CHARACTERS * FIRST_FOUR_CHARACTERS * EMAIL_MASK * DATE_YEAR_MASK * RANDOM_HASH */ predefinedExpression?: string; /** * The name of the BigQuery routine that contains the custom masking * routine, in the format of * `projects/{project_number}/datasets/{dataset_id}/routines/{routine_id}`. */ routine?: string; } interface Datapolicyv2DataPolicyIamBindingCondition { description?: string; expression: string; title: string; } interface Datapolicyv2DataPolicyIamMemberCondition { description?: string; expression: string; title: string; } interface DatasetAccess { /** * Condition for the binding. If CEL expression in this field is true, this * access binding will be considered. * Structure is documented below.
*/ condition?: outputs.bigquery.DatasetAccessCondition; /** * Grants all resources of particular types in a particular dataset read access to the current dataset. * Structure is documented below. */ dataset?: outputs.bigquery.DatasetAccessDataset; /** * A domain to grant access to. Any users signed in with the * domain specified will be granted the specified access. */ domain?: string; /** * An email address of a Google Group to grant access to. */ groupByEmail?: string; /** * Some other type of member that appears in the IAM Policy but isn't a user, * group, domain, or special group. For example: `allUsers` */ iamMember?: string; /** * Describes the rights granted to the user specified by the other * member of the access object. Basic, predefined, and custom roles * are supported. Predefined roles that have equivalent basic roles * are swapped by the API to their basic counterparts. See * [official docs](https://cloud.google.com/bigquery/docs/access-control). */ role?: string; /** * A routine from a different dataset to grant access to. Queries * executed against that routine will have read access to tables in * this dataset. The role field is not required when this field is * set. If that routine is updated by any user, access to the routine * needs to be granted again via an update operation. * Structure is documented below. */ routine?: outputs.bigquery.DatasetAccessRoutine; /** * A special group to grant access to. Possible values include: * * `projectOwners`: Owners of the enclosing project. * * `projectReaders`: Readers of the enclosing project. * * `projectWriters`: Writers of the enclosing project. * * `allAuthenticatedUsers`: All authenticated BigQuery users. */ specialGroup?: string; /** * An email address of a user to grant access to. For example: * fred@example.com */ userByEmail?: string; /** * A view from a different dataset to grant access to. Queries * executed against that view will have read access to tables in * this dataset.
The role field is not required when this field is * set. If that view is updated by any user, access to the view * needs to be granted again via an update operation. * Structure is documented below. */ view?: outputs.bigquery.DatasetAccessView; } interface DatasetAccessAuthorizedDataset { /** * The dataset this entry applies to * Structure is documented below. */ dataset: outputs.bigquery.DatasetAccessAuthorizedDatasetDataset; /** * Which resources in the dataset this entry applies to. Currently, only views are supported, * but additional target types may be added in the future. Possible values: VIEWS */ targetTypes: string[]; } interface DatasetAccessAuthorizedDatasetDataset { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; } interface DatasetAccessCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file * name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface DatasetAccessDataset { /** * The dataset this entry applies to * Structure is documented below. */ dataset: outputs.bigquery.DatasetAccessDatasetDataset; /** * Which resources in the dataset this entry applies to. Currently, only views are supported, * but additional target types may be added in the future. Possible values: VIEWS */ targetTypes: string[]; } interface DatasetAccessDatasetDataset { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table.
*/ projectId: string; } interface DatasetAccessRoutine { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the routine. The ID must contain only letters (a-z, * A-Z), numbers (0-9), or underscores (_). The maximum length * is 256 characters. */ routineId: string; } interface DatasetAccessView { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the table. The ID must contain only letters (a-z, * A-Z), numbers (0-9), or underscores (_). The maximum length * is 1,024 characters. */ tableId: string; } interface DatasetDefaultEncryptionConfiguration { /** * Describes the Cloud KMS encryption key that will be used to protect destination * BigQuery table. The BigQuery Service Account associated with your project requires * access to this encryption key. */ kmsKeyName: string; } interface DatasetExternalCatalogDatasetOptions { /** * The storage location URI for all tables in the dataset. Equivalent to hive metastore's * database locationUri. Maximum length of 1024 characters. */ defaultStorageLocationUri?: string; /** * A map of key value pairs defining the parameters and properties of the open source schema. * Maximum size of 2Mib. */ parameters?: { [key: string]: string; }; } interface DatasetExternalDatasetReference { /** * The connection id that is used to access the externalSource. * Format: projects/{projectId}/locations/{locationId}/connections/{connectionId} */ connection: string; /** * External source that backs this dataset. */ externalSource: string; } interface DatasetIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
* * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface DatasetIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface GetDatasetAccess { /** * Condition for the binding. If CEL expression in this field is true, this * access binding will be considered. */ conditions: outputs.bigquery.GetDatasetAccessCondition[]; /** * Grants all resources of particular types in a particular dataset read access to the current dataset. */ datasets: outputs.bigquery.GetDatasetAccessDataset[]; /** * A domain to grant access to. Any users signed in with the * domain specified will be granted the specified access. */ domain: string; /** * An email address of a Google Group to grant access to. */ groupByEmail: string; /** * Some other type of member that appears in the IAM Policy but isn't a user, * group, domain, or special group.
For example: 'allUsers' */ iamMember: string; /** * Describes the rights granted to the user specified by the other * member of the access object. Basic, predefined, and custom roles * are supported. Predefined roles that have equivalent basic roles * are swapped by the API to their basic counterparts. See * [official docs](https://cloud.google.com/bigquery/docs/access-control). */ role: string; /** * A routine from a different dataset to grant access to. Queries * executed against that routine will have read access to tables in * this dataset. The role field is not required when this field is * set. If that routine is updated by any user, access to the routine * needs to be granted again via an update operation. */ routines: outputs.bigquery.GetDatasetAccessRoutine[]; /** * A special group to grant access to. Possible values include: * * 'projectOwners': Owners of the enclosing project. * * 'projectReaders': Readers of the enclosing project. * * 'projectWriters': Writers of the enclosing project. * * 'allAuthenticatedUsers': All authenticated BigQuery users. */ specialGroup: string; /** * An email address of a user to grant access to. For example: * fred@example.com */ userByEmail: string; /** * A view from a different dataset to grant access to. Queries * executed against that view will have read access to tables in * this dataset. The role field is not required when this field is * set. If that view is updated by any user, access to the view * needs to be granted again via an update operation. */ views: outputs.bigquery.GetDatasetAccessView[]; } interface GetDatasetAccessCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file * name and a position in the file.
*/ location: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title: string; } interface GetDatasetAccessDataset { /** * The dataset this entry applies to */ datasets: outputs.bigquery.GetDatasetAccessDatasetDataset[]; /** * Which resources in the dataset this entry applies to. Currently, only views are supported, * but additional target types may be added in the future. Possible values: VIEWS */ targetTypes: string[]; } interface GetDatasetAccessDatasetDataset { /** * The dataset ID. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; } interface GetDatasetAccessRoutine { /** * The dataset ID. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the routine. The ID must contain only letters (a-z, * A-Z), numbers (0-9), or underscores (_). The maximum length * is 256 characters. */ routineId: string; } interface GetDatasetAccessView { /** * The dataset ID. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the table. The ID must contain only letters (a-z, * A-Z), numbers (0-9), or underscores (_). The maximum length * is 1,024 characters. */ tableId: string; } interface GetDatasetDefaultEncryptionConfiguration { /** * Describes the Cloud KMS encryption key that will be used to protect destination * BigQuery table. The BigQuery Service Account associated with your project requires * access to this encryption key. */ kmsKeyName: string; } interface GetDatasetExternalCatalogDatasetOption { /** * The storage location URI for all tables in the dataset. Equivalent to hive metastore's * database locationUri. Maximum length of 1024 characters. */ defaultStorageLocationUri: string; /** * A map of key value pairs defining the parameters and properties of the open source schema. * Maximum size of 2Mib.
*/ parameters: { [key: string]: string; }; } interface GetDatasetExternalDatasetReference { /** * The connection id that is used to access the externalSource. * Format: projects/{projectId}/locations/{locationId}/connections/{connectionId} */ connection: string; /** * External source that backs this dataset. */ externalSource: string; } interface GetDatasetsDataset { /** * The id of the dataset. */ datasetId: string; /** * The friendly name of the dataset. */ friendlyName: string; /** * User-provided dataset labels, in key/value pairs. */ labels: { [key: string]: string; }; /** * The geographic location of the dataset. */ location: string; } interface GetTableBiglakeConfiguration { /** * The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connectionId can have the form "<project\_id>.<location\_id>.<connection\_id>" or "projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>". */ connectionId: string; /** * The file format the data is stored in. */ fileFormat: string; /** * The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" */ storageUri: string; /** * The table format the metadata only snapshots are stored in. */ tableFormat: string; } interface GetTableEncryptionConfiguration { /** * The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource. */ kmsKeyName: string; /** * The self link or full name of the kms key version used to encrypt this table.
*/ kmsKeyVersion: string; } interface GetTableExternalCatalogTableOption { /** * The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection is needed to read the open source table from BigQuery Engine. The connectionId can have the form "<project\_id>.<location\_id>.<connection\_id>" or "projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>". */ connectionId: string; /** * A map of key value pairs defining the parameters and properties of the open source table. Corresponds with hive meta store table parameters. Maximum size of 4Mib. */ parameters: { [key: string]: string; }; /** * A storage descriptor containing information about the physical storage of this table. */ storageDescriptors: outputs.bigquery.GetTableExternalCatalogTableOptionStorageDescriptor[]; } interface GetTableExternalCatalogTableOptionStorageDescriptor { /** * Specifies the fully qualified class name of the InputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"). The maximum length is 128 characters. */ inputFormat: string; /** * The physical location of the table (e.g. 'gs://spark-dataproc-data/pangea-data/case_sensitive/' or 'gs://spark-dataproc-data/pangea-data/*'). The maximum length is 2056 bytes. */ locationUri: string; /** * Specifies the fully qualified class name of the OutputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). The maximum length is 128 characters. */ outputFormat: string; /** * Serializer and deserializer information. */ serdeInfos: outputs.bigquery.GetTableExternalCatalogTableOptionStorageDescriptorSerdeInfo[]; } interface GetTableExternalCatalogTableOptionStorageDescriptorSerdeInfo { /** * Name of the SerDe. The maximum length is 256 characters. */ name: string; /** * Key-value pairs that define the initialization parameters for the serialization library. Maximum size 10 Kib.
*/ parameters: { [key: string]: string; }; /** * Specifies a fully-qualified class name of the serialization library that is responsible for the translation of data between table representation and the underlying low-level input and output format structures. The maximum length is 256 characters. */ serializationLibrary: string; } interface GetTableExternalDataConfiguration { /** * Let BigQuery try to autodetect the schema and format of the table. */ autodetect: boolean; /** * Additional options if sourceFormat is set to "AVRO" */ avroOptions: outputs.bigquery.GetTableExternalDataConfigurationAvroOption[]; /** * Additional options if sourceFormat is set to BIGTABLE. */ bigtableOptions: outputs.bigquery.GetTableExternalDataConfigurationBigtableOption[]; /** * The compression type of the data source. Valid values are "NONE" or "GZIP". */ compression: string; /** * The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connectionId can have the form ".." or "projects//locations//connections/". */ connectionId: string; /** * Additional properties to set if sourceFormat is set to "CSV". */ csvOptions: outputs.bigquery.GetTableExternalDataConfigurationCsvOption[]; /** * The data types that could be used as a target type when converting decimal values. */ decimalTargetTypes: string[]; /** * Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. */ fileSetSpecType: string; /** * Additional options if sourceFormat is set to "GOOGLE_SHEETS". */ googleSheetsOptions: outputs.bigquery.GetTableExternalDataConfigurationGoogleSheetsOption[]; /** * When set, configures hive partitioning support. 
Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. */ hivePartitioningOptions: outputs.bigquery.GetTableExternalDataConfigurationHivePartitioningOption[]; /** * Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. */ ignoreUnknownValues: boolean; /** * Load option to be used together with sourceFormat newline-delimited JSON to indicate that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and sourceFormat must be set to NEWLINE_DELIMITED_JSON). */ jsonExtension: string; /** * Additional properties to set if sourceFormat is set to JSON. */ jsonOptions: outputs.bigquery.GetTableExternalDataConfigurationJsonOption[]; /** * The maximum number of bad records that BigQuery can ignore when reading data. */ maxBadRecords: number; /** * Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. */ metadataCacheMode: string; /** * Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If ObjectMetadata is set, sourceFormat should be omitted. */ objectMetadata: string; /** * Additional properties to set if sourceFormat is set to PARQUET. */ parquetOptions: outputs.bigquery.GetTableExternalDataConfigurationParquetOption[]; /** * When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC. */ referenceFileSchemaUri: string; /** * A JSON schema for the external table. 
Schema is required for CSV and JSON formats and is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats when using external tables. */ schema: string; /** * Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly". */ sourceFormat: string; /** * A list of the fully-qualified URIs that point to your data in Google Cloud. */ sourceUris: string[]; } interface GetTableExternalDataConfigurationAvroOption { /** * If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). */ useAvroLogicalTypes: boolean; } interface GetTableExternalDataConfigurationBigtableOption { /** * A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. */ columnFamilies: outputs.bigquery.GetTableExternalDataConfigurationBigtableOptionColumnFamily[]; /** * If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false. */ ignoreUnspecifiedColumnFamilies: boolean; /** * If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. 
The default value is false. */ outputColumnFamiliesAsJson: boolean; /** * If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false. */ readRowkeyAsString: boolean; } interface GetTableExternalDataConfigurationBigtableOptionColumnFamily { /** * A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields. Other columns can be accessed as a list through the column field. */ columns: outputs.bigquery.GetTableExternalDataConfigurationBigtableOptionColumnFamilyColumn[]; /** * The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. */ encoding: string; /** * Identifier of the column family. */ familyId: string; /** * If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column. */ onlyReadLatest: boolean; /** * The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it. 
*/ type: string; } interface GetTableExternalDataConfigurationBigtableOptionColumnFamilyColumn { /** * The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels. */ encoding: string; /** * If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries. */ fieldName: string; /** * If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels. */ onlyReadLatest: boolean; /** * Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as a field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName. */ qualifierEncoded: string; /** * Qualifier string. */ qualifierString: string; /** * The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is "BYTES". 'type' can also be set at the column family level. 
However, the setting at this level takes precedence if 'type' is set at both levels. */ type: string; } interface GetTableExternalDataConfigurationCsvOption { /** * Indicates if BigQuery should accept rows that are missing trailing optional columns. */ allowJaggedRows: boolean; /** * Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false. */ allowQuotedNewlines: boolean; /** * The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. */ encoding: string; /** * The separator for fields in a CSV file. */ fieldDelimiter: string; /** * The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. The API-side default is ", specified in Terraform escaped as \". Due to limitations with Terraform default values, this value is required to be explicitly set. */ quote: string; /** * The number of rows at the top of a CSV file that BigQuery will skip when reading the data. */ skipLeadingRows: number; /** * Specifies how source columns are matched to the table schema. Valid values are POSITION (columns matched by position, assuming same ordering) or NAME (columns matched by name, reads header row and reorders columns to align with schema field names). */ sourceColumnMatch: string; } interface GetTableExternalDataConfigurationGoogleSheetsOption { /** * Range of a sheet to query from. Only used when non-empty. At least one of range or skipLeadingRows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20" */ range: string; /** * The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skipLeadingRows must be set. 
*/ skipLeadingRows: number; } interface GetTableExternalDataConfigurationHivePartitioningOption { /** * When set, what mode of hive partitioning to use when reading data. */ mode: string; /** * If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. */ requirePartitionFilter: boolean; /** * When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. */ sourceUriPrefix: string; } interface GetTableExternalDataConfigurationJsonOption { /** * The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. */ encoding: string; } interface GetTableExternalDataConfigurationParquetOption { /** * Indicates whether to use schema inference specifically for Parquet LIST logical type. */ enableListInference: boolean; /** * Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. */ enumAsString: boolean; } interface GetTableMaterializedView { /** * Allow non incremental materialized view definition. The default value is false. */ allowNonIncrementalDefinition: boolean; /** * Specifies if BigQuery should automatically refresh materialized view when the base table is updated. The default is true. */ enableRefresh: boolean; /** * A query whose result is persisted. */ query: string; /** * Specifies maximum frequency at which this materialized view will be refreshed. The default is 1800000. */ refreshIntervalMs: number; } interface GetTableRangePartitioning { /** * The field used to determine how to create a range-based partition. */ field: string; /** * Information required to partition based on ranges. Structure is documented below. */ ranges: outputs.bigquery.GetTableRangePartitioningRange[]; } interface GetTableRangePartitioningRange { /** * End of the range partitioning, exclusive. 
*/ end: number; /** * The width of each range within the partition. */ interval: number; /** * Start of the range partitioning, inclusive. */ start: number; } interface GetTableSchemaForeignTypeInfo { /** * Specifies the system which defines the foreign data type. */ typeSystem: string; } interface GetTableTableConstraint { /** * Present only if the table has a foreign key. The foreign key is not enforced. */ foreignKeys: outputs.bigquery.GetTableTableConstraintForeignKey[]; /** * Represents a primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. */ primaryKeys: outputs.bigquery.GetTableTableConstraintPrimaryKey[]; } interface GetTableTableConstraintForeignKey { /** * The pair of the foreign key column and primary key column. */ columnReferences: outputs.bigquery.GetTableTableConstraintForeignKeyColumnReference[]; /** * Set only if the foreign key constraint is named. */ name: string; /** * The table that holds the primary key and is referenced by this foreign key. */ referencedTables: outputs.bigquery.GetTableTableConstraintForeignKeyReferencedTable[]; } interface GetTableTableConstraintForeignKeyColumnReference { /** * The column in the primary key that are referenced by the referencingColumn. */ referencedColumn: string; /** * The column that composes the foreign key. */ referencingColumn: string; } interface GetTableTableConstraintForeignKeyReferencedTable { /** * The dataset ID. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The table ID. */ tableId: string; } interface GetTableTableConstraintPrimaryKey { /** * The columns that are composed of the primary key constraint. */ columns: string[]; } interface GetTableTableReplicationInfo { /** * The interval at which the source materialized view is polled for updates. The default is 300000. */ replicationIntervalMs: number; /** * The ID of the source dataset. 
*/ sourceDatasetId: string; /** * The ID of the source project. */ sourceProjectId: string; /** * The ID of the source materialized view. */ sourceTableId: string; } interface GetTableTimePartitioning { /** * Number of milliseconds for which to keep the storage for a partition. */ expirationMs: number; /** * The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time. */ field: string; /** * If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. */ requirePartitionFilter: boolean; /** * The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively. */ type: string; } interface GetTableView { /** * A query that BigQuery executes when the view is referenced. */ query: string; /** * Specifies whether to use BigQuery's legacy SQL for this view. If set to false, the view will use BigQuery's standard SQL */ useLegacySql: boolean; } interface GetTablesTable { /** * User-provided table labels, in key/value pairs. */ labels: { [key: string]: string; }; /** * The name of the table. */ tableId: string; } interface IamBindingCondition { description?: string; expression: string; title: string; } interface IamMemberCondition { description?: string; expression: string; title: string; } interface JobCopy { /** * Specifies whether the job is allowed to create new tables. The following values are supported: * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. * CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. * Creation, truncation and append actions occur as one atomic update upon job completion * Default value is `CREATE_IF_NEEDED`. * Possible values are: `CREATE_IF_NEEDED`, `CREATE_NEVER`. 
*/ createDisposition?: string; /** * Custom encryption configuration (e.g., Cloud KMS keys) * Structure is documented below. */ destinationEncryptionConfiguration?: outputs.bigquery.JobCopyDestinationEncryptionConfiguration; /** * The destination table. * Structure is documented below. */ destinationTable?: outputs.bigquery.JobCopyDestinationTable; /** * Source tables to copy. * Structure is documented below. */ sourceTables: outputs.bigquery.JobCopySourceTable[]; /** * Specifies the action that occurs if the destination table already exists. The following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. * WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. * Each action is atomic and only occurs if BigQuery is able to complete the job successfully. * Creation, truncation and append actions occur as one atomic update upon job completion. * Default value is `WRITE_EMPTY`. * Possible values are: `WRITE_TRUNCATE`, `WRITE_APPEND`, `WRITE_EMPTY`. */ writeDisposition?: string; } interface JobCopyDestinationEncryptionConfiguration { /** * Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. * The BigQuery Service Account associated with your project requires access to this encryption key. */ kmsKeyName: string; /** * (Output) * Describes the Cloud KMS encryption key version used to protect destination BigQuery table. */ kmsKeyVersion: string; } interface JobCopyDestinationTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The table. 
Can be specified `{{table_id}}` if `projectId` and `datasetId` are also set, * or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. */ tableId: string; } interface JobCopySourceTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The table. Can be specified `{{table_id}}` if `projectId` and `datasetId` are also set, * or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. */ tableId: string; } interface JobExtract { /** * The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. * The default value is NONE. DEFLATE and SNAPPY are only supported for Avro. */ compression?: string; /** * The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. * The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. * The default value for models is SAVED_MODEL. */ destinationFormat: string; /** * A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written. */ destinationUris: string[]; /** * When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. * Default is ',' */ fieldDelimiter: string; /** * Whether to print out a header row in the results. Default is true. */ printHeader?: boolean; /** * A reference to the model being exported. * Structure is documented below. */ sourceModel?: outputs.bigquery.JobExtractSourceModel; /** * A reference to the table being exported. * Structure is documented below. */ sourceTable?: outputs.bigquery.JobExtractSourceTable; /** * Whether to use logical types when extracting to AVRO format. */ useAvroLogicalTypes?: boolean; } interface JobExtractSourceModel { /** * The ID of the dataset containing this model. 
*/ datasetId: string; /** * The ID of the model. */ modelId: string; /** * The ID of the project containing this model. */ projectId: string; } interface JobExtractSourceTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The table. Can be specified `{{table_id}}` if `projectId` and `datasetId` are also set, * or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. */ tableId: string; } interface JobLoad { /** * Accept rows that are missing trailing optional columns. The missing values are treated as nulls. * If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, * an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats. */ allowJaggedRows?: boolean; /** * Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. * The default value is false. */ allowQuotedNewlines?: boolean; /** * Indicates if we should automatically infer the options and schema for CSV and JSON sources. */ autodetect?: boolean; /** * Specifies whether the job is allowed to create new tables. The following values are supported: * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. * CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. * Creation, truncation and append actions occur as one atomic update upon job completion * Default value is `CREATE_IF_NEEDED`. * Possible values are: `CREATE_IF_NEEDED`, `CREATE_NEVER`. */ createDisposition?: string; /** * Custom encryption configuration (e.g., Cloud KMS keys) * Structure is documented below. */ destinationEncryptionConfiguration?: outputs.bigquery.JobLoadDestinationEncryptionConfiguration; /** * The destination table to load the data into. 
* Structure is documented below. */ destinationTable: outputs.bigquery.JobLoadDestinationTable; /** * The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. * The default value is UTF-8. BigQuery decodes the data after the raw, binary data * has been split using the values of the quote and fieldDelimiter properties. */ encoding?: string; /** * The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. * To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts * the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the * data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. * The default value is a comma (','). */ fieldDelimiter: string; /** * Indicates if BigQuery should allow extra values that are not represented in the table schema. * If true, the extra values are ignored. If false, records with extra columns are treated as bad records, * and if there are too many bad records, an invalid error is returned in the job result. * The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: * CSV: Trailing columns * JSON: Named values that don't match any column names */ ignoreUnknownValues?: boolean; /** * If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. * For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited * GeoJSON: set to GEOJSON. */ jsonExtension?: string; /** * The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, * an invalid error is returned in the job result. The default value is 0, which requires that all records are valid. 
*/ maxBadRecords?: number; /** * Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this * property to a custom value, BigQuery throws an error if an * empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as * an empty value. */ nullMarker?: string; /** * Parquet Options for load and make external tables. * Structure is documented below. */ parquetOptions?: outputs.bigquery.JobLoadParquetOptions; /** * If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. * Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. * If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result. */ projectionFields?: string[]; /** * The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, * and then uses the first byte of the encoded string to split the data in its raw, binary state. * The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. * If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. */ quote: string; /** * Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or * supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; * when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. * For normal tables, WRITE_TRUNCATE will always overwrite the schema. 
One or more of the following values are specified: * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable. */ schemaUpdateOptions?: string[]; /** * The number of rows at the top of a CSV file that BigQuery will skip when loading the data. * The default value is 0. This property is useful if you have header rows in the file that should be skipped. * When autodetect is on, the behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, * the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, * row N is just skipped. Otherwise row N is used to extract column names for the detected schema. */ skipLeadingRows?: number; /** * The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". * For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". * For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". * The default value is CSV. */ sourceFormat?: string; /** * The fully-qualified URIs that point to your data in Google Cloud. * For Google Cloud Storage URIs: Each URI can contain one '\*' wildcard character * and it must come after the 'bucket' name. Size limits related to load jobs apply * to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be * specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. * For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '\*' wildcard character is not allowed. 
*/ sourceUris: string[]; /** * Time-based partitioning specification for the destination table. * Structure is documented below. */ timePartitioning?: outputs.bigquery.JobLoadTimePartitioning; /** * Specifies the action that occurs if the destination table already exists. The following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. * WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. * Each action is atomic and only occurs if BigQuery is able to complete the job successfully. * Creation, truncation and append actions occur as one atomic update upon job completion. * Default value is `WRITE_EMPTY`. * Possible values are: `WRITE_TRUNCATE`, `WRITE_APPEND`, `WRITE_EMPTY`. */ writeDisposition?: string; } interface JobLoadDestinationEncryptionConfiguration { /** * Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. * The BigQuery Service Account associated with your project requires access to this encryption key. */ kmsKeyName: string; /** * (Output) * Describes the Cloud KMS encryption key version used to protect destination BigQuery table. */ kmsKeyVersion: string; } interface JobLoadDestinationTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The table. Can be specified `{{table_id}}` if `projectId` and `datasetId` are also set, * or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. */ tableId: string; } interface JobLoadParquetOptions { /** * If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type. 
*/ enableListInference?: boolean; /** * If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. */ enumAsString?: boolean; } interface JobLoadTimePartitioning { /** * Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value. */ expirationMs?: string; /** * If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. * The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. * A wrapper is used here because an empty string is an invalid value. */ field?: string; /** * The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, * but in OnePlatform the field will be treated as unset. */ type: string; } interface JobQuery { /** * If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. * Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. * However, you must still set destinationTable when result size exceeds the allowed maximum response size. */ allowLargeResults?: boolean; /** * Connection properties to customize query behavior. Under JDBC, these correspond * directly to connection properties passed to the DriverManager. Under ODBC, these * correspond to properties in the connection string. * Structure is documented below. */ connectionProperties?: outputs.bigquery.JobQueryConnectionProperty[]; /** * (Optional, Beta) * Whether to run the query as continuous or a regular query. */ continuous?: boolean; /** * Specifies whether the job is allowed to create new tables. The following values are supported: * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. * CREATE_NEVER: The table must already exist. 
If it does not, a 'notFound' error is returned in the job result. * Creation, truncation and append actions occur as one atomic update upon job completion * Default value is `CREATE_IF_NEEDED`. * Possible values are: `CREATE_IF_NEEDED`, `CREATE_NEVER`. */ createDisposition?: string; /** * Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. * Structure is documented below. */ defaultDataset?: outputs.bigquery.JobQueryDefaultDataset; /** * Custom encryption configuration (e.g., Cloud KMS keys) * Structure is documented below. */ destinationEncryptionConfiguration?: outputs.bigquery.JobQueryDestinationEncryptionConfiguration; /** * Describes the table where the query results should be stored. * This property must be set for large results that exceed the maximum response size. * For queries that produce anonymous (cached) results, this field will be populated by BigQuery. * Structure is documented below. */ destinationTable: outputs.bigquery.JobQueryDestinationTable; /** * If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. * allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened. */ flattenResults?: boolean; /** * Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). * If unspecified, this will be set to your project default. */ maximumBillingTier?: number; /** * Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). * If unspecified, this will be set to your project default. */ maximumBytesBilled?: string; /** * Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query. 
*/ parameterMode?: string; /** * Specifies a priority for the query. * Default value is `INTERACTIVE`. * Possible values are: `INTERACTIVE`, `BATCH`. */ priority?: string; /** * SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. * *NOTE*: queries containing [DML language](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) * (`DELETE`, `UPDATE`, `MERGE`, `INSERT`) must specify `createDisposition = ""` and `writeDisposition = ""`. */ query: string; /** * Allows the schema of the destination table to be updated as a side effect of the query job. * Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; * when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, * specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. * One or more of the following values are specified: * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable. */ schemaUpdateOptions?: string[]; /** * Options controlling the execution of scripts. * Structure is documented below. */ scriptOptions?: outputs.bigquery.JobQueryScriptOptions; /** * Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. * If set to false, the query will use BigQuery's standard SQL. */ useLegacySql?: boolean; /** * Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever * tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. * The default value is true. */ useQueryCache?: boolean; /** * Describes user-defined function resources used in the query. * Structure is documented below. 
*/ userDefinedFunctionResources?: outputs.bigquery.JobQueryUserDefinedFunctionResource[]; /** * Specifies the action that occurs if the destination table already exists. The following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. * WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. * Each action is atomic and only occurs if BigQuery is able to complete the job successfully. * Creation, truncation and append actions occur as one atomic update upon job completion. * Default value is `WRITE_EMPTY`. * Possible values are: `WRITE_TRUNCATE`, `WRITE_APPEND`, `WRITE_EMPTY`. */ writeDisposition?: string; } interface JobQueryConnectionProperty { /** * The key of the property to set. Currently supported connection properties: */ key: string; /** * The value of the property to set. */ value: string; } interface JobQueryDefaultDataset { /** * The dataset. Can be specified `{{dataset_id}}` if `projectId` is also set, * or of the form `projects/{{project}}/datasets/{{dataset_id}}` if not. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; } interface JobQueryDestinationEncryptionConfiguration { /** * Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. * The BigQuery Service Account associated with your project requires access to this encryption key. */ kmsKeyName: string; /** * (Output) * Describes the Cloud KMS encryption key version used to protect destination BigQuery table. */ kmsKeyVersion: string; } interface JobQueryDestinationTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The table. 
Can be specified `{{table_id}}` if `projectId` and `datasetId` are also set, * or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. */ tableId: string; } interface JobQueryScriptOptions { /** * Determines which statement in the script represents the "key result", * used to populate the schema and query results of the script job. * Possible values are: `LAST`, `FIRST_SELECT`. */ keyResultStatement?: string; /** * Limit on the number of bytes billed per statement. Exceeding this budget results in an error. */ statementByteBudget?: string; /** * Timeout period for each statement in a script. */ statementTimeoutMs?: string; } interface JobQueryUserDefinedFunctionResource { /** * An inline resource that contains code for a user-defined function (UDF). * Providing a inline code resource is equivalent to providing a URI for a file containing the same code. */ inlineCode?: string; /** * A code resource to load from a Google Cloud Storage URI (gs://bucket/path). */ resourceUri?: string; } interface JobStatus { /** * (Output) * Final error result of the job. If present, indicates that the job has completed and was unsuccessful. * Structure is documented below. */ errorResults: outputs.bigquery.JobStatusErrorResult[]; /** * (Output) * The first errors encountered during the running of the job. The final message * includes the number of errors that caused the process to stop. Errors here do * not necessarily mean that the job has not completed or was unsuccessful. * Structure is documented below. */ errors: outputs.bigquery.JobStatusError[]; /** * (Output) * Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'. */ state: string; } interface JobStatusError { /** * The geographic location of the job. The default value is US. */ location?: string; /** * A human-readable description of the error. */ message?: string; /** * A short error code that summarizes the error. 
*/ reason?: string; } interface JobStatusErrorResult { /** * The geographic location of the job. The default value is US. */ location?: string; /** * A human-readable description of the error. */ message?: string; /** * A short error code that summarizes the error. */ reason?: string; } interface ReservationAutoscale { /** * (Output) * The slot capacity added to this reservation when autoscale happens. Will be between [0, maxSlots]. */ currentSlots: number; /** * Number of slots to be scaled when needed. */ maxSlots?: number; } interface ReservationReplicationStatus { /** * (Output) * The last error encountered while trying to replicate changes from the primary to the * secondary. This field is only available if the replication has not succeeded since. * Structure is documented below. */ errors: outputs.bigquery.ReservationReplicationStatusError[]; /** * (Output) * The time at which the last error was encountered while trying to replicate changes from * the primary to the secondary. This field is only available if the replication has not * succeeded since. */ lastErrorTime: string; /** * (Output) * A timestamp corresponding to the last change on the primary that was successfully * replicated to the secondary. */ lastReplicationTime: string; } interface ReservationReplicationStatusError { /** * (Output) * The status code, which should be an enum value of [google.rpc.Code](https://cloud.google.com/bigquery/docs/reference/reservations/rpc/google.rpc#google.rpc.Code). */ code: number; /** * (Output) * A developer-facing error message, which should be in English. */ message: string; } interface RoutineArgument { /** * Defaults to FIXED_TYPE. * Default value is `FIXED_TYPE`. * Possible values are: `FIXED_TYPE`, `ANY_TYPE`. */ argumentKind?: string; /** * A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. * ~>**NOTE**: Because this field expects a JSON string, any changes to the string * will create a diff, even if the JSON itself hasn't changed. 
If the API returns * a different value for the same schema, e.g. it switched the order of values * or replaced STRUCT field type with RECORD field type, we currently cannot * suppress the recurring diff this causes. As a workaround, we recommend using * the schema as returned by the API. */ dataType?: string; /** * Specifies whether the argument is input or output. Can be set for procedures only. * Possible values are: `IN`, `OUT`, `INOUT`. */ mode?: string; /** * The name of this argument. Can be absent for function return argument. */ name?: string; } interface RoutineExternalRuntimeOptions { /** * Amount of CPU provisioned for a Python UDF container instance. For more * information, see [Configure container limits for Python * UDFs](https://cloud.google.com/bigquery/docs/user-defined-functions-python#configure-container-limits) */ containerCpu?: number; /** * Amount of memory provisioned for a Python UDF container instance. Format: * {number}{unit} where unit is one of "M", "G", "Mi" and "Gi" (e.g. 1G, * 512Mi). If not specified, the default value is 512Mi. For more information, * see [Configure container limits for Python * UDFs](https://cloud.google.com/bigquery/docs/user-defined-functions-python#configure-container-limits) */ containerMemory?: string; /** * Maximum number of rows in each batch sent to the external runtime. If * absent or if 0, BigQuery dynamically decides the number of rows in a batch. */ maxBatchingRows?: string; /** * Fully qualified name of the connection whose service account will be used * to execute the code in the container. Format: * `"projects/{project_id}/locations/{location_id}/connections/{connection_id}"` */ runtimeConnection?: string; /** * Language runtime version. Example: `python-3.11`. */ runtimeVersion?: string; } interface RoutinePythonOptions { /** * The name of the function defined in Python code as the entry point when the * Python UDF is invoked. 
*/ entryPoint: string; /** * A list of Python package names along with versions to be installed. * Example: ["pandas>=2.1", "google-cloud-translate==3.11"]. For more * information, see [Use third-party * packages](https://cloud.google.com/bigquery/docs/user-defined-functions-python#third-party-packages). */ packages?: string[]; } interface RoutineRemoteFunctionOptions { /** * Fully qualified name of the user-provided connection object which holds * the authentication information to send requests to the remote service. * Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}" */ connection?: string; /** * Endpoint of the user-provided remote service, e.g. * `https://us-east1-my_gcf_project.cloudfunctions.net/remote_add` */ endpoint?: string; /** * Max number of rows in each batch sent to the remote service. If absent or if 0, * BigQuery dynamically decides the number of rows in a batch. */ maxBatchingRows?: string; /** * User-defined context as a set of key/value pairs, which will be sent as function * invocation context together with batched arguments in the requests to the remote * service. The total number of bytes of keys and values must be less than 8KB. * An object containing a list of "key": value pairs. Example: * `{ "name": "wrench", "mass": "1.3kg", "count": "3" }`. */ userDefinedContext: { [key: string]: string; }; } interface RoutineSparkOptions { /** * Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark. */ archiveUris: string[]; /** * Fully qualified name of the user-provided Spark connection object. * Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}" */ connection?: string; /** * Custom container image for the runtime environment. */ containerImage?: string; /** * Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark. 
*/ fileUris: string[]; /** * JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark. */ jarUris: string[]; /** * The fully qualified name of a class in jarUris, for example, com.example.wordcount. * Exactly one of mainClass and mainJarUri field should be set for Java/Scala language type. */ mainClass?: string; /** * The main file/jar URI of the Spark application. * Exactly one of the definitionBody field and the mainFileUri field must be set for Python. * Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type. */ mainFileUri?: string; /** * Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. * For more information, see Apache Spark and the procedure option list. * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ properties: { [key: string]: string; }; /** * Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark. */ pyFileUris: string[]; /** * Runtime version. If not specified, the default runtime version is used. */ runtimeVersion?: string; } interface TableBiglakeConfiguration { /** * The connection specifying the credentials to be used to * read and write to external storage, such as Cloud Storage. The connectionId can * have the form "<project\_id>.<location\_id>.<connection\_id>" or * projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>". */ connectionId: string; /** * The file format the table data is stored in. */ fileFormat: string; /** * The fully qualified location prefix of the external folder where table data * is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/" */ storageUri: string; /** * The table format the metadata only snapshots are stored in. 
*/ tableFormat: string; } interface TableEncryptionConfiguration { /** * The self link or full name of a key which should be used to * encrypt this table. Note that the default bigquery service account will need to have * encrypt/decrypt permissions on this key - you may want to see the * `gcp.bigquery.getDefaultServiceAccount` datasource and the * `gcp.kms.CryptoKeyIAMBinding` resource. */ kmsKeyName: string; /** * The self link or full name of the kms key version used to encrypt this table. */ kmsKeyVersion: string; } interface TableExternalCatalogTableOptions { /** * The connection specifying the credentials to be * used to read external storage, such as Azure Blob, Cloud Storage, or S3. The * connection is needed to read the open source table from BigQuery Engine. The * connectionId can have the form `{{project}}.{{location}}.{{connection_id}}` * or `projects/{{project}}/locations/{{location}}/connections/{{connection_id}}`. */ connectionId?: string; /** * A map of key value pairs defining the parameters and * properties of the open source table. Corresponds with hive meta store table * parameters. Maximum size of 4Mib. */ parameters?: { [key: string]: string; }; /** * A storage descriptor containing information * about the physical storage of this table. Structure is documented below. */ storageDescriptor?: outputs.bigquery.TableExternalCatalogTableOptionsStorageDescriptor; } interface TableExternalCatalogTableOptionsStorageDescriptor { /** * Specifies the fully qualified class name of the * InputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"). The * maximum length is 128 characters. */ inputFormat?: string; /** * The physical location of the table (e.g. * 'gs://spark-dataproc-data/pangea-data/case_sensitive/' or * 'gs://spark-dataproc-data/pangea-data/*'). The maximum length is 2056 bytes. */ locationUri?: string; /** * Specifies the fully qualified class name of the * OutputFormat (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). The * maximum length is 128 characters. 
*/ outputFormat?: string; /** * Serializer and deserializer information. Structure * is documented below. */ serdeInfo?: outputs.bigquery.TableExternalCatalogTableOptionsStorageDescriptorSerdeInfo; } interface TableExternalCatalogTableOptionsStorageDescriptorSerdeInfo { /** * Name of the SerDe. The maximum length is 256 characters. */ name?: string; /** * Key-value pairs that define the initialization * parameters for the serialization library. Maximum size 10 Kib. */ parameters?: { [key: string]: string; }; /** * Specifies a fully-qualified class name of * the serialization library that is responsible for the translation of data * between table representation and the underlying low-level input and output * format structures. The maximum length is 256 characters. */ serializationLibrary: string; } interface TableExternalDataConfiguration { /** * Let BigQuery try to autodetect the schema * and format of the table. */ autodetect: boolean; /** * Additional options if `sourceFormat` is set to * "AVRO". Structure is documented below. */ avroOptions?: outputs.bigquery.TableExternalDataConfigurationAvroOptions; /** * Additional properties to set if * `sourceFormat` is set to "BIGTABLE". Structure is documented below. */ bigtableOptions?: outputs.bigquery.TableExternalDataConfigurationBigtableOptions; /** * The compression type of the data source. * Valid values are "NONE" or "GZIP". */ compression?: string; /** * The connection specifying the credentials to be used to read * external storage, such as Azure Blob, Cloud Storage, or S3. The `connectionId` can have * the form `{{project}}.{{location}}.{{connection_id}}` * or `projects/{{project}}/locations/{{location}}/connections/{{connection_id}}`. * * ~>**NOTE:** If you set `external_data_configuration.connection_id`, the * table schema must be specified using the top-level `schema` field * documented above. */ connectionId?: string; /** * Additional properties to set if * `sourceFormat` is set to "CSV". 
Structure is documented below. */ csvOptions?: outputs.bigquery.TableExternalDataConfigurationCsvOptions; /** * Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. * * Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * * (38,9) > NUMERIC; * (39,9) > BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) > BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) > BIGNUMERIC; * (77,38) > BIGNUMERIC (error if value exceeds supported range). * * This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. * * Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. */ decimalTargetTypes?: string[]; /** * Specifies how source URIs are interpreted for constructing the file set to load. * By default source URIs are expanded against the underlying storage. * Other options include specifying manifest files. Only applicable to object storage systems. Docs */ fileSetSpecType?: string; /** * Additional options if * `sourceFormat` is set to "GOOGLE_SHEETS". Structure is * documented below. */ googleSheetsOptions?: outputs.bigquery.TableExternalDataConfigurationGoogleSheetsOptions; /** * When set, configures hive partitioning * support. 
Not all storage formats support hive partitioning -- requesting hive * partitioning on an unsupported format will lead to an error, as will providing * an invalid specification. Structure is documented below. */ hivePartitioningOptions?: outputs.bigquery.TableExternalDataConfigurationHivePartitioningOptions; /** * Indicates if BigQuery should * allow extra values that are not represented in the table schema. * If true, the extra values are ignored. If false, records with * extra columns are treated as bad records, and if there are too * many bad records, an invalid error is returned in the job result. * The default value is false. */ ignoreUnknownValues?: boolean; /** * Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the `JSON` source format. Valid values are: `GEOJSON`. */ jsonExtension?: string; /** * Additional properties to set if * `sourceFormat` is set to "JSON". Structure is documented below. */ jsonOptions?: outputs.bigquery.TableExternalDataConfigurationJsonOptions; /** * The maximum number of bad records that * BigQuery can ignore when reading data. */ maxBadRecords?: number; /** * Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are `AUTOMATIC` and `MANUAL`. */ metadataCacheMode?: string; /** * Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If `objectMetadata` is set, `sourceFormat` should be omitted. */ objectMetadata?: string; /** * Additional properties to set if * `sourceFormat` is set to "PARQUET". Structure is documented below. */ parquetOptions?: outputs.bigquery.TableExternalDataConfigurationParquetOptions; /** * When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC. 
*/ referenceFileSchemaUri?: string; /** * A JSON schema for the external table. Schema is required * for CSV and JSON formats if autodetect is not on. Schema is disallowed * for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. * ~>**NOTE:** Because this field expects a JSON string, any changes to the * string will create a diff, even if the JSON itself hasn't changed. * Furthermore, drift for this field cannot be detected because BigQuery * only uses this schema to compute the effective schema for the table, therefore * any changes on the configured value will force the table to be recreated. * This schema is effectively only applied when creating a table from an external * datasource, after creation the computed schema will be stored in * `google_bigquery_table.schema` * * ~>**NOTE:** If you set `external_data_configuration.connection_id`, the * table schema must be specified using the top-level `schema` field * documented above. */ schema: string; /** * The data format. Please see sourceFormat under * [ExternalDataConfiguration](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) * in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" * the `scopes` must include "https://www.googleapis.com/auth/drive.readonly". */ sourceFormat?: string; /** * A list of the fully-qualified URIs that point to * your data in Google Cloud. */ sourceUris: string[]; } interface TableExternalDataConfigurationAvroOptions { /** * If set to true, indicates whether * to interpret logical types as the corresponding BigQuery data type * (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). */ useAvroLogicalTypes: boolean; } interface TableExternalDataConfigurationBigtableOptions { /** * A list of column families to expose in the table schema along with their types. 
This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below. */ columnFamilies?: outputs.bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamily[]; /** * If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false. */ ignoreUnspecifiedColumnFamilies?: boolean; /** * If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false. */ outputColumnFamiliesAsJson?: boolean; /** * If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false. */ readRowkeyAsString?: boolean; } interface TableExternalDataConfigurationBigtableOptionsColumnFamily { /** * A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as `<familyId>.<qualifier>` fields. Other columns can be accessed as a list through the `<familyId>.Column` field. Structure is documented below. */ columns?: outputs.bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn[]; /** * The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 
This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. */ encoding?: string; /** * Identifier of the column family. */ familyId?: string; /** * If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column. */ onlyReadLatest?: boolean; /** * The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it. */ type?: string; } interface TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn { /** * The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels. */ encoding?: string; /** * If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries. */ fieldName?: string; /** * If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels. */ onlyReadLatest?: boolean; /** * Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as the `<familyId>.<qualifier>` 
field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName. */ qualifierEncoded?: string; /** * Qualifier string. */ qualifierString?: string; /** * The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels. */ type?: string; } interface TableExternalDataConfigurationCsvOptions { /** * Indicates if BigQuery should accept rows * that are missing trailing optional columns. */ allowJaggedRows?: boolean; /** * Indicates if BigQuery should allow * quoted data sections that contain newline characters in a CSV file. * The default value is false. */ allowQuotedNewlines?: boolean; /** * The character encoding of the data. The supported * values are UTF-8 or ISO-8859-1. */ encoding?: string; /** * The separator for fields in a CSV file. */ fieldDelimiter?: string; /** * The value that is used to quote data sections in a * CSV file. If your data does not contain quoted sections, set the * property value to an empty string. If your data contains quoted newline * characters, you must also set the `allowQuotedNewlines` property to true. * The API-side default is `"`, specified in the provider escaped as `\"`. Due to * limitations with default values, this value is required to be * explicitly set. 
*/ quote: string; /** * The number of rows at the top of a CSV * file that BigQuery will skip when reading the data. */ skipLeadingRows?: number; /** * Specifies how source columns are matched * to the table schema. Valid values are `POSITION` (columns matched by position, * assuming same ordering as the schema) or `NAME` (columns matched by name, * reads the header row and reorders columns to align with schema field names). * If not set, a default is chosen based on how the schema is provided: when * autodetect is used, columns are matched by name; otherwise, by position. */ sourceColumnMatch?: string; } interface TableExternalDataConfigurationGoogleSheetsOptions { /** * Range of a sheet to query from. Only used when * non-empty. At least one of `range` or `skipLeadingRows` must be set. * Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" * For example: "sheet1!A1:B20" */ range?: string; /** * The number of rows at the top of the sheet * that BigQuery will skip when reading the data. At least one of `range` or * `skipLeadingRows` must be set. */ skipLeadingRows?: number; } interface TableExternalDataConfigurationHivePartitioningOptions { /** * When set, what mode of hive partitioning to use when * reading data. The following modes are supported. * * AUTO: automatically infer partition key name(s) and type(s). * * STRINGS: automatically infer partition key name(s). All types are * interpreted as strings. Not all storage formats support hive partitioning. Requesting hive * partitioning on an unsupported format will lead to an error. * Currently supported formats are: JSON, CSV, ORC, Avro and Parquet. * * CUSTOM: when set to `CUSTOM`, you must encode the partition key schema within the `sourceUriPrefix` by setting `sourceUriPrefix` to `gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}`. */ mode?: string; /** * If set to true, queries over this table * require a partition filter that can be used for partition elimination to be * specified. 
*/ requirePartitionFilter?: boolean; /** * When hive partition detection is requested, * a common prefix for all source uris is required. The prefix must end immediately * before the partition key encoding begins. For example, consider files following * this data layout. `gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro` * `gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro` When hive * partitioning is requested with either AUTO or STRINGS detection, the common prefix * can be either of `gs://bucket/path_to_table` or `gs://bucket/path_to_table/`. * Note that when `mode` is set to `CUSTOM`, you must encode the partition key schema within the `sourceUriPrefix` by setting `sourceUriPrefix` to `gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}`. */ sourceUriPrefix?: string; } interface TableExternalDataConfigurationJsonOptions { /** * The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. */ encoding?: string; } interface TableExternalDataConfigurationParquetOptions { /** * Indicates whether to use schema inference specifically for Parquet LIST logical type. */ enableListInference?: boolean; /** * Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. */ enumAsString?: boolean; } interface TableMaterializedView { /** * Allow non incremental materialized view definition. * The default value is false. */ allowNonIncrementalDefinition?: boolean; /** * Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. * The default value is true. */ enableRefresh?: boolean; /** * A query whose result is persisted. */ query: string; /** * The maximum frequency at which this materialized view will be refreshed. 
* The default value is 1800000 */ refreshIntervalMs?: number; } interface TableRangePartitioning { /** * The field used to determine how to create a range-based * partition. */ field: string; /** * Information required to partition based on ranges. * Structure is documented below. */ range: outputs.bigquery.TableRangePartitioningRange; } interface TableRangePartitioningRange { /** * End of the range partitioning, exclusive. */ end: number; /** * The width of each range within the partition. */ interval: number; /** * Start of the range partitioning, inclusive. */ start: number; } interface TableSchemaForeignTypeInfo { /** * Specifies the system which defines the foreign data * type. */ typeSystem: string; } interface TableTableConstraints { /** * Present only if the table has a foreign key. * The foreign key is not enforced. * Structure is documented below. */ foreignKeys?: outputs.bigquery.TableTableConstraintsForeignKey[]; /** * Represents the primary key constraint * on a table's columns. Present only if the table has a primary key. * The primary key is not enforced. * Structure is documented below. */ primaryKey?: outputs.bigquery.TableTableConstraintsPrimaryKey; } interface TableTableConstraintsForeignKey { /** * The pair of the foreign key column and primary key column. * Structure is documented below. */ columnReferences: outputs.bigquery.TableTableConstraintsForeignKeyColumnReferences; /** * Set only if the foreign key constraint is named. */ name?: string; /** * The table that holds the primary key * and is referenced by this foreign key. * Structure is documented below. */ referencedTable: outputs.bigquery.TableTableConstraintsForeignKeyReferencedTable; } interface TableTableConstraintsForeignKeyColumnReferences { /** * The column in the primary key that are * referenced by the referencingColumn */ referencedColumn: string; /** * The column that composes the foreign key. 
*/ referencingColumn: string; } interface TableTableConstraintsForeignKeyReferencedTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the table. The ID must contain only * letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum * length is 1,024 characters. Certain operations allow suffixing of * the table ID with a partition decorator, such as * sample_table$20190123. */ tableId: string; } interface TableTableConstraintsPrimaryKey { /** * The columns that are composed of the primary key constraint. */ columns: string[]; } interface TableTableReplicationInfo { /** * The interval at which the source * materialized view is polled for updates. The default is 300000. */ replicationIntervalMs?: number; /** * The ID of the source dataset. */ sourceDatasetId: string; /** * The ID of the source project. */ sourceProjectId: string; /** * The ID of the source materialized view. */ sourceTableId: string; } interface TableTimePartitioning { /** * Number of milliseconds for which to keep the * storage for a partition. */ expirationMs: number; /** * The field used to determine how to create a time-based * partition. If time-based partitioning is enabled without this value, the * table is partitioned based on the load time. */ field?: string; /** * If set to true, queries over this table * require a partition filter that can be used for partition elimination to be * specified. `requirePartitionFilter` is deprecated and will be removed in * a future major release. Use the top level field with the same name instead. * * @deprecated This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead. */ requirePartitionFilter?: boolean; /** * The supported types are DAY, HOUR, MONTH, and YEAR, * which will generate one partition per day, hour, month, and year, respectively. 
*/ type: string; } interface TableView { /** * A query that BigQuery executes when the view is referenced. */ query: string; /** * Specifies whether to use BigQuery's legacy SQL for this view. * If set to `false`, the view will use BigQuery's standard SQL. If set to * `true`, the view will use BigQuery's legacy SQL. If unset, the API will * interpret it as a `true` and assumes the legacy SQL dialect for its query * according to the [API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition). * > **Note**: Starting in provider version `7.0.0`, no default value is * provided for this field unless explicitly set in the configuration. */ useLegacySql: boolean; } } export declare namespace bigqueryanalyticshub { interface DataExchangeIamBindingCondition { description?: string; expression: string; title: string; } interface DataExchangeIamMemberCondition { description?: string; expression: string; title: string; } interface DataExchangeSharingEnvironmentConfig { /** * Data Clean Room (DCR), used for privacy-safe and secured data sharing. */ dcrExchangeConfig?: outputs.bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigDcrExchangeConfig; /** * Default Analytics Hub data exchange, used for secured data sharing. */ defaultExchangeConfig?: outputs.bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig; } interface DataExchangeSharingEnvironmentConfigDcrExchangeConfig { } interface DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { } interface DataExchangeSubscriptionDestinationDataset { /** * A reference that identifies the destination dataset. * Structure is documented below. */ datasetReference: outputs.bigqueryanalyticshub.DataExchangeSubscriptionDestinationDatasetDatasetReference; /** * A user-friendly description of the dataset. */ description?: string; /** * A descriptive name for the dataset. */ friendlyName?: string; /** * The labels associated with this dataset. 
You can use these to * organize and group your datasets. */ labels?: { [key: string]: string; }; /** * The geographic location where the dataset should reside. * See https://cloud.google.com/bigquery/docs/locations for supported locations. */ location: string; } interface DataExchangeSubscriptionDestinationDatasetDatasetReference { /** * A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. */ datasetId: string; /** * The ID of the project containing this dataset. */ projectId: string; } interface DataExchangeSubscriptionLinkedDatasetMap { /** * (Output) * Output only. Name of the linked dataset, e.g. projects/subscriberproject/datasets/linkedDataset */ linkedDataset: string; /** * (Output) * Output only. Name of the Pub/Sub subscription, e.g. projects/subscriberproject/subscriptions/subscriptions/sub_id */ linkedPubsubSubscription: string; /** * (Output) * Output only. Listing for which linked resource is created. */ listing: string; /** * (Required) The identifier for this object. Format specified above. */ resourceName: string; } interface DataExchangeSubscriptionLinkedResource { /** * (Output) * Output only. Name of the linked dataset, e.g. projects/subscriberproject/datasets/linkedDataset */ linkedDataset: string; /** * (Output) * Output only. Listing for which linked resource is created. */ listing: string; } interface ListingBigqueryDataset { /** * Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 */ dataset: string; /** * (Output, Beta) * Server owned effective state of replicas. Contains both primary and secondary replicas. * Each replica includes a system-computed (output-only) state and primary designation. * Structure is documented below. 
*/ effectiveReplicas: outputs.bigqueryanalyticshub.ListingBigqueryDatasetEffectiveReplica[]; /** * (Optional, Beta) * A list of regions where the publisher has created shared dataset replicas. */ replicaLocations?: string[]; /** * Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. * Structure is documented below. */ selectedResources?: outputs.bigqueryanalyticshub.ListingBigqueryDatasetSelectedResource[]; } interface ListingBigqueryDatasetEffectiveReplica { /** * The name of the location this data exchange listing. */ location: string; /** * Output-only. Indicates that this replica is the primary replica. * Possible values: PRIMARY_STATE_UNSPECIFIED, PRIMARY_REPLICA */ primaryState: string; /** * Output-only. Assigned by Analytics Hub based on real BigQuery replication state. * Possible values: REPLICA_STATE_UNSPECIFIED, READY_TO_USE, UNAVAILABLE */ replicaState: string; } interface ListingBigqueryDatasetSelectedResource { /** * (Optional, Beta) * Format: For routine: projects/{projectId}/datasets/{datasetId}/routines/{routineId} Example:"projects/test_project/datasets/test_dataset/routines/test_routine" * * The `effectiveReplicas` block contains: */ routine?: string; /** * Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" */ table?: string; } interface ListingCommercialInfo { /** * (Output) * Details of the Marketplace Data Product associated with the Listing. * Structure is documented below. */ cloudMarketplaces: outputs.bigqueryanalyticshub.ListingCommercialInfoCloudMarketplace[]; } interface ListingCommercialInfoCloudMarketplace { /** * (Output) * Commercial state of the Marketplace Data Product. * Possible values: COMMERCIAL_STATE_UNSPECIFIED, ONBOARDING, ACTIVE */ commercialState: string; /** * (Output) * Resource name of the commercial service associated with the Marketplace Data Product. e.g. 
example.com */ service: string; } interface ListingDataProvider { /** * Name of the data provider. */ name: string; /** * Email or URL of the data provider. */ primaryContact?: string; } interface ListingIamBindingCondition { description?: string; expression: string; title: string; } interface ListingIamMemberCondition { description?: string; expression: string; title: string; } interface ListingPublisher { /** * Name of the listing publisher. */ name: string; /** * Email or URL of the listing publisher. */ primaryContact?: string; } interface ListingPubsubTopic { /** * Region hint on where the data might be published. Data affinity regions are modifiable. * See https://cloud.google.com/about/locations for full listing of possible Cloud regions. */ dataAffinityRegions?: string[]; /** * Resource name of the Pub/Sub topic source for this listing. e.g. projects/myproject/topics/topicId */ topic: string; } interface ListingRestrictedExportConfig { /** * If true, enable restricted export. */ enabled?: boolean; /** * (Output) * If true, restrict direct table access(read api/tabledata.list) on linked table. */ restrictDirectTableAccess: boolean; /** * If true, restrict export of query result derived from restricted linked dataset table. */ restrictQueryResult?: boolean; } interface ListingSubscriptionCommercialInfo { /** * (Output) * Cloud Marketplace commercial metadata for this subscription. * Structure is documented below. */ cloudMarketplaces: outputs.bigqueryanalyticshub.ListingSubscriptionCommercialInfoCloudMarketplace[]; } interface ListingSubscriptionCommercialInfoCloudMarketplace { /** * (Output) * Resource name of the Marketplace Order. */ order: string; } interface ListingSubscriptionDestinationDataset { /** * A reference that identifies the destination dataset. * Structure is documented below. */ datasetReference: outputs.bigqueryanalyticshub.ListingSubscriptionDestinationDatasetDatasetReference; /** * A user-friendly description of the dataset. 
*/ description?: string; /** * A descriptive name for the dataset. */ friendlyName?: string; /** * The labels associated with this dataset. You can use these to * organize and group your datasets. */ labels?: { [key: string]: string; }; /** * The geographic location where the dataset should reside. * See https://cloud.google.com/bigquery/docs/locations for supported locations. */ location: string; /** * (Optional, Beta) * List of regions where the subscriber wants dataset replicas. */ replicaLocations?: string[]; } interface ListingSubscriptionDestinationDatasetDatasetReference { /** * A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. */ datasetId: string; /** * The ID of the project containing this dataset. */ projectId: string; } interface ListingSubscriptionLinkedDatasetMap { /** * (Output) * Output only. Name of the linked dataset, e.g. projects/subscriberproject/datasets/linkedDataset */ linkedDataset: string; /** * (Output) * Output only. Listing for which linked resource is created. */ listing: string; /** * (Required) The identifier for this object. Format specified above. */ resourceName: string; } interface ListingSubscriptionLinkedResource { /** * (Output) * Output only. Name of the linked dataset, e.g. projects/subscriberproject/datasets/linkedDataset */ linkedDataset: string; /** * (Output) * Output only. Listing for which linked resource is created. */ listing: string; } } export declare namespace bigquerydatapolicy { interface DataPolicyDataMaskingPolicy { /** * The available masking rules. Learn more here: https://cloud.google.com/bigquery/docs/column-data-masking-intro#masking_options. * Possible values are: `SHA256`, `ALWAYS_NULL`, `DEFAULT_MASKING_VALUE`, `LAST_FOUR_CHARACTERS`, `FIRST_FOUR_CHARACTERS`, `EMAIL_MASK`, `DATE_YEAR_MASK`. 
*/ predefinedExpression?: string; /** * The name of the BigQuery routine that contains the custom masking routine, in the format of projects/{projectNumber}/datasets/{dataset_id}/routines/{routine_id}. */ routine?: string; } interface DataPolicyIamBindingCondition { description?: string; expression: string; title: string; } interface DataPolicyIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace bigtable { interface AppProfileDataBoostIsolationReadOnly { /** * The Compute Billing Owner for this Data Boost App Profile. * Possible values are: `HOST_PAYS`. */ computeBillingOwner: string; } interface AppProfileSingleClusterRouting { /** * If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. * It is unsafe to send these requests to the same table/row/column in multiple clusters. */ allowTransactionalWrites?: boolean; /** * The cluster to which read/write requests should be routed. */ clusterId: string; } interface AppProfileStandardIsolation { /** * The priority of requests sent using this app profile. * Possible values are: `PRIORITY_LOW`, `PRIORITY_MEDIUM`, `PRIORITY_HIGH`. */ priority: string; } interface AuthorizedViewSubsetView { /** * A group of column family subsets to be included in the authorized view. This can be specified multiple times. Structure is documented below. * * ----- */ familySubsets?: outputs.bigtable.AuthorizedViewSubsetViewFamilySubset[]; /** * A list of Base64-encoded row prefixes to be included in the authorized view. To provide access to all rows, include the empty string as a prefix (""). */ rowPrefixes?: string[]; } interface AuthorizedViewSubsetViewFamilySubset { /** * Name of the column family to be included in the authorized view. The specified column family must exist in the parent table of this authorized view. 
*/ familyName: string; /** * A list of Base64-encoded prefixes for qualifiers of the column family to be included in the authorized view. * Every qualifier starting with one of these prefixes is included in the authorized view. To provide access to all qualifiers, include the empty string as a prefix (""). */ qualifierPrefixes?: string[]; /** * A list of Base64-encoded individual exact column qualifiers of the column family to be included in the authorized view. */ qualifiers?: string[]; } interface GCPolicyMaxAge { /** * Number of days before applying GC policy. * * @deprecated Deprecated in favor of duration */ days: number; /** * Duration before applying GC policy (ex. "8h"). This is required when `days` isn't set * * ----- */ duration: string; } interface GCPolicyMaxVersion { /** * Number of version before applying the GC policy. * * ----- * `gcRules` include 2 fields: */ number: number; } interface InstanceCluster { /** * [Autoscaling](https://cloud.google.com/bigtable/docs/autoscaling#parameters) config for the cluster, contains the following arguments: */ autoscalingConfig?: outputs.bigtable.InstanceClusterAutoscalingConfig; /** * The ID of the Cloud Bigtable cluster. Must be 6-30 characters and must only contain hyphens, lowercase letters and numbers. */ clusterId: string; /** * Describes the Cloud KMS encryption key that will be used to protect the destination Bigtable cluster. The requirements for this key are: 1) The Cloud Bigtable service account associated with the project that contains this cluster must be granted the `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster. */ kmsKeyName: string; /** * The node scaling factor for this cluster. One of `"NodeScalingFactor1X"` or `"NodeScalingFactor2X"`. Defaults to `"NodeScalingFactor1X"`. 
If `"NodeScalingFactor2X"` is specified, then `numNodes`, `minNodes`, and `maxNodes` would need to be specified in increments of 2. This value cannot be updated after the cluster is created. * * > **Note**: Removing the field entirely from the config will cause the provider to default to the backend value. * * !> **Warning**: Modifying this field will cause the provider to delete/recreate the entire resource. * * !> **Warning:** Modifying the `storageType`, `zone` or `kmsKeyName` of an existing cluster (by * `clusterId`) will cause the provider to delete/recreate the entire * `gcp.bigtable.Instance` resource. If these values are changing, use a new * `clusterId`. */ nodeScalingFactor?: string; /** * The number of nodes in the cluster. * If no value is set, Cloud Bigtable automatically allocates nodes based on your data footprint and optimized for 50% storage utilization. */ numNodes: number; /** * describes the current state of the cluster. */ state: string; /** * The storage type to use. One of `"SSD"` or * `"HDD"`. Defaults to `"SSD"`. */ storageType?: string; /** * The zone to create the Cloud Bigtable cluster in. If it not * specified, the provider zone is used. Each cluster must have a different zone in the same region. Zones that support * Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations). */ zone: string; } interface InstanceClusterAutoscalingConfig { /** * The target CPU utilization for autoscaling, in percentage. Must be between 10 and 80. */ cpuTarget: number; /** * The maximum number of nodes for autoscaling. */ maxNodes: number; /** * The minimum number of nodes for autoscaling. */ minNodes: number; /** * The target storage utilization for autoscaling, in GB, for each node in a cluster. This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and 16384 (16 TiB) for an HDD cluster. 
If not set, whatever is already set for the cluster will not change, or if the cluster is just being created, it will use the default value of 2560 for SSD clusters and 8192 for HDD clusters. * * !> **Warning**: Only one of `autoscalingConfig` or `numNodes` should be set for a cluster. If both are set, `numNodes` is ignored. If none is set, autoscaling will be disabled and sized to the current node count. */ storageTarget: number; } interface InstanceIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * For `gcp.bigtable.InstanceIamPolicy` only: */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InstanceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * For `gcp.bigtable.InstanceIamPolicy` only: */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SchemaBundleProtoSchema { /** * Base64 encoded content of the file. */ protoDescriptors: string; } interface TableAutomatedBackupPolicy { /** * How frequently automated backups should occur. */ frequency: string; /** * How long the automated backups should be retained. */ retentionPeriod: string; } interface TableColumnFamily { /** * The name of the column family. */ family: string; /** * The type of the column family. 
*/ type?: string; } interface TableIamBindingCondition { description?: string; expression: string; title: string; } interface TableIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace billing { interface AccountIamBindingCondition { description?: string; expression: string; title: string; } interface AccountIamMemberCondition { description?: string; expression: string; title: string; } interface BudgetAllUpdatesRule { /** * Boolean. When set to true, disables default notifications sent * when a threshold is exceeded. Default recipients are * those with Billing Account Administrators and Billing * Account Users IAM roles for the target account. */ disableDefaultIamRecipients?: boolean; /** * When set to true, and when the budget has a single project configured, * notifications will be sent to project level recipients of that project. * This field will be ignored if the budget has multiple or no project configured. * Currently, project level recipients are the users with Owner role on a cloud project. */ enableProjectLevelRecipients?: boolean; /** * The full resource name of a monitoring notification * channel in the form * projects/{project_id}/notificationChannels/{channel_id}. * A maximum of 5 channels are allowed. */ monitoringNotificationChannels?: string[]; /** * The name of the Cloud Pub/Sub topic where budget related * messages will be published, in the form * projects/{project_id}/topics/{topic_id}. Updates are sent * at regular intervals to the topic. */ pubsubTopic?: string; /** * The schema version of the notification. Only "1.0" is * accepted. It represents the JSON schema as defined in * https://cloud.google.com/billing/docs/how-to/budgets#notification_format. */ schemaVersion?: string; } interface BudgetAmount { /** * Configures a budget amount that is automatically set to 100% of * last period's spend. * Boolean. Set value to true to use. 
Do not set to false, instead * use the `specifiedAmount` block. */ lastPeriodAmount?: boolean; /** * A specified amount to use as the budget. currencyCode is * optional. If specified, it must match the currency of the * billing account. The currencyCode is provided on output. * Structure is documented below. */ specifiedAmount?: outputs.billing.BudgetAmountSpecifiedAmount; } interface BudgetAmountSpecifiedAmount { /** * The 3-letter currency code defined in ISO 4217. */ currencyCode: string; /** * Number of nano (10^-9) units of the amount. * The value must be between -999,999,999 and +999,999,999 * inclusive. If units is positive, nanos must be positive or * zero. If units is zero, nanos can be positive, zero, or * negative. If units is negative, nanos must be negative or * zero. For example $-1.75 is represented as units=-1 and * nanos=-750,000,000. */ nanos?: number; /** * The whole units of the amount. For example if currencyCode * is "USD", then 1 unit is one US dollar. */ units?: string; } interface BudgetBudgetFilter { /** * A CalendarPeriod represents the abstract concept of a recurring time period that has a * canonical start. Grammatically, "the start of the current CalendarPeriod". * All calendar times begin at 12 AM US and Canadian Pacific Time (UTC-8). * Exactly one of `calendarPeriod`, `customPeriod` must be provided. * Possible values are: `MONTH`, `QUARTER`, `YEAR`, `CALENDAR_PERIOD_UNSPECIFIED`. */ calendarPeriod?: string; /** * Optional. If creditTypesTreatment is INCLUDE_SPECIFIED_CREDITS, * this is a list of credit types to be subtracted from gross cost to determine the spend for threshold calculations. See a list of acceptable credit type values. * If creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be empty. */ creditTypes?: string[]; /** * Specifies how credits should be treated when determining spend * for threshold calculations. * Default value is `INCLUDE_ALL_CREDITS`. 
* Possible values are: `INCLUDE_ALL_CREDITS`, `EXCLUDE_ALL_CREDITS`, `INCLUDE_SPECIFIED_CREDITS`. */ creditTypesTreatment?: string; /** * Specifies to track usage from any start date (required) to any end date (optional). * This time period is static, it does not recur. * Exactly one of `calendarPeriod`, `customPeriod` must be provided. * Structure is documented below. */ customPeriod?: outputs.billing.BudgetBudgetFilterCustomPeriod; /** * A single label and value pair specifying that usage from only * this set of labeled resources should be included in the budget. */ labels: { [key: string]: string; }; /** * A set of projects of the form projects/{project_number}, * specifying that usage from only this set of projects should be * included in the budget. If omitted, the report will include * all usage for the billing account, regardless of which project * the usage occurred on. */ projects?: string[]; /** * A set of folder and organization names of the form folders/{folderId} or organizations/{organizationId}, * specifying that usage from only this set of folders and organizations should be included in the budget. * If omitted, the budget includes all usage that the billing account pays for. If the folder or organization * contains projects that are paid for by a different Cloud Billing account, the budget doesn't apply to those projects. */ resourceAncestors?: string[]; /** * A set of services of the form services/{service_id}, * specifying that usage from only this set of services should be * included in the budget. If omitted, the report will include * usage for all the services. The service names are available * through the Catalog API: * https://cloud.google.com/billing/v1/how-tos/catalog-api. */ services: string[]; /** * A set of subaccounts of the form billingAccounts/{account_id}, * specifying that usage from only this set of subaccounts should * be included in the budget. 
If a subaccount is set to the name of * the parent account, usage from the parent account will be included. * If the field is omitted, the report will include usage from the parent * account and all subaccounts, if they exist. */ subaccounts?: string[]; } interface BudgetBudgetFilterCustomPeriod { /** * Optional. The end date of the time period. Budgets with elapsed end date won't be processed. * If unset, specifies to track all usage incurred since the startDate. * Structure is documented below. */ endDate?: outputs.billing.BudgetBudgetFilterCustomPeriodEndDate; /** * A start date is required. The start date must be after January 1, 2017. * Structure is documented below. */ startDate: outputs.billing.BudgetBudgetFilterCustomPeriodStartDate; } interface BudgetBudgetFilterCustomPeriodEndDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month. */ day: number; /** * Month of a year. Must be from 1 to 12. */ month: number; /** * Year of the date. Must be from 1 to 9999. */ year: number; } interface BudgetBudgetFilterCustomPeriodStartDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month. */ day: number; /** * Month of a year. Must be from 1 to 12. */ month: number; /** * Year of the date. Must be from 1 to 9999. */ year: number; } interface BudgetThresholdRule { /** * The type of basis used to determine if spend has passed * the threshold. * Default value is `CURRENT_SPEND`. * Possible values are: `CURRENT_SPEND`, `FORECASTED_SPEND`. */ spendBasis?: string; /** * Send an alert when this threshold is exceeded. This is a * 1.0-based percentage, so 0.5 = 50%. Must be >= 0. */ thresholdPercent: number; } } export declare namespace binaryauthorization { interface AttestorAttestationAuthorityNote { /** * (Output) * This field will contain the service account email address that * this Attestor will use as the principal when querying Container * Analysis. 
Attestor administrators must grant this service account * the IAM role needed to read attestations from the noteReference in * Container Analysis (containeranalysis.notes.occurrences.viewer). * This email address is fixed for the lifetime of the Attestor, but * callers should not make any other assumptions about the service * account email; future versions may use an email based on a * different naming pattern. */ delegationServiceAccountEmail: string; /** * The resource name of a ATTESTATION_AUTHORITY Note, created by the * user. If the Note is in a different project from the Attestor, it * should be specified in the format `projects/*/notes/*` (or the legacy * `providers/*/notes/*`). This field may not be updated. * An attestation by this attestor is stored as a Container Analysis * ATTESTATION_AUTHORITY Occurrence that names a container image * and that links to this Note. */ noteReference: string; /** * Public keys that verify attestations signed by this attestor. This * field may be updated. * If this field is non-empty, one of the specified public keys must * verify that an attestation was signed by this attestor for the * image specified in the admission request. * If this field is empty, this attestor always returns that no valid * attestations exist. * Structure is documented below. */ publicKeys?: outputs.binaryauthorization.AttestorAttestationAuthorityNotePublicKey[]; } interface AttestorAttestationAuthorityNotePublicKey { /** * ASCII-armored representation of a PGP public key, as the * entire output by the command * `gpg --export --armor foo@example.com` (either LF or CRLF * line endings). When using this field, id should be left * blank. The BinAuthz API handlers will calculate the ID * and fill it in automatically. BinAuthz computes this ID * as the OpenPGP RFC4880 V4 fingerprint, represented as * upper-case hex. If id is provided by the caller, it will * be overwritten by the API-calculated ID. 
*/ asciiArmoredPgpPublicKey?: string; /** * A descriptive comment. This field may be updated. */ comment?: string; /** * The ID of this public key. Signatures verified by BinAuthz * must include the ID of the public key that can be used to * verify them, and that ID must match the contents of this * field exactly. Additional restrictions on this field can * be imposed based on which public key type is encapsulated. * See the documentation on publicKey cases below for details. */ id: string; /** * A raw PKIX SubjectPublicKeyInfo format public key. * NOTE: id may be explicitly provided by the caller when using this * type of public key, but it MUST be a valid RFC3986 URI. If id is left * blank, a default one will be computed based on the digest of the DER * encoding of the public key. * Structure is documented below. */ pkixPublicKey?: outputs.binaryauthorization.AttestorAttestationAuthorityNotePublicKeyPkixPublicKey; } interface AttestorAttestationAuthorityNotePublicKeyPkixPublicKey { /** * A PEM-encoded public key, as described in * `https://tools.ietf.org/html/rfc7468#section-13` */ publicKeyPem?: string; /** * The signature algorithm used to verify a message against * a signature using this key. These signature algorithm must * match the structure and any object identifiers encoded in * publicKeyPem (i.e. this algorithm must match that of the * public key). */ signatureAlgorithm?: string; } interface AttestorIamBindingCondition { description?: string; expression: string; title: string; } interface AttestorIamMemberCondition { description?: string; expression: string; title: string; } interface PolicyAdmissionWhitelistPattern { /** * An image name pattern to whitelist, in the form * `registry/path/to/image`. This supports a trailing * as a * wildcard, but this is allowed only in text after the registry/ * part. */ namePattern: string; } interface PolicyClusterAdmissionRule { /** * The identifier for this object. Format specified above. 
*/ cluster: string; /** * The action when a pod creation is denied by the admission rule. * Possible values are: `ENFORCED_BLOCK_AND_AUDIT_LOG`, `DRYRUN_AUDIT_LOG_ONLY`. */ enforcementMode: string; /** * How this admission rule will be evaluated. * Possible values are: `ALWAYS_ALLOW`, `REQUIRE_ATTESTATION`, `ALWAYS_DENY`. */ evaluationMode: string; /** * The resource names of the attestors that must attest to a * container image. If the attestor is in a different project from the * policy, it should be specified in the format `projects/*/attestors/*`. * Each attestor must exist before a policy can reference it. To add an * attestor to a policy the principal issuing the policy change * request must be able to read the attestor resource. * Note: this field must be non-empty when the evaluationMode field * specifies REQUIRE_ATTESTATION, otherwise it must be empty. */ requireAttestationsBies?: string[]; } interface PolicyDefaultAdmissionRule { /** * The action when a pod creation is denied by the admission rule. * Possible values are: `ENFORCED_BLOCK_AND_AUDIT_LOG`, `DRYRUN_AUDIT_LOG_ONLY`. */ enforcementMode: string; /** * How this admission rule will be evaluated. * Possible values are: `ALWAYS_ALLOW`, `REQUIRE_ATTESTATION`, `ALWAYS_DENY`. */ evaluationMode: string; /** * The resource names of the attestors that must attest to a * container image. If the attestor is in a different project from the * policy, it should be specified in the format `projects/*/attestors/*`. * Each attestor must exist before a policy can reference it. To add an * attestor to a policy the principal issuing the policy change * request must be able to read the attestor resource. * Note: this field must be non-empty when the evaluationMode field * specifies REQUIRE_ATTESTATION, otherwise it must be empty. 
*/ requireAttestationsBies?: string[]; } } export declare namespace blockchainnodeengine { interface BlockchainNodesConnectionInfo { /** * (Output) * The endpoint information through which to interact with a blockchain node. * Structure is documented below. */ endpointInfos: outputs.blockchainnodeengine.BlockchainNodesConnectionInfoEndpointInfo[]; /** * (Output) * A service attachment that exposes a node, and has the following format: projects/{project}/regions/{region}/serviceAttachments/{service_attachment_name} */ serviceAttachment: string; } interface BlockchainNodesConnectionInfoEndpointInfo { /** * (Output) * The assigned URL for the node JSON-RPC API endpoint. */ jsonRpcApiEndpoint: string; /** * (Output) * The assigned URL for the node WebSockets API endpoint. */ websocketsApiEndpoint: string; } interface BlockchainNodesEthereumDetails { /** * (Output) * User-provided key-value pairs * Structure is documented below. */ additionalEndpoints: outputs.blockchainnodeengine.BlockchainNodesEthereumDetailsAdditionalEndpoint[]; /** * Enables JSON-RPC access to functions in the admin namespace. Defaults to false. */ apiEnableAdmin?: boolean; /** * Enables JSON-RPC access to functions in the debug namespace. Defaults to false. */ apiEnableDebug?: boolean; /** * The consensus client * Possible values are: `CONSENSUS_CLIENT_UNSPECIFIED`, `LIGHTHOUSE`. */ consensusClient?: string; /** * The execution client * Possible values are: `EXECUTION_CLIENT_UNSPECIFIED`, `GETH`, `ERIGON`. */ executionClient?: string; /** * User-provided key-value pairs * Structure is documented below. */ gethDetails?: outputs.blockchainnodeengine.BlockchainNodesEthereumDetailsGethDetails; /** * The Ethereum environment being accessed. * Possible values are: `MAINNET`, `TESTNET_GOERLI_PRATER`, `TESTNET_SEPOLIA`. */ network?: string; /** * The type of Ethereum node. * Possible values are: `LIGHT`, `FULL`, `ARCHIVE`. 
*/ nodeType?: string; /** * Configuration for validator-related parameters on the beacon client, and for any managed validator client. * Structure is documented below. */ validatorConfig?: outputs.blockchainnodeengine.BlockchainNodesEthereumDetailsValidatorConfig; } interface BlockchainNodesEthereumDetailsAdditionalEndpoint { /** * The assigned URL for the node's Beacon API endpoint. */ beaconApiEndpoint: string; /** * The assigned URL for the node's Beacon Prometheus metrics endpoint. */ beaconPrometheusMetricsApiEndpoint: string; /** * The assigned URL for the node's execution client's Prometheus metrics endpoint. */ executionClientPrometheusMetricsApiEndpoint: string; } interface BlockchainNodesEthereumDetailsGethDetails { /** * Blockchain garbage collection modes. Only applicable when NodeType is FULL or ARCHIVE. * Possible values are: `FULL`, `ARCHIVE`. * * The `additionalEndpoints` block contains: */ garbageCollectionMode?: string; } interface BlockchainNodesEthereumDetailsValidatorConfig { /** * URLs for MEV-relay services to use for block building. When set, a managed MEV-boost service is configured on the beacon client. */ mevRelayUrls?: string[]; } } export declare namespace certificateauthority { interface AuthorityAccessUrl { /** * (Output) * The URL where this CertificateAuthority's CA certificate is published. This will only be * set for CAs that have been activated. */ caCertificateAccessUrl: string; /** * (Output) * The URL where this CertificateAuthority's CRLs are published. This will only be set for * CAs that have been activated. */ crlAccessUrls: string[]; } interface AuthorityConfig { /** * Specifies some of the values in a certificate that are related to the subject. * Structure is documented below. */ subjectConfig: outputs.certificateauthority.AuthorityConfigSubjectConfig; /** * When specified this provides a custom SKI to be used in the certificate. 
This should only be used to maintain a SKI of an existing CA originally created outside CA service, which was not generated using method (1) described in RFC 5280 section 4.2.1.2.. * Structure is documented below. */ subjectKeyId?: outputs.certificateauthority.AuthorityConfigSubjectKeyId; /** * Describes how some of the technical X.509 fields in a certificate should be populated. * Structure is documented below. */ x509Config: outputs.certificateauthority.AuthorityConfigX509Config; } interface AuthorityConfigSubjectConfig { /** * Contains distinguished name fields such as the location and organization. * Structure is documented below. */ subject: outputs.certificateauthority.AuthorityConfigSubjectConfigSubject; /** * The subject alternative name fields. * Structure is documented below. */ subjectAltName?: outputs.certificateauthority.AuthorityConfigSubjectConfigSubjectAltName; } interface AuthorityConfigSubjectConfigSubject { /** * The common name of the distinguished name. */ commonName: string; /** * The country code of the subject. */ countryCode?: string; /** * The locality or city of the subject. */ locality?: string; /** * The organization of the subject. */ organization?: string; /** * The organizational unit of the subject. */ organizationalUnit?: string; /** * The postal code of the subject. */ postalCode?: string; /** * The province, territory, or regional state of the subject. */ province?: string; /** * The street address of the subject. */ streetAddress?: string; } interface AuthorityConfigSubjectConfigSubjectAltName { /** * Contains only valid, fully-qualified host names. */ dnsNames?: string[]; /** * Contains only valid RFC 2822 E-mail addresses. */ emailAddresses?: string[]; /** * Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses. */ ipAddresses?: string[]; /** * Contains only valid RFC 3986 URIs. */ uris?: string[]; } interface AuthorityConfigSubjectKeyId { /** * The value of the KeyId in lowercase hexadecimal. 
* * The `x509Config` block supports: */ keyId?: string; } interface AuthorityConfigX509Config { /** * Specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. */ additionalExtensions?: outputs.certificateauthority.AuthorityConfigX509ConfigAdditionalExtension[]; /** * Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the * "Authority Information Access" extension in the certificate. */ aiaOcspServers?: string[]; /** * Describes values that are relevant in a CA certificate. */ caOptions: outputs.certificateauthority.AuthorityConfigX509ConfigCaOptions; /** * Indicates the intended use for keys that correspond to a certificate. */ keyUsage: outputs.certificateauthority.AuthorityConfigX509ConfigKeyUsage; /** * Describes the X.509 name constraints extension. */ nameConstraints?: outputs.certificateauthority.AuthorityConfigX509ConfigNameConstraints; /** * Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. */ policyIds?: outputs.certificateauthority.AuthorityConfigX509ConfigPolicyId[]; } interface AuthorityConfigX509ConfigAdditionalExtension { /** * Indicates whether or not this extension is critical (i.e., if the client does not know how to * handle this extension, the client should consider this to be an error). */ critical: boolean; /** * Describes values that are relevant in a CA certificate. * Structure is documented below. */ objectId: outputs.certificateauthority.AuthorityConfigX509ConfigAdditionalExtensionObjectId; /** * The value of this X.509 extension. A base64-encoded string. */ value: string; } interface AuthorityConfigX509ConfigAdditionalExtensionObjectId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. 
*/ objectIdPaths: number[]; } interface AuthorityConfigX509ConfigCaOptions { /** * When true, the "CA" in Basic Constraints extension will be set to true. */ isCa: boolean; /** * Refers to the "path length constraint" in Basic Constraints extension. For a CA certificate, this value describes the depth of * subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. Setting the value to 0 * requires setting `zeroMaxIssuerPathLength = true`. */ maxIssuerPathLength?: number; /** * When true, the "CA" in Basic Constraints extension will be set to false. * If both `isCa` and `nonCa` are unset, the extension will be omitted from the CA certificate. */ nonCa?: boolean; /** * When true, the "path length constraint" in Basic Constraints extension will be set to 0. * If both `maxIssuerPathLength` and `zeroMaxIssuerPathLength` are unset, * the max path length will be omitted from the CA certificate. */ zeroMaxIssuerPathLength?: boolean; } interface AuthorityConfigX509ConfigKeyUsage { /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ baseKeyUsage: outputs.certificateauthority.AuthorityConfigX509ConfigKeyUsageBaseKeyUsage; /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ extendedKeyUsage: outputs.certificateauthority.AuthorityConfigX509ConfigKeyUsageExtendedKeyUsage; /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. * Structure is documented below. */ unknownExtendedKeyUsages?: outputs.certificateauthority.AuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsage[]; } interface AuthorityConfigX509ConfigKeyUsageBaseKeyUsage { /** * The key may be used to sign certificates. */ certSign?: boolean; /** * The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". 
*/ contentCommitment?: boolean; /** * The key may be used sign certificate revocation lists. */ crlSign?: boolean; /** * The key may be used to encipher data. */ dataEncipherment?: boolean; /** * The key may be used to decipher only. */ decipherOnly?: boolean; /** * The key may be used for digital signatures. */ digitalSignature?: boolean; /** * The key may be used to encipher only. */ encipherOnly?: boolean; /** * The key may be used in a key agreement protocol. */ keyAgreement?: boolean; /** * The key may be used to encipher other keys. */ keyEncipherment?: boolean; } interface AuthorityConfigX509ConfigKeyUsageExtendedKeyUsage { /** * Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. */ clientAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication". */ codeSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". */ emailProtection?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". */ ocspSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. */ serverAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". */ timeStamping?: boolean; } interface AuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsage { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface AuthorityConfigX509ConfigNameConstraints { /** * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * Contains excluded DNS names. 
Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ excludedDnsNames?: string[]; /** * Contains the excluded email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ excludedEmailAddresses?: string[]; /** * Contains the excluded IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ excludedIpRanges?: string[]; /** * Contains the excluded URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ excludedUris?: string[]; /** * Contains permitted DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ permittedDnsNames?: string[]; /** * Contains the permitted email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ permittedEmailAddresses?: string[]; /** * Contains the permitted IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. 
*/ permittedIpRanges?: string[]; /** * Contains the permitted URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ permittedUris?: string[]; } interface AuthorityConfigX509ConfigPolicyId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface AuthorityKeySpec { /** * The algorithm to use for creating a managed Cloud KMS key for a for a simplified * experience. All managed keys will be have their ProtectionLevel as HSM. * Possible values are: `SIGN_HASH_ALGORITHM_UNSPECIFIED`, `RSA_PSS_2048_SHA256`, `RSA_PSS_3072_SHA256`, `RSA_PSS_4096_SHA256`, `RSA_PKCS1_2048_SHA256`, `RSA_PKCS1_3072_SHA256`, `RSA_PKCS1_4096_SHA256`, `EC_P256_SHA256`, `EC_P384_SHA384`. */ algorithm?: string; /** * The resource name for an existing Cloud KMS CryptoKeyVersion in the format * `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. */ cloudKmsKeyVersion?: string; } interface AuthoritySubordinateConfig { /** * This can refer to a CertificateAuthority that was used to create a * subordinate CertificateAuthority. This field is used for information * and usability purposes only. The resource name is in the format * `projects/*/locations/*/caPools/*/certificateAuthorities/*`. */ certificateAuthority?: string; /** * Contains the PEM certificate chain for the issuers of this CertificateAuthority, * but not pem certificate for this CA itself. * Structure is documented below. */ pemIssuerChain: outputs.certificateauthority.AuthoritySubordinateConfigPemIssuerChain; } interface AuthoritySubordinateConfigPemIssuerChain { /** * Expected to be in leaf-to-root order according to RFC 5246. */ pemCertificates?: string[]; } interface AuthorityUserDefinedAccessUrls { /** * A list of URLs where this CertificateAuthority's CA certificate is published that is specified by users. 
*/ aiaIssuingCertificateUrls?: string[]; /** * A list of URLs where this CertificateAuthority's CRLs are published that is specified by users. */ crlAccessUrls?: string[]; } interface CaPoolEncryptionSpec { /** * The resource name for an existing Cloud KMS key in the format * `projects/*/locations/*/keyRings/*/cryptoKeys/*`. */ cloudKmsKey?: string; } interface CaPoolIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface CaPoolIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface CaPoolIssuancePolicy { /** * IssuanceModes specifies the allowed ways in which Certificates may be requested from this CaPool. * Structure is documented below. 
*/ allowedIssuanceModes?: outputs.certificateauthority.CaPoolIssuancePolicyAllowedIssuanceModes; /** * If any AllowedKeyType is specified, then the certificate request's public key must match one of the key types listed here. * Otherwise, any key may be used. You can specify only one key type of those listed here. * Structure is documented below. */ allowedKeyTypes?: outputs.certificateauthority.CaPoolIssuancePolicyAllowedKeyType[]; /** * The duration to backdate all certificates issued from this CaPool. If not set, the * certificates will be issued with a notBeforeTime of the issuance time (i.e. the current * time). If set, the certificates will be issued with a notBeforeTime of the issuance * time minus the backdate_duration. The notAfterTime will be adjusted to preserve the * requested lifetime. The backdateDuration must be less than or equal to 48 hours. */ backdateDuration?: string; /** * A set of X.509 values that will be applied to all certificates issued through this CaPool. If a certificate request * includes conflicting values for the same properties, they will be overwritten by the values defined here. If a certificate * request uses a CertificateTemplate that defines conflicting predefinedValues for the same properties, the certificate * issuance request will fail. * Structure is documented below. */ baselineValues?: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValues; /** * Describes constraints on identities that may appear in Certificates issued through this CaPool. * If this is omitted, then this CaPool will not add restrictions on a certificate's identity. * Structure is documented below. */ identityConstraints?: outputs.certificateauthority.CaPoolIssuancePolicyIdentityConstraints; /** * The maximum lifetime allowed for issued Certificates. Note that if the issuing CertificateAuthority * expires before a Certificate's requested maximumLifetime, the effective lifetime will be explicitly truncated to match it. 
*/ maximumLifetime?: string; } interface CaPoolIssuancePolicyAllowedIssuanceModes { /** * When true, allows callers to create Certificates by specifying a CertificateConfig. */ allowConfigBasedIssuance: boolean; /** * When true, allows callers to create Certificates by specifying a CSR. */ allowCsrBasedIssuance: boolean; } interface CaPoolIssuancePolicyAllowedKeyType { /** * Represents an allowed Elliptic Curve key type. * Structure is documented below. */ ellipticCurve?: outputs.certificateauthority.CaPoolIssuancePolicyAllowedKeyTypeEllipticCurve; /** * Describes an RSA key that may be used in a Certificate issued from a CaPool. * Structure is documented below. */ rsa?: outputs.certificateauthority.CaPoolIssuancePolicyAllowedKeyTypeRsa; } interface CaPoolIssuancePolicyAllowedKeyTypeEllipticCurve { /** * The algorithm used. * Possible values are: `ECDSA_P256`, `ECDSA_P384`, `EDDSA_25519`. */ signatureAlgorithm: string; } interface CaPoolIssuancePolicyAllowedKeyTypeRsa { /** * The maximum allowed RSA modulus size, in bits. If this is not set, or if set to zero, the * service will not enforce an explicit upper bound on RSA modulus sizes. */ maxModulusSize?: string; /** * The minimum allowed RSA modulus size, in bits. If this is not set, or if set to zero, the * service-level min RSA modulus size will continue to apply. */ minModulusSize?: string; } interface CaPoolIssuancePolicyBaselineValues { /** * Specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. * Structure is documented below. */ additionalExtensions?: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesAdditionalExtension[]; /** * Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the * "Authority Information Access" extension in the certificate. */ aiaOcspServers?: string[]; /** * Describes values that are relevant in a CA certificate. * Structure is documented below. 
*/ caOptions: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesCaOptions; /** * Indicates the intended use for keys that correspond to a certificate. * Structure is documented below. */ keyUsage: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesKeyUsage; /** * Describes the X.509 name constraints extension. * Structure is documented below. */ nameConstraints?: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesNameConstraints; /** * Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. * Structure is documented below. */ policyIds?: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesPolicyId[]; } interface CaPoolIssuancePolicyBaselineValuesAdditionalExtension { /** * Indicates whether or not this extension is critical (i.e., if the client does not know how to * handle this extension, the client should consider this to be an error). */ critical: boolean; /** * Describes values that are relevant in a CA certificate. * Structure is documented below. */ objectId: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesAdditionalExtensionObjectId; /** * The value of this X.509 extension. A base64-encoded string. */ value: string; } interface CaPoolIssuancePolicyBaselineValuesAdditionalExtensionObjectId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CaPoolIssuancePolicyBaselineValuesCaOptions { /** * When true, the "CA" in Basic Constraints extension will be set to true. */ isCa?: boolean; /** * Refers to the "path length constraint" in Basic Constraints extension. For a CA certificate, this value describes the depth of * subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. */ maxIssuerPathLength?: number; /** * When true, the "CA" in Basic Constraints extension will be set to false. 
* If both `isCa` and `nonCa` are unset, the extension will be omitted from the CA certificate. */ nonCa?: boolean; /** * When true, the "path length constraint" in Basic Constraints extension will be set to 0. * if both `maxIssuerPathLength` and `zeroMaxIssuerPathLength` are unset, * the max path length will be omitted from the CA certificate. */ zeroMaxIssuerPathLength?: boolean; } interface CaPoolIssuancePolicyBaselineValuesKeyUsage { /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ baseKeyUsage: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsage; /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ extendedKeyUsage: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsage; /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. * Structure is documented below. */ unknownExtendedKeyUsages?: outputs.certificateauthority.CaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsage[]; } interface CaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsage { /** * The key may be used to sign certificates. */ certSign?: boolean; /** * The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". */ contentCommitment?: boolean; /** * The key may be used sign certificate revocation lists. */ crlSign?: boolean; /** * The key may be used to encipher data. */ dataEncipherment?: boolean; /** * The key may be used to decipher only. */ decipherOnly?: boolean; /** * The key may be used for digital signatures. */ digitalSignature?: boolean; /** * The key may be used to encipher only. */ encipherOnly?: boolean; /** * The key may be used in a key agreement protocol. */ keyAgreement?: boolean; /** * The key may be used to encipher other keys. 
*/ keyEncipherment?: boolean; } interface CaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsage { /** * Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. */ clientAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication". */ codeSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". */ emailProtection?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". */ ocspSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. */ serverAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". */ timeStamping?: boolean; } interface CaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsage { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CaPoolIssuancePolicyBaselineValuesNameConstraints { /** * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * Contains excluded DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ excludedDnsNames?: string[]; /** * Contains the excluded email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. 
*/ excludedEmailAddresses?: string[]; /** * Contains the excluded IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ excludedIpRanges?: string[]; /** * Contains the excluded URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ excludedUris?: string[]; /** * Contains permitted DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ permittedDnsNames?: string[]; /** * Contains the permitted email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ permittedEmailAddresses?: string[]; /** * Contains the permitted IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ permittedIpRanges?: string[]; /** * Contains the permitted URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ permittedUris?: string[]; } interface CaPoolIssuancePolicyBaselineValuesPolicyId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CaPoolIssuancePolicyIdentityConstraints { /** * If this is set, the SubjectAltNames extension may be copied from a certificate request into the signed certificate. 
* Otherwise, the requested SubjectAltNames will be discarded. */ allowSubjectAltNamesPassthrough: boolean; /** * If this is set, the Subject field may be copied from a certificate request into the signed certificate. * Otherwise, the requested Subject will be discarded. */ allowSubjectPassthrough: boolean; /** * A CEL expression that may be used to validate the resolved X.509 Subject and/or Subject Alternative Name before a * certificate is signed. To see the full allowed syntax and some examples, * see https://cloud.google.com/certificate-authority-service/docs/cel-guide * Structure is documented below. */ celExpression?: outputs.certificateauthority.CaPoolIssuancePolicyIdentityConstraintsCelExpression; } interface CaPoolIssuancePolicyIdentityConstraintsCelExpression { /** * Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface CaPoolPublishingOptions { /** * Specifies the encoding format of each CertificateAuthority's CA * certificate and CRLs. If this is omitted, CA certificates and CRLs * will be published in PEM. * Possible values are: `PEM`, `DER`. */ encodingFormat?: string; /** * When true, publishes each CertificateAuthority's CA certificate and includes its URL in the "Authority Information Access" * X.509 extension in all issued Certificates. If this is false, the CA certificate will not be published and the corresponding * X.509 extension will not be written in issued certificates. 
*/ publishCaCert: boolean; /** * When true, publishes each CertificateAuthority's CRL and includes its URL in the "CRL Distribution Points" X.509 extension * in all issued Certificates. If this is false, CRLs will not be published and the corresponding X.509 extension will not * be written in issued certificates. CRLs will expire 7 days from their creation. However, we will rebuild daily. CRLs are * also rebuilt shortly after a certificate is revoked. */ publishCrl: boolean; } interface CertificateCertificateDescription { /** * (Output) * Describes lists of issuer CA certificate URLs that appear in the "Authority Information Access" extension in the certificate. */ aiaIssuingCertificateUrls: string[]; /** * (Output) * Identifies the subjectKeyId of the parent certificate, per https://tools.ietf.org/html/rfc5280#section-4.2.1.1 * Structure is documented below. */ authorityKeyIds: outputs.certificateauthority.CertificateCertificateDescriptionAuthorityKeyId[]; /** * (Output) * The hash of the x.509 certificate. * Structure is documented below. */ certFingerprints: outputs.certificateauthority.CertificateCertificateDescriptionCertFingerprint[]; /** * (Output) * Describes a list of locations to obtain CRL information, i.e. the DistributionPoint.fullName described by https://tools.ietf.org/html/rfc5280#section-4.2.1.13 */ crlDistributionPoints: string[]; /** * (Output) * A PublicKey describes a public key. * Structure is documented below. */ publicKeys: outputs.certificateauthority.CertificateCertificateDescriptionPublicKey[]; /** * (Output) * Describes some of the values in a certificate that are related to the subject and lifetime. * Structure is documented below. */ subjectDescriptions: outputs.certificateauthority.CertificateCertificateDescriptionSubjectDescription[]; /** * (Output) * Provides a means of identifying certificates that contain a particular public key, per https://tools.ietf.org/html/rfc5280#section-4.2.1.2. * Structure is documented below. 
*/ subjectKeyIds: outputs.certificateauthority.CertificateCertificateDescriptionSubjectKeyId[]; /** * (Output) * A structured description of the issued X.509 certificate. * Structure is documented below. */ x509Descriptions: outputs.certificateauthority.CertificateCertificateDescriptionX509Description[]; } interface CertificateCertificateDescriptionAuthorityKeyId { /** * (Output) * Optional. The value of this KeyId encoded in lowercase hexadecimal. This is most likely the 160 bit SHA-1 hash of the public key. */ keyId: string; } interface CertificateCertificateDescriptionCertFingerprint { /** * (Output) * The SHA 256 hash, encoded in hexadecimal, of the DER x509 certificate. */ sha256Hash: string; } interface CertificateCertificateDescriptionPublicKey { /** * The format of the public key. Currently, only PEM format is supported. * Possible values are: `KEY_TYPE_UNSPECIFIED`, `PEM`. */ format: string; /** * Required. A public key. When this is specified in a request, the padding and encoding can be any of the options described by the respective 'KeyType' value. When this is generated by the service, it will always be an RFC 5280 SubjectPublicKeyInfo structure containing an algorithm identifier and a key. A base64-encoded string. */ key: string; } interface CertificateCertificateDescriptionSubjectDescription { /** * (Output) * The serial number encoded in lowercase hexadecimal. */ hexSerialNumber: string; /** * The desired lifetime of the CA certificate. Used to create the "notBeforeTime" and * "notAfterTime" fields inside an X.509 certificate. A duration in seconds with up to nine * fractional digits, terminated by 's'. Example: "3.5s". */ lifetime: string; /** * (Output) * The time at which the certificate expires. */ notAfterTime: string; /** * (Output) * The time at which the certificate becomes valid. */ notBeforeTime: string; /** * (Output) * The subject alternative name fields. * Structure is documented below. 
*/ subjectAltNames: outputs.certificateauthority.CertificateCertificateDescriptionSubjectDescriptionSubjectAltName[]; /** * (Output) * Contains distinguished name fields such as the location and organization. * Structure is documented below. */ subjects: outputs.certificateauthority.CertificateCertificateDescriptionSubjectDescriptionSubject[]; } interface CertificateCertificateDescriptionSubjectDescriptionSubject { /** * The common name of the distinguished name. */ commonName: string; /** * The country code of the subject. */ countryCode: string; /** * The locality or city of the subject. */ locality: string; /** * The organization of the subject. */ organization: string; /** * The organizational unit of the subject. */ organizationalUnit: string; /** * The postal code of the subject. */ postalCode: string; /** * The province, territory, or regional state of the subject. */ province: string; /** * The street address of the subject. */ streetAddress: string; } interface CertificateCertificateDescriptionSubjectDescriptionSubjectAltName { /** * (Output) * Contains additional subject alternative name values. * Structure is documented below. */ customSans: outputs.certificateauthority.CertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSan[]; /** * Contains only valid, fully-qualified host names. */ dnsNames: string[]; /** * Contains only valid RFC 2822 E-mail addresses. */ emailAddresses: string[]; /** * Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses. */ ipAddresses: string[]; /** * Contains only valid RFC 3986 URIs. */ uris: string[]; } interface CertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSan { /** * (Output) * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * (Output) * Describes how some of the technical fields in a certificate should be populated. * Structure is documented below. 
*/ /* NOTE(review): 'obectIds' / '...ObectId' (sic) — spelling inherited from the upstream provider schema; renaming here would break consumers of this generated type, so it is intentionally left as-is. */ obectIds: outputs.certificateauthority.CertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSanObectId[]; /** * The value of this X.509 extension. A base64-encoded string. */ value: string; } interface CertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSanObectId { /** * (Output) * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CertificateCertificateDescriptionSubjectKeyId { /** * The value of the KeyId in lowercase hexadecimal. */ keyId: string; } interface CertificateCertificateDescriptionX509Description { /** * (Output) * Describes custom X.509 extensions. * Structure is documented below. */ additionalExtensions: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionAdditionalExtension[]; /** * (Output) * Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the * "Authority Information Access" extension in the certificate. */ aiaOcspServers: string[]; /** * (Output) * Describes values that are relevant in a CA certificate. * Structure is documented below. */ caOptions: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionCaOption[]; /** * (Output) * Indicates the intended use for keys that correspond to a certificate. * Structure is documented below. */ keyUsages: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionKeyUsage[]; /** * (Output) * Describes the X.509 name constraints extension. * Structure is documented below. */ nameConstraints: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionNameConstraint[]; /** * (Output) * Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. * Structure is documented below. 
*/ policyIds: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionPolicyId[]; } interface CertificateCertificateDescriptionX509DescriptionAdditionalExtension { /** * Indicates whether or not this extension is critical (i.e., if the client does not know how to * handle this extension, the client should consider this to be an error). */ critical: boolean; /** * The OID for this X.509 extension. * Structure is documented below. */ objectIds: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionAdditionalExtensionObjectId[]; /** * The value of this X.509 extension. A base64-encoded string. */ value?: string; } interface CertificateCertificateDescriptionX509DescriptionAdditionalExtensionObjectId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CertificateCertificateDescriptionX509DescriptionCaOption { /** * When true, the "CA" in Basic Constraints extension will be set to true. */ isCa: boolean; /** * Refers to the "path length constraint" in Basic Constraints extension. For a CA certificate, this value describes the depth of * subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. */ maxIssuerPathLength: number; } interface CertificateCertificateDescriptionX509DescriptionKeyUsage { /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ baseKeyUsages: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsage[]; /** * Detailed scenarios in which a key may be used. * Structure is documented below. */ extendedKeyUsages: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsage[]; /** * Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message. 
* Structure is documented below. */ unknownExtendedKeyUsages: outputs.certificateauthority.CertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsage[]; } interface CertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsage { /** * The key may be used to sign certificates. */ certSign: boolean; /** * The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". */ contentCommitment: boolean; /** * The key may be used to sign certificate revocation lists. */ crlSign: boolean; /** * The key may be used to encipher data. */ dataEncipherment: boolean; /** * The key may be used to decipher only. */ decipherOnly: boolean; /** * The key may be used for digital signatures. */ digitalSignature: boolean; /** * The key may be used to encipher only. */ encipherOnly: boolean; /** * The key may be used in a key agreement protocol. */ keyAgreement: boolean; /** * The key may be used to encipher other keys. */ keyEncipherment: boolean; } interface CertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsage { /** * Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. */ clientAuth: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication". */ codeSigning: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". */ emailProtection: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". */ ocspSigning: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. */ serverAuth: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". 
*/ timeStamping: boolean; } interface CertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsage { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CertificateCertificateDescriptionX509DescriptionNameConstraint { /** * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * Contains excluded DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ excludedDnsNames: string[]; /** * Contains the excluded email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ excludedEmailAddresses: string[]; /** * Contains the excluded IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ excludedIpRanges: string[]; /** * Contains the excluded URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ excludedUris: string[]; /** * Contains permitted DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ permittedDnsNames: string[]; /** * Contains the permitted email addresses. 
The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ permittedEmailAddresses: string[]; /** * Contains the permitted IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ permittedIpRanges: string[]; /** * Contains the permitted URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ permittedUris: string[]; } interface CertificateCertificateDescriptionX509DescriptionPolicyId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CertificateConfig { /** * A PublicKey describes a public key. * Structure is documented below. * * * The `x509Config` block supports: */ publicKey: outputs.certificateauthority.CertificateConfigPublicKey; /** * Specifies some of the values in a certificate that are related to the subject. * Structure is documented below. */ subjectConfig: outputs.certificateauthority.CertificateConfigSubjectConfig; /** * When specified this provides a custom SKI to be used in the certificate. This should only be used to maintain a SKI of an existing CA originally created outside CA service, which was not generated using method (1) described in RFC 5280 section 4.2.1.2.. * Structure is documented below. */ subjectKeyId?: outputs.certificateauthority.CertificateConfigSubjectKeyId; /** * Describes how some of the technical X.509 fields in a certificate should be populated. * Structure is documented below. */ x509Config: outputs.certificateauthority.CertificateConfigX509Config; } interface CertificateConfigPublicKey { /** * The format of the public key. 
Currently, only PEM format is supported. * Possible values are: `KEY_TYPE_UNSPECIFIED`, `PEM`. */ format: string; /** * Required. A public key. When this is specified in a request, the padding and encoding can be any of the options described by the respective 'KeyType' value. When this is generated by the service, it will always be an RFC 5280 SubjectPublicKeyInfo structure containing an algorithm identifier and a key. A base64-encoded string. */ key?: string; } interface CertificateConfigSubjectConfig { /** * Contains distinguished name fields such as the location and organization. * Structure is documented below. */ subject: outputs.certificateauthority.CertificateConfigSubjectConfigSubject; /** * The subject alternative name fields. * Structure is documented below. */ subjectAltName?: outputs.certificateauthority.CertificateConfigSubjectConfigSubjectAltName; } interface CertificateConfigSubjectConfigSubject { /** * The common name of the distinguished name. */ commonName: string; /** * The country code of the subject. */ countryCode?: string; /** * The locality or city of the subject. */ locality?: string; /** * The organization of the subject. */ organization: string; /** * The organizational unit of the subject. */ organizationalUnit?: string; /** * The postal code of the subject. */ postalCode?: string; /** * The province, territory, or regional state of the subject. */ province?: string; /** * The street address of the subject. */ streetAddress?: string; } interface CertificateConfigSubjectConfigSubjectAltName { /** * Contains only valid, fully-qualified host names. */ dnsNames?: string[]; /** * Contains only valid RFC 2822 E-mail addresses. */ emailAddresses?: string[]; /** * Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses. */ ipAddresses?: string[]; /** * Contains only valid RFC 3986 URIs. */ uris?: string[]; } interface CertificateConfigSubjectKeyId { /** * The value of the KeyId in lowercase hexadecimal. 
*/ keyId?: string; } interface CertificateConfigX509Config { /** * (Output) * Describes custom X.509 extensions. * Structure is documented below. */ additionalExtensions?: outputs.certificateauthority.CertificateConfigX509ConfigAdditionalExtension[]; /** * (Output) * Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the * "Authority Information Access" extension in the certificate. */ aiaOcspServers?: string[]; /** * (Output) * Describes values that are relevant in a CA certificate. * Structure is documented below. */ caOptions?: outputs.certificateauthority.CertificateConfigX509ConfigCaOptions; /** * (Output) * Indicates the intended use for keys that correspond to a certificate. * Structure is documented below. */ keyUsage: outputs.certificateauthority.CertificateConfigX509ConfigKeyUsage; /** * (Output) * Describes the X.509 name constraints extension. * Structure is documented below. */ nameConstraints?: outputs.certificateauthority.CertificateConfigX509ConfigNameConstraints; /** * (Output) * Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. * Structure is documented below. */ policyIds?: outputs.certificateauthority.CertificateConfigX509ConfigPolicyId[]; } interface CertificateConfigX509ConfigAdditionalExtension { /** * Indicates whether or not this extension is critical (i.e., if the client does not know how to * handle this extension, the client should consider this to be an error). */ critical: boolean; /** * Describes values that are relevant in a CA certificate. * Structure is documented below. */ objectId: outputs.certificateauthority.CertificateConfigX509ConfigAdditionalExtensionObjectId; /** * The value of this X.509 extension. A base64-encoded string. */ value: string; } interface CertificateConfigX509ConfigAdditionalExtensionObjectId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. 
*/ objectIdPaths: number[]; } interface CertificateConfigX509ConfigCaOptions { /** * When true, the "CA" in Basic Constraints extension will be set to true. */ isCa?: boolean; /** * Refers to the "path length constraint" in Basic Constraints extension. For a CA certificate, this value describes the depth of * subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. */ maxIssuerPathLength?: number; /** * When true, the "CA" in Basic Constraints extension will be set to false. * If both `isCa` and `nonCa` are unset, the extension will be omitted from the CA certificate. */ nonCa?: boolean; /** * When true, the "path length constraint" in Basic Constraints extension will be set to 0. * if both `maxIssuerPathLength` and `zeroMaxIssuerPathLength` are unset, * the max path length will be omitted from the CA certificate. */ zeroMaxIssuerPathLength?: boolean; } interface CertificateConfigX509ConfigKeyUsage { /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ baseKeyUsage: outputs.certificateauthority.CertificateConfigX509ConfigKeyUsageBaseKeyUsage; /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ extendedKeyUsage: outputs.certificateauthority.CertificateConfigX509ConfigKeyUsageExtendedKeyUsage; /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. * Structure is documented below. */ unknownExtendedKeyUsages?: outputs.certificateauthority.CertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsage[]; } interface CertificateConfigX509ConfigKeyUsageBaseKeyUsage { /** * The key may be used to sign certificates. */ certSign?: boolean; /** * The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". */ contentCommitment?: boolean; /** * The key may be used sign certificate revocation lists. 
*/ crlSign?: boolean; /** * The key may be used to encipher data. */ dataEncipherment?: boolean; /** * The key may be used to decipher only. */ decipherOnly?: boolean; /** * The key may be used for digital signatures. */ digitalSignature?: boolean; /** * The key may be used to encipher only. */ encipherOnly?: boolean; /** * The key may be used in a key agreement protocol. */ keyAgreement?: boolean; /** * The key may be used to encipher other keys. */ keyEncipherment?: boolean; } interface CertificateConfigX509ConfigKeyUsageExtendedKeyUsage { /** * Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. */ clientAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication". */ codeSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". */ emailProtection?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". */ ocspSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. */ serverAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". */ timeStamping?: boolean; } interface CertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsage { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CertificateConfigX509ConfigNameConstraints { /** * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * Contains excluded DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. 
* For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ excludedDnsNames?: string[]; /** * Contains the excluded email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ excludedEmailAddresses?: string[]; /** * Contains the excluded IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ excludedIpRanges?: string[]; /** * Contains the excluded URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ excludedUris?: string[]; /** * Contains permitted DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ permittedDnsNames?: string[]; /** * Contains the permitted email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ permittedEmailAddresses?: string[]; /** * Contains the permitted IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ permittedIpRanges?: string[]; /** * Contains the permitted URIs that apply to the host part of the name. 
* The value can be a hostname or a domain with a * leading period (like `.example.com`) */ permittedUris?: string[]; } interface CertificateConfigX509ConfigPolicyId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface CertificateRevocationDetail { /** * (Output) * Indicates why a Certificate was revoked. */ revocationState: string; /** * (Output) * The time at which this Certificate was revoked. */ revocationTime: string; } interface CertificateTemplateIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface CertificateTemplateIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. 
*/ title: string; } interface CertificateTemplateIdentityConstraints { /** * Required. If this is true, the SubjectAltNames extension may be copied from a certificate request into the signed certificate. Otherwise, the requested SubjectAltNames will be discarded. */ allowSubjectAltNamesPassthrough: boolean; /** * Required. If this is true, the Subject field may be copied from a certificate request into the signed certificate. Otherwise, the requested Subject will be discarded. */ allowSubjectPassthrough: boolean; /** * Optional. A CEL expression that may be used to validate the resolved X.509 Subject and/or Subject Alternative Name before a certificate is signed. To see the full allowed syntax and some examples, see https://cloud.google.com/certificate-authority-service/docs/using-cel * Structure is documented below. */ celExpression?: outputs.certificateauthority.CertificateTemplateIdentityConstraintsCelExpression; } interface CertificateTemplateIdentityConstraintsCelExpression { /** * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface CertificateTemplatePassthroughExtensions { /** * Optional. A set of ObjectIds identifying custom X.509 extensions. Will be combined with knownExtensions to determine the full set of X.509 extensions. * Structure is documented below. */ additionalExtensions?: outputs.certificateauthority.CertificateTemplatePassthroughExtensionsAdditionalExtension[]; /** * Optional. 
A set of named X.509 extensions. Will be combined with additionalExtensions to determine the full set of X.509 extensions. */ knownExtensions?: string[]; } interface CertificateTemplatePassthroughExtensionsAdditionalExtension { /** * Required. The parts of an OID path. The most significant parts of the path come first. */ objectIdPaths: number[]; } interface CertificateTemplatePredefinedValues { /** * Optional. Describes custom X.509 extensions. * Structure is documented below. */ additionalExtensions?: outputs.certificateauthority.CertificateTemplatePredefinedValuesAdditionalExtension[]; /** * Optional. Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the "Authority Information Access" extension in the certificate. */ aiaOcspServers?: string[]; /** * Optional. Describes options in this X509Parameters that are relevant in a CA certificate. * Structure is documented below. */ caOptions?: outputs.certificateauthority.CertificateTemplatePredefinedValuesCaOptions; /** * Optional. Indicates the intended use for keys that correspond to a certificate. * Structure is documented below. */ keyUsage?: outputs.certificateauthority.CertificateTemplatePredefinedValuesKeyUsage; /** * Describes the X.509 name constraints extension. * Structure is documented below. */ nameConstraints?: outputs.certificateauthority.CertificateTemplatePredefinedValuesNameConstraints; /** * Optional. Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. * Structure is documented below. */ policyIds?: outputs.certificateauthority.CertificateTemplatePredefinedValuesPolicyId[]; } interface CertificateTemplatePredefinedValuesAdditionalExtension { /** * Optional. Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error). */ critical?: boolean; /** * Required. The OID for this X.509 extension. 
* Structure is documented below. */ objectId: outputs.certificateauthority.CertificateTemplatePredefinedValuesAdditionalExtensionObjectId; /** * Required. The value of this X.509 extension. */ value: string; } interface CertificateTemplatePredefinedValuesAdditionalExtensionObjectId { /** * Required. The parts of an OID path. The most significant parts of the path come first. */ objectIdPaths: number[]; } interface CertificateTemplatePredefinedValuesCaOptions { /** * Optional. Refers to the "CA" X.509 extension, which is a boolean value. When this value is true, the "CA" in Basic Constraints extension will be set to true. */ isCa?: boolean; /** * Optional. Refers to the "path length constraint" in Basic Constraints extension. For a CA certificate, this value describes the depth of * subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. */ maxIssuerPathLength?: number; /** * Optional. When true, the "CA" in Basic Constraints extension will be set to null and omitted from the CA certificate. * If both `isCa` and `nullCa` are unset, the "CA" in Basic Constraints extension will be set to false. * Note that the behavior when `isCa = false` for this resource is different from the behavior in the Certificate Authority, Certificate and CaPool resources. */ nullCa?: boolean; /** * Optional. When true, the "path length constraint" in Basic Constraints extension will be set to 0. * if both `maxIssuerPathLength` and `zeroMaxIssuerPathLength` are unset, * the max path length will be omitted from the CA certificate. */ zeroMaxIssuerPathLength?: boolean; } interface CertificateTemplatePredefinedValuesKeyUsage { /** * Describes high-level ways in which a key may be used. * Structure is documented below. */ baseKeyUsage?: outputs.certificateauthority.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage; /** * Detailed scenarios in which a key may be used. * Structure is documented below. 
*/ extendedKeyUsage?: outputs.certificateauthority.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage; /** * Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message. * Structure is documented below. */ unknownExtendedKeyUsages?: outputs.certificateauthority.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsage[]; } interface CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage { /** * The key may be used to sign certificates. */ certSign?: boolean; /** * The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". */ contentCommitment?: boolean; /** * The key may be used to sign certificate revocation lists. */ crlSign?: boolean; /** * The key may be used to encipher data. */ dataEncipherment?: boolean; /** * The key may be used to decipher only. */ decipherOnly?: boolean; /** * The key may be used for digital signatures. */ digitalSignature?: boolean; /** * The key may be used to encipher only. */ encipherOnly?: boolean; /** * The key may be used in a key agreement protocol. */ keyAgreement?: boolean; /** * The key may be used to encipher other keys. */ keyEncipherment?: boolean; } interface CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage { /** * Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. */ clientAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code". */ codeSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". */ emailProtection?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". */ ocspSigning?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. 
*/ serverAuth?: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". */ timeStamping?: boolean; } interface CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsage { /** * Required. The parts of an OID path. The most significant parts of the path come first. */ objectIdPaths: number[]; } interface CertificateTemplatePredefinedValuesNameConstraints { /** * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * Contains excluded DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ excludedDnsNames?: string[]; /** * Contains the excluded email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ excludedEmailAddresses?: string[]; /** * Contains the excluded IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ excludedIpRanges?: string[]; /** * Contains the excluded URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ excludedUris?: string[]; /** * Contains permitted DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, `example.com`, `www.example.com`, `www.sub.example.com` * would satisfy `example.com` while `example1.com` does not. */ permittedDnsNames?: string[]; /** * Contains the permitted email addresses. 
The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. `.example.com`) to indicate * all email addresses in that domain. */ permittedEmailAddresses?: string[]; /** * Contains the permitted IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ permittedIpRanges?: string[]; /** * Contains the permitted URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like `.example.com`) */ permittedUris?: string[]; } interface CertificateTemplatePredefinedValuesPolicyId { /** * Required. The parts of an OID path. The most significant parts of the path come first. */ objectIdPaths: number[]; } interface GetAuthorityAccessUrl { /** * The URL where this CertificateAuthority's CA certificate is published. This will only be * set for CAs that have been activated. */ caCertificateAccessUrl: string; /** * The URL where this CertificateAuthority's CRLs are published. This will only be set for * CAs that have been activated. */ crlAccessUrls: string[]; } interface GetAuthorityConfig { /** * Specifies some of the values in a certificate that are related to the subject. */ subjectConfigs: outputs.certificateauthority.GetAuthorityConfigSubjectConfig[]; /** * When specified this provides a custom SKI to be used in the certificate. This should only be used to maintain a SKI of an existing CA originally created outside CA service, which was not generated using method (1) described in RFC 5280 section 4.2.1.2.. */ subjectKeyIds: outputs.certificateauthority.GetAuthorityConfigSubjectKeyId[]; /** * Describes how some of the technical X.509 fields in a certificate should be populated. 
*/ x509Configs: outputs.certificateauthority.GetAuthorityConfigX509Config[]; } interface GetAuthorityConfigSubjectConfig { /** * The subject alternative name fields. */ subjectAltNames: outputs.certificateauthority.GetAuthorityConfigSubjectConfigSubjectAltName[]; /** * Contains distinguished name fields such as the location and organization. */ subjects: outputs.certificateauthority.GetAuthorityConfigSubjectConfigSubject[]; } interface GetAuthorityConfigSubjectConfigSubject { /** * The common name of the distinguished name. */ commonName: string; /** * The country code of the subject. */ countryCode: string; /** * The locality or city of the subject. */ locality: string; /** * The organization of the subject. */ organization: string; /** * The organizational unit of the subject. */ organizationalUnit: string; /** * The postal code of the subject. */ postalCode: string; /** * The province, territory, or regional state of the subject. */ province: string; /** * The street address of the subject. */ streetAddress: string; } interface GetAuthorityConfigSubjectConfigSubjectAltName { /** * Contains only valid, fully-qualified host names. */ dnsNames: string[]; /** * Contains only valid RFC 2822 E-mail addresses. */ emailAddresses: string[]; /** * Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses. */ ipAddresses: string[]; /** * Contains only valid RFC 3986 URIs. */ uris: string[]; } interface GetAuthorityConfigSubjectKeyId { /** * The value of the KeyId in lowercase hexadecimal. */ keyId: string; } interface GetAuthorityConfigX509Config { /** * Specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. */ additionalExtensions: outputs.certificateauthority.GetAuthorityConfigX509ConfigAdditionalExtension[]; /** * Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the * "Authority Information Access" extension in the certificate. 
*/ aiaOcspServers: string[]; /** * Describes values that are relevant in a CA certificate. */ caOptions: outputs.certificateauthority.GetAuthorityConfigX509ConfigCaOption[]; /** * Indicates the intended use for keys that correspond to a certificate. */ keyUsages: outputs.certificateauthority.GetAuthorityConfigX509ConfigKeyUsage[]; /** * Describes the X.509 name constraints extension. */ nameConstraints: outputs.certificateauthority.GetAuthorityConfigX509ConfigNameConstraint[]; /** * Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. */ policyIds: outputs.certificateauthority.GetAuthorityConfigX509ConfigPolicyId[]; } interface GetAuthorityConfigX509ConfigAdditionalExtension { /** * Indicates whether or not this extension is critical (i.e., if the client does not know how to * handle this extension, the client should consider this to be an error). */ critical: boolean; /** * Describes values that are relevant in a CA certificate. */ objectIds: outputs.certificateauthority.GetAuthorityConfigX509ConfigAdditionalExtensionObjectId[]; /** * The value of this X.509 extension. A base64-encoded string. */ value: string; } interface GetAuthorityConfigX509ConfigAdditionalExtensionObjectId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface GetAuthorityConfigX509ConfigCaOption { /** * When true, the "CA" in Basic Constraints extension will be set to true. */ isCa: boolean; /** * Refers to the "path length constraint" in Basic Constraints extension. For a CA certificate, this value describes the depth of * subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. Setting the value to 0 * requires setting 'zero_max_issuer_path_length = true'. */ maxIssuerPathLength: number; /** * When true, the "CA" in Basic Constraints extension will be set to false. 
* If both 'is_ca' and 'non_ca' are unset, the extension will be omitted from the CA certificate. */ nonCa: boolean; /** * When true, the "path length constraint" in Basic Constraints extension will be set to 0. * If both 'max_issuer_path_length' and 'zero_max_issuer_path_length' are unset, * the max path length will be omitted from the CA certificate. */ zeroMaxIssuerPathLength: boolean; } interface GetAuthorityConfigX509ConfigKeyUsage { /** * Describes high-level ways in which a key may be used. */ baseKeyUsages: outputs.certificateauthority.GetAuthorityConfigX509ConfigKeyUsageBaseKeyUsage[]; /** * Detailed scenarios in which a key may be used. */ extendedKeyUsages: outputs.certificateauthority.GetAuthorityConfigX509ConfigKeyUsageExtendedKeyUsage[]; /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ unknownExtendedKeyUsages: outputs.certificateauthority.GetAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsage[]; } interface GetAuthorityConfigX509ConfigKeyUsageBaseKeyUsage { /** * The key may be used to sign certificates. */ certSign: boolean; /** * The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". */ contentCommitment: boolean; /** * The key may be used to sign certificate revocation lists. */ crlSign: boolean; /** * The key may be used to encipher data. */ dataEncipherment: boolean; /** * The key may be used to decipher only. */ decipherOnly: boolean; /** * The key may be used for digital signatures. */ digitalSignature: boolean; /** * The key may be used to encipher only. */ encipherOnly: boolean; /** * The key may be used in a key agreement protocol. */ keyAgreement: boolean; /** * The key may be used to encipher other keys. */ keyEncipherment: boolean; } interface GetAuthorityConfigX509ConfigKeyUsageExtendedKeyUsage { /** * Corresponds to OID 1.3.6.1.5.5.7.3.2. 
Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. */ clientAuth: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code". */ codeSigning: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". */ emailProtection: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". */ ocspSigning: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. */ serverAuth: boolean; /** * Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". */ timeStamping: boolean; } interface GetAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsage { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface GetAuthorityConfigX509ConfigNameConstraint { /** * Indicates whether or not the name constraints are marked critical. */ critical: boolean; /** * Contains excluded DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, 'example.com', 'www.example.com', 'www.sub.example.com' * would satisfy 'example.com' while 'example1.com' does not. */ excludedDnsNames: string[]; /** * Contains the excluded email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. '.example.com') to indicate * all email addresses in that domain. */ excludedEmailAddresses: string[]; /** * Contains the excluded IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. 
* For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ excludedIpRanges: string[]; /** * Contains the excluded URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like '.example.com') */ excludedUris: string[]; /** * Contains permitted DNS names. Any DNS name that can be * constructed by simply adding zero or more labels to * the left-hand side of the name satisfies the name constraint. * For example, 'example.com', 'www.example.com', 'www.sub.example.com' * would satisfy 'example.com' while 'example1.com' does not. */ permittedDnsNames: string[]; /** * Contains the permitted email addresses. The value can be a particular * email address, a hostname to indicate all email addresses on that host or * a domain with a leading period (e.g. '.example.com') to indicate * all email addresses in that domain. */ permittedEmailAddresses: string[]; /** * Contains the permitted IP ranges. For IPv4 addresses, the ranges * are expressed using CIDR notation as specified in RFC 4632. * For IPv6 addresses, the ranges are expressed in similar encoding as IPv4 * addresses. */ permittedIpRanges: string[]; /** * Contains the permitted URIs that apply to the host part of the name. * The value can be a hostname or a domain with a * leading period (like '.example.com') */ permittedUris: string[]; } interface GetAuthorityConfigX509ConfigPolicyId { /** * An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages. */ objectIdPaths: number[]; } interface GetAuthorityKeySpec { /** * The algorithm to use for creating a managed Cloud KMS key for a simplified * experience. All managed keys will have their ProtectionLevel as HSM. 
Possible values: ["SIGN_HASH_ALGORITHM_UNSPECIFIED", "RSA_PSS_2048_SHA256", "RSA_PSS_3072_SHA256", "RSA_PSS_4096_SHA256", "RSA_PKCS1_2048_SHA256", "RSA_PKCS1_3072_SHA256", "RSA_PKCS1_4096_SHA256", "EC_P256_SHA256", "EC_P384_SHA384"] */ algorithm: string; /** * The resource name for an existing Cloud KMS CryptoKeyVersion in the format * 'projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*'. */ cloudKmsKeyVersion: string; } interface GetAuthoritySubordinateConfig { /** * This can refer to a CertificateAuthority that was used to create a * subordinate CertificateAuthority. This field is used for information * and usability purposes only. The resource name is in the format * 'projects/*/locations/*/caPools/*/certificateAuthorities/*'. */ certificateAuthority: string; /** * Contains the PEM certificate chain for the issuers of this CertificateAuthority, * but not pem certificate for this CA itself. */ pemIssuerChains: outputs.certificateauthority.GetAuthoritySubordinateConfigPemIssuerChain[]; } interface GetAuthoritySubordinateConfigPemIssuerChain { /** * Expected to be in leaf-to-root order according to RFC 5246. */ pemCertificates: string[]; } interface GetAuthorityUserDefinedAccessUrl { /** * A list of URLs where this CertificateAuthority's CA certificate is published that is specified by users. */ aiaIssuingCertificateUrls: string[]; /** * A list of URLs where this CertificateAuthority's CRLs are published that is specified by users. */ crlAccessUrls: string[]; } } export declare namespace certificatemanager { interface CertificateIssuanceConfigCertificateAuthorityConfig { /** * Defines a CertificateAuthorityServiceConfig. * Structure is documented below. 
*/ certificateAuthorityServiceConfig?: outputs.certificatemanager.CertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfig; } interface CertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfig { /** * A CA pool resource used to issue a certificate. * The CA pool string has a relative resource path following the form * "projects/{project}/locations/{location}/caPools/{caPool}". */ caPool: string; } interface CertificateManaged { /** * (Output) * Detailed state of the latest authorization attempt for each domain * specified for this Managed Certificate. * Structure is documented below. * * * The `provisioningIssue` block contains: */ authorizationAttemptInfos: outputs.certificatemanager.CertificateManagedAuthorizationAttemptInfo[]; /** * Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specified, but not both. */ dnsAuthorizations?: string[]; /** * The domains for which a managed SSL certificate will be generated. * Wildcard domains are only supported with DNS challenge resolution */ domains?: string[]; /** * The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. * If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. * Either issuanceConfig or dnsAuthorizations should be specified, but not both. */ issuanceConfig?: string; /** * (Output) * Information about issues with provisioning this Managed Certificate. * Structure is documented below. */ provisioningIssues: outputs.certificatemanager.CertificateManagedProvisioningIssue[]; /** * (Output) * State of the domain for managed certificate issuance. 
*/ state: string; } interface CertificateManagedAuthorizationAttemptInfo { /** * Human readable explanation for reaching the state. Provided to help * address the configuration issues. * Not guaranteed to be stable. For programmatic access use 'failure_reason' field. */ details: string; /** * Domain name of the authorization attempt. */ domain: string; /** * Reason for failure of the authorization attempt for the domain. */ failureReason: string; /** * State of the domain for managed certificate issuance. */ state: string; } interface CertificateManagedProvisioningIssue { /** * Human readable explanation about the issue. Provided to help address * the configuration issues. * Not guaranteed to be stable. For programmatic access use 'reason' field. */ details: string; /** * Reason for provisioning failures. */ reason: string; } interface CertificateMapGclbTarget { /** * An IP configuration where this Certificate Map is serving * Structure is documented below. */ ipConfigs?: outputs.certificatemanager.CertificateMapGclbTargetIpConfig[]; /** * Proxy name must be in the format projects/*/locations/*/targetHttpsProxies/*. * This field is part of a union field `targetProxy`: Only one of `targetHttpsProxy` or * `targetSslProxy` may be set. */ targetHttpsProxy?: string; /** * Proxy name must be in the format projects/*/locations/*/targetSslProxies/*. * This field is part of a union field `targetProxy`: Only one of `targetHttpsProxy` or * `targetSslProxy` may be set. */ targetSslProxy?: string; } interface CertificateMapGclbTargetIpConfig { /** * An external IP address */ ipAddress?: string; /** * A list of ports */ ports?: number[]; } interface CertificateSelfManaged { /** * (Optional, Deprecated) * The certificate chain in PEM-encoded form. * Leaf certificate comes first, followed by intermediate ones if any. * **Note**: This property is sensitive and will not be displayed in the plan. 
* * > **Warning:** `certificatePem` is deprecated and will be removed in a future major release. Use `pemCertificate` instead. * * @deprecated `certificatePem` is deprecated and will be removed in a future major release. Use `pemCertificate` instead. */ certificatePem?: string; /** * The certificate chain in PEM-encoded form. * Leaf certificate comes first, followed by intermediate ones if any. * **Note**: This property is sensitive and will not be displayed in the plan. */ pemCertificate?: string; /** * The private key of the leaf certificate in PEM-encoded form. * **Note**: This property is sensitive and will not be displayed in the plan. */ pemPrivateKey?: string; /** * (Optional, Deprecated) * The private key of the leaf certificate in PEM-encoded form. * **Note**: This property is sensitive and will not be displayed in the plan. * * > **Warning:** `privateKeyPem` is deprecated and will be removed in a future major release. Use `pemPrivateKey` instead. * * @deprecated `privateKeyPem` is deprecated and will be removed in a future major release. Use `pemPrivateKey` instead. */ privateKeyPem?: string; } interface DnsAuthorizationDnsResourceRecord { /** * (Output) * Data of the DNS Resource Record. */ data: string; /** * Name of the resource; provided by the client when the resource is created. * The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, * and all following characters must be a dash, underscore, letter or digit. */ name: string; /** * type of DNS authorization. If unset during the resource creation, FIXED_RECORD will * be used for global resources, and PER_PROJECT_RECORD will be used for other locations. * FIXED_RECORD DNS authorization uses DNS-01 validation method * PER_PROJECT_RECORD DNS authorization allows for independent management * of Google-managed certificates with DNS authorization across multiple * projects. 
* Possible values are: `FIXED_RECORD`, `PER_PROJECT_RECORD`. */ type: string; } interface GetCertificateMapGclbTarget { /** * An IP configuration where this Certificate Map is serving */ ipConfigs: outputs.certificatemanager.GetCertificateMapGclbTargetIpConfig[]; /** * Proxy name must be in the format projects/*/locations/*/targetHttpsProxies/*. * This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or * 'targetSslProxy' may be set. */ targetHttpsProxy: string; /** * Proxy name must be in the format projects/*/locations/*/targetSslProxies/*. * This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or * 'targetSslProxy' may be set. */ targetSslProxy: string; } interface GetCertificateMapGclbTargetIpConfig { /** * An external IP address */ ipAddress: string; /** * A list of ports */ ports: number[]; } interface GetCertificatesCertificate { /** * A human-readable description of the resource. */ description: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * Set of label tags associated with the Certificate resource. * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effective_labels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * The Certificate Manager location. If not specified, "global" is used. */ location: string; /** * Configuration and state of a Managed Certificate. * Certificate Manager provisions and renews Managed Certificates * automatically, for as long as it's authorized to do so. */ manageds: outputs.certificatemanager.GetCertificatesCertificateManaged[]; /** * A user-defined name of the certificate. 
Certificate names must be unique * The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, * and all following characters must be a dash, underscore, letter or digit. */ name: string; /** * The ID of the project in which the resource belongs. If it * is not provided, the provider project is used. */ project: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) */ sanDnsnames: string[]; /** * The scope of the certificate. * * DEFAULT: Certificates with default scope are served from core Google data centers. * If unsure, choose this option. * * EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. * See https://cloud.google.com/vpc/docs/edge-locations. * * ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). * See https://cloud.google.com/compute/docs/regions-zones. * * CLIENT_AUTH: Certificates with CLIENT_AUTH scope are used by a load balancer (TLS client) to be presented to the backend (TLS server) when backend mTLS is configured. * See https://cloud.google.com/load-balancing/docs/backend-authenticated-tls-backend-mtls#client-certificate. */ scope: string; } interface GetCertificatesCertificateManaged { /** * Detailed state of the latest authorization attempt for each domain * specified for this Managed Certificate. */ authorizationAttemptInfos: outputs.certificatemanager.GetCertificatesCertificateManagedAuthorizationAttemptInfo[]; /** * Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specified, but not both. 
*/ dnsAuthorizations: string[]; /** * The domains for which a managed SSL certificate will be generated. * Wildcard domains are only supported with DNS challenge resolution */ domains: string[]; /** * The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. * If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. * Either issuanceConfig or dnsAuthorizations should be specified, but not both. */ issuanceConfig: string; /** * Information about issues with provisioning this Managed Certificate. */ provisioningIssues: outputs.certificatemanager.GetCertificatesCertificateManagedProvisioningIssue[]; /** * A state of this Managed Certificate. */ state: string; } interface GetCertificatesCertificateManagedAuthorizationAttemptInfo { /** * Human readable explanation for reaching the state. Provided to help * address the configuration issues. * Not guaranteed to be stable. For programmatic access use 'failure_reason' field. */ details: string; /** * Domain name of the authorization attempt. */ domain: string; /** * Reason for failure of the authorization attempt for the domain. */ failureReason: string; /** * State of the domain for managed certificate issuance. */ state: string; } interface GetCertificatesCertificateManagedProvisioningIssue { /** * Human readable explanation about the issue. Provided to help address * the configuration issues. * Not guaranteed to be stable. For programmatic access use 'reason' field. */ details: string; /** * Reason for provisioning failures. */ reason: string; } interface GetDnsAuthorizationDnsResourceRecord { /** * Data of the DNS Resource Record. */ data: string; /** * The name of the DNS Authorization. */ name: string; /** * Type of the DNS Resource Record. 
*/ type: string; } interface TrustConfigAllowlistedCertificate { /** * PEM certificate that is allowlisted. The certificate can be up to 5k bytes, and must be a parseable X.509 certificate. */ pemCertificate: string; } interface TrustConfigTrustStore { /** * Set of intermediate CA certificates used for the path building phase of chain validation. * The field is currently not supported if trust config is used for the workload certificate feature. * Structure is documented below. */ intermediateCas?: outputs.certificatemanager.TrustConfigTrustStoreIntermediateCa[]; /** * List of Trust Anchors to be used while performing validation against a given TrustStore. * Structure is documented below. */ trustAnchors?: outputs.certificatemanager.TrustConfigTrustStoreTrustAnchor[]; } interface TrustConfigTrustStoreIntermediateCa { /** * PEM intermediate certificate used for building up paths for validation. * Each certificate provided in PEM format may occupy up to 5kB. * **Note**: This property is sensitive and will not be displayed in the plan. */ pemCertificate?: string; } interface TrustConfigTrustStoreTrustAnchor { /** * PEM root certificate of the PKI used for validation. * Each certificate provided in PEM format may occupy up to 5kB. * **Note**: This property is sensitive and will not be displayed in the plan. */ pemCertificate?: string; } } export declare namespace ces { interface AgentAfterAgentCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface AgentAfterModelCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. 
*/ pythonCode: string; } interface AgentAfterToolCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface AgentBeforeAgentCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface AgentBeforeModelCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface AgentBeforeToolCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface AgentLlmAgent { } interface AgentModelSettings { /** * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model?: string; /** * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature?: number; } interface AgentRemoteDialogflowAgent { /** * The * [Dialogflow](https://cloud.google.com/dialogflow/cx/docs/concept/console-conversational-agents) * agent resource name. 
* Format: `projects/{project}/locations/{location}/agents/{agent}` */ agent: string; /** * The environment ID of the Dialogflow agent to be used for the agent * execution. If not specified, the draft environment will be used. */ environmentId?: string; /** * The flow ID of the flow in the Dialogflow agent. */ flowId: string; /** * The mapping of the app variables names to the Dialogflow session * parameters names to be sent to the Dialogflow agent as input. */ inputVariableMapping?: { [key: string]: string; }; /** * The mapping of the Dialogflow session parameters names to the app * variables names to be sent back to the CES agent after the Dialogflow * agent execution ends. */ outputVariableMapping?: { [key: string]: string; }; } interface AgentToolset { /** * The tools IDs to filter the toolset. */ toolIds?: string[]; /** * The resource name of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ toolset: string; } interface AppAudioProcessingConfig { /** * Configuration for the ambient sound to be played with the synthesized agent * response, to enhance the naturalness of the conversation. * Structure is documented below. */ ambientSoundConfig?: outputs.ces.AppAudioProcessingConfigAmbientSoundConfig; /** * Configuration for how the user barge-in activities should be handled. * Structure is documented below. */ bargeInConfig?: outputs.ces.AppAudioProcessingConfigBargeInConfig; /** * The duration of user inactivity (no speech or interaction) before the agent * prompts the user for reengagement. If not set, the agent will not prompt * the user for reengagement. */ inactivityTimeout?: string; /** * Configuration of how the agent response should be synthesized, mapping from * the language code to SynthesizeSpeechConfig. * If the configuration for the specified language code is not found, the * configuration for the root language code will be used. 
For example, if the * map contains "en-us" and "en", and the specified language code is "en-gb", * then "en" configuration will be used. * Note: Language code is case-insensitive. * Structure is documented below. */ synthesizeSpeechConfigs?: outputs.ces.AppAudioProcessingConfigSynthesizeSpeechConfig[]; } interface AppAudioProcessingConfigAmbientSoundConfig { /** * Ambient noise as a mono-channel, 16kHz WAV file stored in [Cloud * Storage](https://cloud.google.com/storage). * Note: Please make sure the CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com` has * `storage.objects.get` permission to the Cloud Storage object. */ gcsUri?: string; /** * Name of the prebuilt ambient sound. * Valid values are: - "coffeeShop" - "keyboard" - "keypad" - "hum" * -"office1" - "office2" - "office3" * -"room1" - "room2" - "room3" * -"room4" - "room5" - "airConditioner" */ prebuiltAmbientSound?: string; /** * Volume gain (in dB) of the normal native volume supported by * ambient noise, in the range [-96.0, 16.0]. If unset, or set to a value of * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) * will play at approximately half the amplitude of the normal native signal * amplitude. A value of +6.0 (dB) will play at approximately twice the * amplitude of the normal native signal amplitude. We strongly recommend not * to exceed +10 (dB) as there's usually no effective increase in loudness for * any value greater than that. */ volumeGainDb?: number; } interface AppAudioProcessingConfigBargeInConfig { /** * If enabled, the agent will adapt its next response based on the assumption * that the user hasn't heard the full preceding agent message. * This should not be used in scenarios where agent responses are displayed * visually. */ bargeInAwareness?: boolean; } interface AppAudioProcessingConfigSynthesizeSpeechConfig { /** * The identifier for this object. Format specified above. 
*/ languageCode: string; /** * The speaking rate/speed in the range [0.25, 2.0]. 1.0 is the normal native * speed supported by the specific voice. 2.0 is twice as fast, and 0.5 is * half as fast. Values outside of the range [0.25, 2.0] will return an error. */ speakingRate?: number; /** * The name of the voice. If not set, the service will choose a * voice based on the other parameters such as language_code. * For the list of available voices, please refer to Supported voices and * languages from Cloud Text-to-Speech. */ voice?: string; } interface AppClientCertificateSettings { /** * The passphrase to decrypt the private key. * Should be left unset if the private key is not encrypted. */ passphrase?: string; /** * The name of the SecretManager secret version resource * storing the private key encoded in PEM format. * Format: projects/{project}/secrets/{secret}/versions/{version} */ privateKey: string; /** * The TLS certificate encoded in PEM format. * This string must include the begin header and end footer lines. */ tlsCertificate: string; } interface AppDataStoreSettings { /** * (Output) * The engines for the app. * Structure is documented below. * * * The `engines` block contains: */ engines: outputs.ces.AppDataStoreSettingsEngine[]; } interface AppDataStoreSettingsEngine { /** * Identifier. The unique identifier of the app. * Format: `projects/{project}/locations/{location}/apps/{app}` */ name: string; /** * The type of the engine. * Possible values: * ENGINE_TYPE_SEARCH * ENGINE_TYPE_CHAT */ type: string; } interface AppDefaultChannelProfile { /** * The type of the channel profile. * Possible values: * UNKNOWN * WEB_UI * API * TWILIO * GOOGLE_TELEPHONY_PLATFORM * CONTACT_CENTER_AS_A_SERVICE */ channelType?: string; /** * Whether to disable user barge-in in the conversation. * - true: User interruptions are disabled while the agent is speaking. * - false: The agent retains automatic control over when the user can interrupt. 
*/ disableBargeInControl?: boolean; /** * Whether to disable DTMF (dual-tone multi-frequency). */ disableDtmf?: boolean; /** * Represents the persona property of a channel. * Structure is documented below. */ personaProperty?: outputs.ces.AppDefaultChannelProfilePersonaProperty; /** * The unique identifier of the channel profile. */ profileId?: string; /** * Message for configuration for the web widget. * Structure is documented below. */ webWidgetConfig?: outputs.ces.AppDefaultChannelProfileWebWidgetConfig; } interface AppDefaultChannelProfilePersonaProperty { /** * The persona of the channel. * Possible values: * UNKNOWN * CONCISE * CHATTY */ persona?: string; } interface AppDefaultChannelProfileWebWidgetConfig { /** * The modality of the web widget. * Possible values: * UNKNOWN_MODALITY * CHAT_AND_VOICE * VOICE_ONLY * CHAT_ONLY */ modality?: string; /** * The theme of the web widget. * Possible values: * UNKNOWN_THEME * LIGHT * DARK */ theme?: string; /** * The title of the web widget. */ webWidgetTitle?: string; } interface AppEvaluationMetricsThresholds { /** * Settings for golden evaluations. * Structure is documented below. */ goldenEvaluationMetricsThresholds?: outputs.ces.AppEvaluationMetricsThresholdsGoldenEvaluationMetricsThresholds; } interface AppEvaluationMetricsThresholdsGoldenEvaluationMetricsThresholds { /** * Expectation level metrics thresholds. * Structure is documented below. */ expectationLevelMetricsThresholds?: outputs.ces.AppEvaluationMetricsThresholdsGoldenEvaluationMetricsThresholdsExpectationLevelMetricsThresholds; /** * Turn level metrics thresholds. * Structure is documented below. */ turnLevelMetricsThresholds?: outputs.ces.AppEvaluationMetricsThresholdsGoldenEvaluationMetricsThresholdsTurnLevelMetricsThresholds; } interface AppEvaluationMetricsThresholdsGoldenEvaluationMetricsThresholdsExpectationLevelMetricsThresholds { /** * The success threshold for individual tool invocation parameter * correctness. 
Must be a float between 0 and 1. Default is 1.0. */ toolInvocationParameterCorrectnessThreshold?: number; } interface AppEvaluationMetricsThresholdsGoldenEvaluationMetricsThresholdsTurnLevelMetricsThresholds { /** * The success threshold for overall tool invocation correctness. Must be * a float between 0 and 1. Default is 1.0. */ overallToolInvocationCorrectnessThreshold?: number; /** * The success threshold for semantic similarity. Must be an integer * between 0 and 4. Default is >= 3. */ semanticSimilaritySuccessThreshold?: number; } interface AppLanguageSettings { /** * The default language code of the app. */ defaultLanguageCode?: string; /** * Enables multilingual support. If true, agents in the app will use pre-built * instructions to improve handling of multilingual input. */ enableMultilingualSupport?: boolean; /** * The action to perform when an agent receives input in an unsupported * language. * This can be a predefined action or a custom tool call. * Valid values are: * - A tool's full resource name, which triggers a specific tool execution. * - A predefined system action, such as "escalate" or "exit", which triggers * an EndSession signal with corresponding metadata * to terminate the conversation. */ fallbackAction?: string; /** * List of languages codes supported by the app, in addition to the * `defaultLanguageCode`. */ supportedLanguageCodes?: string[]; } interface AppLoggingSettings { /** * Configuration for how the audio interactions should be recorded. * Structure is documented below. */ audioRecordingConfig?: outputs.ces.AppLoggingSettingsAudioRecordingConfig; /** * Settings to describe the BigQuery export behaviors for the app. * Structure is documented below. */ bigqueryExportSettings?: outputs.ces.AppLoggingSettingsBigqueryExportSettings; /** * Settings to describe the Cloud Logging behaviors for the app. * Structure is documented below. 
*/ cloudLoggingSettings?: outputs.ces.AppLoggingSettingsCloudLoggingSettings; /** * Settings to describe the conversation logging behaviors for the app. * Structure is documented below. */ conversationLoggingSettings?: outputs.ces.AppLoggingSettingsConversationLoggingSettings; /** * Configuration to instruct how sensitive data should be handled. * Structure is documented below. */ redactionConfig?: outputs.ces.AppLoggingSettingsRedactionConfig; } interface AppLoggingSettingsAudioRecordingConfig { /** * The [Cloud Storage](https://cloud.google.com/storage) bucket to store the * session audio recordings. The URI must start with "gs://". * Note: If the Cloud Storage bucket is in a different project from the app, * you should grant `storage.objects.create` permission to the CES service * agent `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ gcsBucket?: string; /** * The Cloud Storage path prefix for audio recordings. * This prefix can include the following placeholders, which will be * dynamically substituted at serving time: * - $project: project ID * - $location: app location * - $app: app ID * - $date: session date in YYYY-MM-DD format * - $session: session ID * If the path prefix is not specified, the default prefix * `$project/$location/$app/$date/$session/` will be used. */ gcsPathPrefix?: string; } interface AppLoggingSettingsBigqueryExportSettings { /** * The BigQuery dataset to export the data to. */ dataset?: string; /** * Indicates whether the BigQuery export is enabled. */ enabled?: boolean; /** * The project ID of the BigQuery dataset to export the data to. * Note: If the BigQuery dataset is in a different project from the app, you should grant * roles/bigquery.admin role to the CES service agent service-@gcp-sa-ces.iam.gserviceaccount.com. */ project?: string; } interface AppLoggingSettingsCloudLoggingSettings { /** * Whether to enable Cloud Logging for the sessions. 
*/ enableCloudLogging?: boolean; } interface AppLoggingSettingsConversationLoggingSettings { /** * Whether to disable conversation logging for the sessions. */ disableConversationLogging?: boolean; } interface AppLoggingSettingsRedactionConfig { /** * [DLP](https://cloud.google.com/dlp/docs) deidentify template name to * instruct on how to de-identify content. * Format: * `projects/{project}/locations/{location}/deidentifyTemplates/{deidentify_template}` */ deidentifyTemplate?: string; /** * If true, redaction will be applied in various logging scenarios, including * conversation history, Cloud Logging and audio recording. */ enableRedaction?: boolean; /** * [DLP](https://cloud.google.com/dlp/docs) inspect template name to configure * detection of sensitive data types. * Format: * `projects/{project}/locations/{location}/inspectTemplates/{inspect_template}` */ inspectTemplate?: string; } interface AppModelSettings { /** * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model?: string; /** * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature?: number; } interface AppTimeZoneSettings { /** * The time zone of the app from the time zone database, e.g., America/Los_Angeles, Europe/Paris. */ timeZone?: string; } interface AppVariableDeclaration { /** * The description of the variable. */ description: string; /** * The name of the variable. The name must start with a letter or underscore * and contain only letters, numbers, or underscores. */ name: string; /** * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. */ schema: outputs.ces.AppVariableDeclarationSchema; } interface AppVariableDeclarationSchema { /** * Optional. 
Defines the schema for additional properties allowed in an object. * The value must be a valid JSON string representing the Schema object. * (Note: OpenAPI also allows a boolean, this definition expects a Schema JSON). */ additionalProperties?: string; /** * Optional. The instance value should be valid against at least one of the schemas in this list. */ anyOf?: string; /** * Optional. Default value of the data. Represents a dynamically typed value * which can be either null, a number, a string, a boolean, a struct, * or a list of values. The provided default value must be compatible * with the defined 'type' and other schema constraints. */ default?: string; /** * A map of definitions for use by ref. Only allowed at the root of the schema. */ defs?: string; /** * The description of the data. */ description?: string; /** * Possible values of the element of primitive type with enum format. * Examples: * 1. We can define direction as : * {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} * 2. We can define apartment number as : * {type:INTEGER, format:enum, enum:["101", "201", "301"]} */ enums?: string[]; /** * Schema of the elements of Type.ARRAY. */ items?: string; /** * Indicates if the value may be null. */ nullable?: boolean; /** * Optional. Schemas of initial elements of Type.ARRAY. */ prefixItems?: string; /** * Properties of Type.OBJECT. */ properties?: string; /** * Allows indirect references between schema nodes. The value should be a * valid reference to a child of the root `defs`. * For example, the following schema defines a reference to a schema node * named "Pet": * type: object * properties: * pet: * ref: #/defs/Pet * defs: * Pet: * type: object * properties: * name: * type: string * The value of the "pet" property is a reference to the schema node * named "Pet". * See details in * https://json-schema.org/understanding-json-schema/structuring. */ ref?: string; /** * Required properties of Type.OBJECT. 
*/ requireds?: string[]; /** * The title of the schema. */ title?: string; /** * The type of the data. * Possible values: * STRING * INTEGER * NUMBER * BOOLEAN * OBJECT * ARRAY */ type: string; /** * Indicate the items in the array must be unique. Only applies to TYPE.ARRAY. */ uniqueItems?: boolean; } interface AppVersionSnapshot { /** * (Output) * List of agents in the app. * Structure is documented below. */ agents: outputs.ces.AppVersionSnapshotAgent[]; /** * Resource ID segment making up resource `name`. It identifies the resource within its parent collection as described in https://google.aip.dev/122. */ apps: outputs.ces.AppVersionSnapshotApp[]; /** * (Output) * List of examples in the app. * Structure is documented below. */ examples: outputs.ces.AppVersionSnapshotExample[]; /** * (Output) * List of guardrails for the app. * Format: * `projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}` */ guardrails: outputs.ces.AppVersionSnapshotGuardrail[]; /** * (Output) * List of available tools for the agent. * Format: `projects/{project}/locations/{location}/apps/{app}/tools/{tool}` */ tools: outputs.ces.AppVersionSnapshotTool[]; /** * (Output) * List of toolsets for the agent. * Structure is documented below. */ toolsets: outputs.ces.AppVersionSnapshotToolset[]; } interface AppVersionSnapshotAgent { /** * (Output) * The callbacks to execute after the agent is called. * The provided callbacks are executed sequentially in the exact order they * are given in the list. If a callback returns an overridden response, * execution stops and any remaining callbacks are skipped. * Structure is documented below. */ afterAgentCallbacks: outputs.ces.AppVersionSnapshotAgentAfterAgentCallback[]; /** * (Output) * The callbacks to execute after the model is called. If there are multiple * calls to the model, the callback will be executed multiple times. * The provided callbacks are executed sequentially in the exact order they * are given in the list. 
If a callback returns an overridden response, * execution stops and any remaining callbacks are skipped. * Structure is documented below. */ afterModelCallbacks: outputs.ces.AppVersionSnapshotAgentAfterModelCallback[]; /** * (Output) * The callbacks to execute after the tool is invoked. If there are multiple * tool invocations, the callback will be executed multiple times. * The provided callbacks are executed sequentially in the exact order they * are given in the list. If a callback returns an overridden response, * execution stops and any remaining callbacks are skipped. * Structure is documented below. */ afterToolCallbacks: outputs.ces.AppVersionSnapshotAgentAfterToolCallback[]; /** * (Output) * The callbacks to execute before the agent is called. * The provided callbacks are executed sequentially in the exact order they * are given in the list. If a callback returns an overridden response, * execution stops and any remaining callbacks are skipped. * Structure is documented below. */ beforeAgentCallbacks: outputs.ces.AppVersionSnapshotAgentBeforeAgentCallback[]; /** * (Output) * The callbacks to execute before the model is called. If there are multiple * calls to the model, the callback will be executed multiple times. * The provided callbacks are executed sequentially in the exact order they * are given in the list. If a callback returns an overridden response, * execution stops and any remaining callbacks are skipped. * Structure is documented below. */ beforeModelCallbacks: outputs.ces.AppVersionSnapshotAgentBeforeModelCallback[]; /** * (Output) * The callbacks to execute before the tool is invoked. If there are multiple * tool invocations, the callback will be executed multiple times. * The provided callbacks are executed sequentially in the exact order they * are given in the list. If a callback returns an overridden response, * execution stops and any remaining callbacks are skipped. * Structure is documented below. 
*/ beforeToolCallbacks: outputs.ces.AppVersionSnapshotAgentBeforeToolCallback[]; /** * (Output) * List of child agents in the agent tree. * Format: `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ childAgents: string[]; /** * (Output) * Timestamp when the toolset was created. */ createTime: string; /** * The description of the app version. */ description: string; /** * The display name of the app version. */ displayName: string; /** * (Output) * ETag used to ensure the object hasn't changed during a read-modify-write * operation. If the etag is empty, the update will overwrite any concurrent * changes. */ etag: string; /** * (Output) * If the tool is generated by the LLM assistant, this field contains a * descriptive summary of the generation. */ generatedSummary: string; /** * (Output) * List of guardrails for the app. * Format: * `projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}` */ guardrails: string[]; /** * (Output) * Instructions for the LLM model to guide the agent's behavior. */ instruction: string; /** * (Output) * Default agent type. The agent uses instructions and callbacks specified in * the agent to perform the task using a large language model. */ llmAgents: outputs.ces.AppVersionSnapshotAgentLlmAgent[]; /** * (Output) * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings: outputs.ces.AppVersionSnapshotAgentModelSetting[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * The agent which will transfer execution to an existing remote * [Dialogflow](https://cloud.google.com/dialogflow/cx/docs/concept/console-conversational-agents) * agent flow. 
The corresponding Dialogflow agent will process subsequent user * queries until the session ends or flow ends and the control is transferred * back to the parent CES agent. * Structure is documented below. */ remoteDialogflowAgents: outputs.ces.AppVersionSnapshotAgentRemoteDialogflowAgent[]; /** * (Output) * List of available tools for the agent. * Format: `projects/{project}/locations/{location}/apps/{app}/tools/{tool}` */ tools: string[]; /** * (Output) * List of toolsets for the agent. * Structure is documented below. */ toolsets: outputs.ces.AppVersionSnapshotAgentToolset[]; /** * (Output) * Timestamp when the agent was last updated. */ updateTime: string; } interface AppVersionSnapshotAgentAfterAgentCallback { /** * The description of the callback. */ description: string; /** * (Output) * Whether the callback is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the callback. */ pythonCode: string; } interface AppVersionSnapshotAgentAfterModelCallback { /** * The description of the callback. */ description: string; /** * (Output) * Whether the callback is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the callback. */ pythonCode: string; } interface AppVersionSnapshotAgentAfterToolCallback { /** * The description of the callback. */ description: string; /** * (Output) * Whether the callback is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the callback. */ pythonCode: string; } interface AppVersionSnapshotAgentBeforeAgentCallback { /** * The description of the callback. */ description: string; /** * (Output) * Whether the callback is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the callback. */ pythonCode: string; } interface AppVersionSnapshotAgentBeforeModelCallback { /** * The description of the callback. */ description: string; /** * (Output) * Whether the callback is disabled. 
*/ disabled: boolean; /** * (Output) * The Python code to execute for the callback. */ pythonCode: string; } interface AppVersionSnapshotAgentBeforeToolCallback { /** * The description of the callback. */ description: string; /** * (Output) * Whether the callback is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the callback. */ pythonCode: string; } interface AppVersionSnapshotAgentLlmAgent { } interface AppVersionSnapshotAgentModelSetting { /** * (Output) * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model: string; /** * (Output) * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature: number; } interface AppVersionSnapshotAgentRemoteDialogflowAgent { /** * (Output) * The name of the agent to transfer the conversation to. The agent must be * in the same app as the current agent. * Format: * `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ agent: string; /** * (Output) * The environment ID of the Dialogflow agent to be used for the agent * execution. If not specified, the draft environment will be used. */ environmentId: string; /** * (Output) * The flow ID of the flow in the Dialogflow agent. */ flowId: string; /** * (Output) * The mapping of the app variables names to the Dialogflow session * parameters names to be sent to the Dialogflow agent as input. */ inputVariableMapping: { [key: string]: string; }; /** * (Output) * The mapping of the Dialogflow session parameters names to the app * variables names to be sent back to the CES agent after the Dialogflow * agent execution ends. */ outputVariableMapping: { [key: string]: string; }; } interface AppVersionSnapshotAgentToolset { /** * (Output) * The tools IDs to filter the toolset. 
*/ toolIds: string[]; /** * (Output) * The resource name of the Toolset from which this tool is derived. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ toolset: string; } interface AppVersionSnapshotApp { /** * (Output) * Configuration for how the input and output audio should be processed and * delivered. * Structure is documented below. */ audioProcessingConfigs: outputs.ces.AppVersionSnapshotAppAudioProcessingConfig[]; /** * (Output) * The default client certificate settings for the app. * Structure is documented below. */ clientCertificateSettings: outputs.ces.AppVersionSnapshotAppClientCertificateSetting[]; /** * (Output) * Timestamp when the toolset was created. */ createTime: string; /** * (Output) * Data store related settings for the app. * Structure is documented below. */ dataStoreSettings: outputs.ces.AppVersionSnapshotAppDataStoreSetting[]; /** * (Output) * A ChannelProfile configures the agent's behavior for a specific communication * channel, such as web UI or telephony. * Structure is documented below. */ defaultChannelProfiles: outputs.ces.AppVersionSnapshotAppDefaultChannelProfile[]; /** * (Output) * Number of deployments in the app. */ deploymentCount: number; /** * The description of the app version. */ description: string; /** * The display name of the app version. */ displayName: string; /** * (Output) * ETag used to ensure the object hasn't changed during a read-modify-write * operation. If the etag is empty, the update will overwrite any concurrent * changes. */ etag: string; /** * (Output) * Threshold settings for metrics in an Evaluation. * Structure is documented below. */ evaluationMetricsThresholds: outputs.ces.AppVersionSnapshotAppEvaluationMetricsThreshold[]; /** * (Output) * Instructions for all the agents in the app. * You can use this instruction to set up a stable identity or personality * across all the agents. */ globalInstruction: string; /** * (Output) * List of guardrails for the app. 
* Format: * `projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}` */ guardrails: string[]; /** * (Output) * Language settings of the app. * Structure is documented below. */ languageSettings: outputs.ces.AppVersionSnapshotAppLanguageSetting[]; /** * (Output) * Settings to describe the logging behaviors for the app. * Structure is documented below. */ loggingSettings: outputs.ces.AppVersionSnapshotAppLoggingSetting[]; /** * (Output) * Metadata about the app. This field can be used to store additional * information relevant to the app's details or intended usages. */ metadata: { [key: string]: string; }; /** * (Output) * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings: outputs.ces.AppVersionSnapshotAppModelSetting[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * The root agent is the entry point of the app. * Format: `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ rootAgent: string; /** * (Output) * TimeZone settings of the app. * Structure is documented below. */ timeZoneSettings: outputs.ces.AppVersionSnapshotAppTimeZoneSetting[]; /** * (Output) * Timestamp when the toolset was last updated. */ updateTime: string; /** * (Output) * The declarations of the variables. * Structure is documented below. */ variableDeclarations: outputs.ces.AppVersionSnapshotAppVariableDeclaration[]; } interface AppVersionSnapshotAppAudioProcessingConfig { /** * (Output) * Configuration for the ambient sound to be played with the synthesized agent * response, to enhance the naturalness of the conversation. * Structure is documented below. */ ambientSoundConfigs: outputs.ces.AppVersionSnapshotAppAudioProcessingConfigAmbientSoundConfig[]; /** * (Output) * Configuration for how the user barge-in activities should be handled. 
* Structure is documented below. */ bargeInConfigs: outputs.ces.AppVersionSnapshotAppAudioProcessingConfigBargeInConfig[]; /** * (Output) * The duration of user inactivity (no speech or interaction) before the agent * prompts the user for reengagement. If not set, the agent will not prompt * the user for reengagement. */ inactivityTimeout: string; /** * (Output) * Configuration of how the agent response should be synthesized, mapping from * the language code to SynthesizeSpeechConfig. * If the configuration for the specified language code is not found, the * configuration for the root language code will be used. For example, if the * map contains "en-us" and "en", and the specified language code is "en-gb", * then "en" configuration will be used. * Note: Language code is case-insensitive. * Structure is documented below. */ synthesizeSpeechConfigs: outputs.ces.AppVersionSnapshotAppAudioProcessingConfigSynthesizeSpeechConfig[]; } interface AppVersionSnapshotAppAudioProcessingConfigAmbientSoundConfig { /** * (Output) * Ambient noise as a mono-channel, 16kHz WAV file stored in [Cloud * Storage](https://cloud.google.com/storage). * Note: Please make sure the CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com` has * `storage.objects.get` permission to the Cloud Storage object. */ gcsUri: string; /** * (Output) * Name of the prebuilt ambient sound. * Valid values are: - "coffeeShop" - "keyboard" - "keypad" - "hum" * -"office1" - "office2" - "office3" * -"room1" - "room2" - "room3" * -"room4" - "room5" - "airConditioner" */ prebuiltAmbientSound: string; /** * (Output) * Volume gain (in dB) of the normal native volume supported by * ambient noise, in the range [-96.0, 16.0]. If unset, or set to a value of * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) * will play at approximately half the amplitude of the normal native signal * amplitude. 
A value of +6.0 (dB) will play at approximately twice the * amplitude of the normal native signal amplitude. We strongly recommend not * to exceed +10 (dB) as there's usually no effective increase in loudness for * any value greater than that. */ volumeGainDb: number; } interface AppVersionSnapshotAppAudioProcessingConfigBargeInConfig { /** * (Output) * If enabled, the agent will adapt its next response based on the assumption * that the user hasn't heard the full preceding agent message. * This should not be used in scenarios where agent responses are displayed * visually. */ bargeInAwareness: boolean; } interface AppVersionSnapshotAppAudioProcessingConfigSynthesizeSpeechConfig { /** * (Required) The identifier for this object. Format specified above. */ languageCode: string; /** * (Output) * The speaking rate/speed in the range [0.25, 2.0]. 1.0 is the normal native * speed supported by the specific voice. 2.0 is twice as fast, and 0.5 is * half as fast. Values outside of the range [0.25, 2.0] will return an error. */ speakingRate: number; /** * (Output) * The name of the voice. If not set, the service will choose a * voice based on the other parameters such as language_code. * For the list of available voices, please refer to Supported voices and * languages from Cloud Text-to-Speech. */ voice: string; } interface AppVersionSnapshotAppClientCertificateSetting { /** * (Output) * The passphrase to decrypt the private key. * Should be left unset if the private key is not encrypted. */ passphrase: string; /** * (Output) * The name of the SecretManager secret version resource * storing the private key encoded in PEM format. * Format: projects/{project}/secrets/{secret}/versions/{version} */ privateKey: string; /** * (Output) * The TLS certificate encoded in PEM format. * This string must include the begin header and end footer lines. */ tlsCertificate: string; } interface AppVersionSnapshotAppDataStoreSetting { /** * (Output) * The engines for the app. 
* Structure is documented below. */ engines: outputs.ces.AppVersionSnapshotAppDataStoreSettingEngine[]; } interface AppVersionSnapshotAppDataStoreSettingEngine { /** * (Output) * Identifier. The unique identifier of the data store engine. * NOTE(review): the format below appears copy-pasted from toolset docs by the generator — verify against the API. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * The type of the data store. This field is readonly and populated by the * server. * Possible values: * PUBLIC_WEB * UNSTRUCTURED * FAQ * CONNECTOR */ type: string; } interface AppVersionSnapshotAppDefaultChannelProfile { /** * (Output) * The type of the channel profile. * Possible values: * UNKNOWN * WEB_UI * API * TWILIO * GOOGLE_TELEPHONY_PLATFORM * CONTACT_CENTER_AS_A_SERVICE */ channelType: string; /** * (Output) * Whether to disable user barge-in in the conversation. * - true: User interruptions are disabled while the agent is speaking. * - false: The agent retains automatic control over when the user can interrupt. */ disableBargeInControl: boolean; /** * (Output) * Whether to disable DTMF (dual-tone multi-frequency). */ disableDtmf: boolean; /** * (Output) * Represents the persona property of a channel. * Structure is documented below. */ personaProperties: outputs.ces.AppVersionSnapshotAppDefaultChannelProfilePersonaProperty[]; /** * (Output) * The unique identifier of the channel profile. */ profileId: string; /** * (Output) * Message for configuration for the web widget. * Structure is documented below. */ webWidgetConfigs: outputs.ces.AppVersionSnapshotAppDefaultChannelProfileWebWidgetConfig[]; } interface AppVersionSnapshotAppDefaultChannelProfilePersonaProperty { /** * (Output) * The persona of the channel. * Possible values: * UNKNOWN * CONCISE * CHATTY */ persona: string; } interface AppVersionSnapshotAppDefaultChannelProfileWebWidgetConfig { /** * (Output) * The modality of the web widget. 
* Possible values: * UNKNOWN_MODALITY * CHAT_AND_VOICE * VOICE_ONLY * CHAT_ONLY */ modality: string; /** * (Output) * The theme of the web widget. * Possible values: * UNKNOWN_THEME * LIGHT * DARK */ theme: string; /** * (Output) * The title of the web widget. */ webWidgetTitle: string; } interface AppVersionSnapshotAppEvaluationMetricsThreshold { /** * (Output) * Settings for golden evaluations. * Structure is documented below. */ goldenEvaluationMetricsThresholds: outputs.ces.AppVersionSnapshotAppEvaluationMetricsThresholdGoldenEvaluationMetricsThreshold[]; } interface AppVersionSnapshotAppEvaluationMetricsThresholdGoldenEvaluationMetricsThreshold { /** * (Output) * Expectation level metrics thresholds. * Structure is documented below. */ expectationLevelMetricsThresholds: outputs.ces.AppVersionSnapshotAppEvaluationMetricsThresholdGoldenEvaluationMetricsThresholdExpectationLevelMetricsThreshold[]; /** * (Output) * Turn level metrics thresholds. * Structure is documented below. */ turnLevelMetricsThresholds: outputs.ces.AppVersionSnapshotAppEvaluationMetricsThresholdGoldenEvaluationMetricsThresholdTurnLevelMetricsThreshold[]; } interface AppVersionSnapshotAppEvaluationMetricsThresholdGoldenEvaluationMetricsThresholdExpectationLevelMetricsThreshold { /** * (Output) * The success threshold for individual tool invocation parameter * correctness. Must be a float between 0 and 1. Default is 1.0. */ toolInvocationParameterCorrectnessThreshold: number; } interface AppVersionSnapshotAppEvaluationMetricsThresholdGoldenEvaluationMetricsThresholdTurnLevelMetricsThreshold { /** * (Output) * The success threshold for overall tool invocation correctness. Must be * a float between 0 and 1. Default is 1.0. */ overallToolInvocationCorrectnessThreshold: number; /** * (Output) * The success threshold for semantic similarity. Must be an integer * between 0 and 4. Default is >= 3. 
*/ semanticSimilaritySuccessThreshold: number; } interface AppVersionSnapshotAppLanguageSetting { /** * (Output) * The default language code of the app. */ defaultLanguageCode: string; /** * (Output) * Enables multilingual support. If true, agents in the app will use pre-built * instructions to improve handling of multilingual input. */ enableMultilingualSupport: boolean; /** * (Output) * The action to perform when an agent receives input in an unsupported * language. * This can be a predefined action or a custom tool call. * Valid values are: * - A tool's full resource name, which triggers a specific tool execution. * - A predefined system action, such as "escalate" or "exit", which triggers * an EndSession signal with corresponding metadata * to terminate the conversation. */ fallbackAction: string; /** * (Output) * List of languages codes supported by the app, in addition to the * `defaultLanguageCode`. */ supportedLanguageCodes: string[]; } interface AppVersionSnapshotAppLoggingSetting { /** * (Output) * Configuration for how the audio interactions should be recorded. * Structure is documented below. */ audioRecordingConfigs: outputs.ces.AppVersionSnapshotAppLoggingSettingAudioRecordingConfig[]; /** * (Output) * Settings to describe the BigQuery export behaviors for the app. * Structure is documented below. */ bigqueryExportSettings: outputs.ces.AppVersionSnapshotAppLoggingSettingBigqueryExportSetting[]; /** * (Output) * Settings to describe the Cloud Logging behaviors for the app. * Structure is documented below. */ cloudLoggingSettings: outputs.ces.AppVersionSnapshotAppLoggingSettingCloudLoggingSetting[]; /** * (Output) * Settings to describe the conversation logging behaviors for the app. * Structure is documented below. */ conversationLoggingSettings: outputs.ces.AppVersionSnapshotAppLoggingSettingConversationLoggingSetting[]; /** * (Output) * Configuration to instruct how sensitive data should be handled. * Structure is documented below. 
*/ redactionConfigs: outputs.ces.AppVersionSnapshotAppLoggingSettingRedactionConfig[]; } interface AppVersionSnapshotAppLoggingSettingAudioRecordingConfig { /** * (Output) * The [Cloud Storage](https://cloud.google.com/storage) bucket to store the * session audio recordings. The URI must start with "gs://". * Note: If the Cloud Storage bucket is in a different project from the app, * you should grant `storage.objects.create` permission to the CES service * agent `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ gcsBucket: string; /** * (Output) * The Cloud Storage path prefix for audio recordings. * This prefix can include the following placeholders, which will be * dynamically substituted at serving time: * - $project: project ID * - $location: app location * - $app: app ID * - $date: session date in YYYY-MM-DD format * - $session: session ID * If the path prefix is not specified, the default prefix * `$project/$location/$app/$date/$session/` will be used. */ gcsPathPrefix: string; } interface AppVersionSnapshotAppLoggingSettingBigqueryExportSetting { /** * (Output) * The BigQuery dataset to export the data to. */ dataset: string; /** * (Output) * Whether BigQuery export is enabled. */ enabled: boolean; /** * The ID of the project in which the resource belongs. * If it is not provided, the provider project is used. */ project: string; } interface AppVersionSnapshotAppLoggingSettingCloudLoggingSetting { /** * (Output) * Whether to enable Cloud Logging for the sessions. */ enableCloudLogging: boolean; } interface AppVersionSnapshotAppLoggingSettingConversationLoggingSetting { /** * (Output) * Whether to disable conversation logging for the sessions. */ disableConversationLogging: boolean; } interface AppVersionSnapshotAppLoggingSettingRedactionConfig { /** * (Output) * [DLP](https://cloud.google.com/dlp/docs) deidentify template name to * instruct on how to de-identify content. 
* Format: * `projects/{project}/locations/{location}/deidentifyTemplates/{deidentify_template}` */ deidentifyTemplate: string; /** * (Output) * If true, redaction will be applied in various logging scenarios, including * conversation history, Cloud Logging and audio recording. */ enableRedaction: boolean; /** * (Output) * [DLP](https://cloud.google.com/dlp/docs) inspect template name to configure * detection of sensitive data types. * Format: * `projects/{project}/locations/{location}/inspectTemplates/{inspect_template}` */ inspectTemplate: string; } interface AppVersionSnapshotAppModelSetting { /** * (Output) * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model: string; /** * (Output) * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature: number; } interface AppVersionSnapshotAppTimeZoneSetting { /** * (Output) * The time zone of the app from the time zone database, e.g., America/Los_Angeles, Europe/Paris. */ timeZone: string; } interface AppVersionSnapshotAppVariableDeclaration { /** * The description of the variable declaration. */ description: string; /** * (Output) * Identifier. The unique identifier of the variable declaration. * NOTE(review): the format below appears copy-pasted from toolset docs by the generator — verify against the API. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. */ schemas: outputs.ces.AppVersionSnapshotAppVariableDeclarationSchema[]; } interface AppVersionSnapshotAppVariableDeclarationSchema { /** * (Output) * Optional. Defines the schema for additional properties allowed in an object. * The value must be a valid JSON string representing the Schema object. 
* (Note: OpenAPI also allows a boolean, this definition expects a Schema JSON). */ additionalProperties: string; /** * (Output) * Optional. The instance value should be valid against at least one of the schemas in this list. */ anyOf: string; /** * (Output) * Optional. Default value of the data. Represents a dynamically typed value * which can be either null, a number, a string, a boolean, a struct, * or a list of values. The provided default value must be compatible * with the defined 'type' and other schema constraints. */ default: string; /** * (Output) * A map of definitions for use by ref. Only allowed at the root of the schema. */ defs: string; /** * The description of the app version. */ description: string; /** * (Output) * Possible values of the element of primitive type with enum format. * Examples: * 1. We can define direction as : * {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} * 2. We can define apartment number as : * {type:INTEGER, format:enum, enum:["101", "201", "301"]} */ enums: string[]; /** * (Output) * Schema of the elements of Type.ARRAY. */ items: string; /** * (Output) * Indicates if the value may be null. */ nullable: boolean; /** * (Output) * Optional. Schemas of initial elements of Type.ARRAY. */ prefixItems: string; /** * (Output) * Properties of Type.OBJECT. */ properties: string; /** * (Output) * Allows indirect references between schema nodes. The value should be a * valid reference to a child of the root `defs`. * For example, the following schema defines a reference to a schema node * named "Pet": * type: object * properties: * pet: * ref: #/defs/Pet * defs: * Pet: * type: object * properties: * name: * type: string * The value of the "pet" property is a reference to the schema node * named "Pet". * See details in * https://json-schema.org/understanding-json-schema/structuring. */ ref: string; /** * (Output) * Required properties of Type.OBJECT. 
*/ requireds: string[]; /** * (Output) * The type of the data store. This field is readonly and populated by the * server. * Possible values: * PUBLIC_WEB * UNSTRUCTURED * FAQ * CONNECTOR */ type: string; /** * (Output) * Indicate the items in the array must be unique. Only applies to TYPE.ARRAY. */ uniqueItems: boolean; } interface AppVersionSnapshotExample { /** * (Output) * Timestamp when the toolset was created. */ createTime: string; /** * The description of the app version. */ description: string; /** * The display name of the app version. */ displayName: string; /** * (Output) * The agent that initially handles the conversation. If not specified, the * example represents a conversation that is handled by the root agent. * Format: `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ entryAgent: string; /** * (Output) * ETag used to ensure the object hasn't changed during a read-modify-write * operation. If the etag is empty, the update will overwrite any concurrent * changes. */ etag: string; /** * (Output) * The example may become invalid if referencing resources are deleted. * Invalid examples will not be used as few-shot examples. */ invalid: boolean; /** * (Output) * The collection of messages that make up the conversation. * Structure is documented below. */ messages: outputs.ces.AppVersionSnapshotExampleMessage[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * Timestamp when the toolset was last updated. */ updateTime: string; } interface AppVersionSnapshotExampleMessage { /** * (Output) * Content of the message as a series of chunks. * Structure is documented below. */ chunks: outputs.ces.AppVersionSnapshotExampleMessageChunk[]; /** * (Output) * The role within the conversation, e.g., user, agent. 
*/ role: string; } interface AppVersionSnapshotExampleMessageChunk { /** * (Output) * Represents an event indicating the transfer of a conversation to a different * agent. * Structure is documented below. */ agentTransfers: outputs.ces.AppVersionSnapshotExampleMessageChunkAgentTransfer[]; /** * (Output) * Represents an image input or output in the conversation. * Structure is documented below. */ images: outputs.ces.AppVersionSnapshotExampleMessageChunkImage[]; /** * (Output) * Text for the agent to respond with. */ text: string; /** * (Output) * Request for the client or the agent to execute the specified tool. * Structure is documented below. */ toolCalls: outputs.ces.AppVersionSnapshotExampleMessageChunkToolCall[]; /** * (Output) * The execution result of a specific tool from the client or the agent. * Structure is documented below. */ toolResponses: outputs.ces.AppVersionSnapshotExampleMessageChunkToolResponse[]; /** * (Output) * A struct represents variables that were updated in the conversation, * keyed by variable names. */ updatedVariables: string; } interface AppVersionSnapshotExampleMessageChunkAgentTransfer { /** * The display name of the app version. */ displayName: string; /** * (Output) * The agent to which the conversation is being transferred. The agent will * handle the conversation from this point forward. * Format: `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ targetAgent: string; } interface AppVersionSnapshotExampleMessageChunkImage { /** * (Output) * Raw bytes of the image. */ data: string; /** * (Output) * The IANA standard MIME type of the source data. * Supported image types includes: * * image/png * * image/jpeg * * image/webp */ mimeType: string; } interface AppVersionSnapshotExampleMessageChunkToolCall { /** * (Output) * The input parameters and values for the tool in JSON object format. */ args: string; /** * The display name of the app version. 
*/ displayName: string; /** * (Output) * The matching ID of the tool call the response is for. */ id: string; /** * (Output) * The name of the tool to execute. * Format: `projects/{project}/locations/{location}/apps/{app}/tools/{tool}` */ tool: string; /** * (Output) * A tool that is created from a toolset. * Structure is documented below. */ toolsetTools: outputs.ces.AppVersionSnapshotExampleMessageChunkToolCallToolsetTool[]; } interface AppVersionSnapshotExampleMessageChunkToolCallToolsetTool { /** * (Output) * The tool ID to filter the tools to retrieve the schema for. */ toolId: string; /** * (Output) * The resource name of the Toolset from which this tool is derived. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ toolset: string; } interface AppVersionSnapshotExampleMessageChunkToolResponse { /** * The display name of the app version. */ displayName: string; /** * (Output) * The matching ID of the tool call the response is for. */ id: string; /** * (Output) * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. */ response: string; /** * (Output) * The name of the tool to execute. * Format: `projects/{project}/locations/{location}/apps/{app}/tools/{tool}` */ tool: string; /** * (Output) * A tool that is created from a toolset. * Structure is documented below. */ toolsetTools: outputs.ces.AppVersionSnapshotExampleMessageChunkToolResponseToolsetTool[]; } interface AppVersionSnapshotExampleMessageChunkToolResponseToolsetTool { /** * (Output) * The tool ID to filter the tools to retrieve the schema for. */ toolId: string; /** * (Output) * The resource name of the Toolset from which this tool is derived. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ toolset: string; } interface AppVersionSnapshotGuardrail { /** * (Output) * Action that is taken when a certain precondition is met. * Structure is documented below. 
*/ actions: outputs.ces.AppVersionSnapshotGuardrailAction[]; /** * (Output) * Guardrail that blocks the conversation based on the code callbacks * provided. * Structure is documented below. */ codeCallbacks: outputs.ces.AppVersionSnapshotGuardrailCodeCallback[]; /** * (Output) * Guardrail that bans certain content from being used in the conversation. * Structure is documented below. */ contentFilters: outputs.ces.AppVersionSnapshotGuardrailContentFilter[]; /** * (Output) * Timestamp when the toolset was created. */ createTime: string; /** * The description of the app version. */ description: string; /** * The display name of the app version. */ displayName: string; /** * (Output) * Whether the guardrail is enabled. */ enabled: boolean; /** * (Output) * ETag used to ensure the object hasn't changed during a read-modify-write * operation. If the etag is empty, the update will overwrite any concurrent * changes. */ etag: string; /** * (Output) * Guardrail that blocks the conversation if the LLM response is considered * violating the policy based on the LLM classification. * Structure is documented below. */ llmPolicies: outputs.ces.AppVersionSnapshotGuardrailLlmPolicy[]; /** * (Output) * Guardrail that blocks the conversation if the input is considered unsafe * based on the LLM classification. * Structure is documented below. */ llmPromptSecurities: outputs.ces.AppVersionSnapshotGuardrailLlmPromptSecurity[]; /** * (Output) * Model safety settings overrides. When this is set, it will override the * default settings and trigger the guardrail if the response is considered * unsafe. * Structure is documented below. */ modelSafeties: outputs.ces.AppVersionSnapshotGuardrailModelSafety[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * Timestamp when the toolset was last updated. 
*/ updateTime: string; } interface AppVersionSnapshotGuardrailAction { /** * (Output) * The agent will immediately respond with a generative answer. * Structure is documented below. */ generativeAnswers: outputs.ces.AppVersionSnapshotGuardrailActionGenerativeAnswer[]; /** * (Output) * The agent will immediately respond with a preconfigured response. * Structure is documented below. */ respondImmediatelies: outputs.ces.AppVersionSnapshotGuardrailActionRespondImmediately[]; /** * (Output) * The agent will transfer the conversation to a different agent. * Structure is documented below. */ transferAgents: outputs.ces.AppVersionSnapshotGuardrailActionTransferAgent[]; } interface AppVersionSnapshotGuardrailActionGenerativeAnswer { /** * (Output) * The prompt definition. If not set, default prompt will be used. */ prompt: string; } interface AppVersionSnapshotGuardrailActionRespondImmediately { /** * (Output) * The canned responses for the agent to choose from. The response is chosen * randomly. * Structure is documented below. */ responses: outputs.ces.AppVersionSnapshotGuardrailActionRespondImmediatelyResponse[]; } interface AppVersionSnapshotGuardrailActionRespondImmediatelyResponse { /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * Text for the agent to respond with. */ text: string; } interface AppVersionSnapshotGuardrailActionTransferAgent { /** * (Output) * The name of the agent to transfer the conversation to. The agent must be * in the same app as the current agent. * Format: * `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ agent: string; } interface AppVersionSnapshotGuardrailCodeCallback { /** * (Output) * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. 
*/ afterAgentCallbacks: outputs.ces.AppVersionSnapshotGuardrailCodeCallbackAfterAgentCallback[]; /** * (Output) * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. */ afterModelCallbacks: outputs.ces.AppVersionSnapshotGuardrailCodeCallbackAfterModelCallback[]; /** * (Output) * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. */ beforeAgentCallbacks: outputs.ces.AppVersionSnapshotGuardrailCodeCallbackBeforeAgentCallback[]; /** * (Output) * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. */ beforeModelCallbacks: outputs.ces.AppVersionSnapshotGuardrailCodeCallbackBeforeModelCallback[]; } interface AppVersionSnapshotGuardrailCodeCallbackAfterAgentCallback { /** * The description of the app version. */ description: string; /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the tool. */ pythonCode: string; } interface AppVersionSnapshotGuardrailCodeCallbackAfterModelCallback { /** * The description of the app version. */ description: string; /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the tool. */ pythonCode: string; } interface AppVersionSnapshotGuardrailCodeCallbackBeforeAgentCallback { /** * The description of the app version. */ description: string; /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the tool. */ pythonCode: string; } interface AppVersionSnapshotGuardrailCodeCallbackBeforeModelCallback { /** * The description of the app version. */ description: string; /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * The Python code to execute for the tool. 
*/ pythonCode: string; } interface AppVersionSnapshotGuardrailContentFilter { /** * (Output) * List of banned phrases. Applies to both user inputs and agent responses. */ bannedContents: string[]; /** * (Output) * List of banned phrases. Applies only to agent responses. */ bannedContentsInAgentResponses: string[]; /** * (Output) * List of banned phrases. Applies only to user inputs. */ bannedContentsInUserInputs: string[]; /** * (Output) * If true, diacritics are ignored during matching. */ disregardDiacritics: boolean; /** * (Output) * Match type for the content filter. * Possible values: * SIMPLE_STRING_MATCH * WORD_BOUNDARY_STRING_MATCH * REGEXP_MATCH */ matchType: string; } interface AppVersionSnapshotGuardrailLlmPolicy { /** * (Output) * If an error occurs during the policy check, fail open and do not trigger * the guardrail. */ failOpen: boolean; /** * (Output) * When checking this policy, consider the last 'n' messages in the * conversation. * When not set a default value of 10 will be used. */ maxConversationMessages: number; /** * (Output) * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings: outputs.ces.AppVersionSnapshotGuardrailLlmPolicyModelSetting[]; /** * (Output) * Defines when to apply the policy check during the conversation. If set to * `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the user input. * When applying the policy to the agent response, additional latency will * be introduced before the agent can respond. * Possible values: * USER_QUERY * AGENT_RESPONSE * USER_QUERY_AND_AGENT_RESPONSE */ policyScope: string; /** * (Output) * The prompt definition. If not set, default prompt will be used. */ prompt: string; } interface AppVersionSnapshotGuardrailLlmPolicyModelSetting { /** * (Output) * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. 
*/ model: string; /** * (Output) * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature: number; } interface AppVersionSnapshotGuardrailLlmPromptSecurity { /** * (Output) * Guardrail that blocks the conversation if the LLM response is considered * violating the policy based on the LLM classification. * Structure is documented below. */ customPolicies: outputs.ces.AppVersionSnapshotGuardrailLlmPromptSecurityCustomPolicy[]; /** * (Output) * Configuration for default system security settings. * Structure is documented below. */ defaultSettings: outputs.ces.AppVersionSnapshotGuardrailLlmPromptSecurityDefaultSetting[]; } interface AppVersionSnapshotGuardrailLlmPromptSecurityCustomPolicy { /** * (Output) * If an error occurs during the policy check, fail open and do not trigger * the guardrail. */ failOpen: boolean; /** * (Output) * When checking this policy, consider the last 'n' messages in the * conversation. * When not set a default value of 10 will be used. */ maxConversationMessages: number; /** * (Output) * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings: outputs.ces.AppVersionSnapshotGuardrailLlmPromptSecurityCustomPolicyModelSetting[]; /** * (Output) * Defines when to apply the policy check during the conversation. If set to * `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the user input. * When applying the policy to the agent response, additional latency will * be introduced before the agent can respond. * Possible values: * USER_QUERY * AGENT_RESPONSE * USER_QUERY_AND_AGENT_RESPONSE */ policyScope: string; /** * (Output) * The prompt definition. If not set, default prompt will be used. 
*/ prompt: string; } interface AppVersionSnapshotGuardrailLlmPromptSecurityCustomPolicyModelSetting { /** * (Output) * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model: string; /** * (Output) * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature: number; } interface AppVersionSnapshotGuardrailLlmPromptSecurityDefaultSetting { /** * (Output) * The default prompt template used by the system. * This field is for display purposes to show the user what prompt * the system uses by default. It is OUTPUT_ONLY. */ defaultPromptTemplate: string; } interface AppVersionSnapshotGuardrailModelSafety { /** * (Output) * List of safety settings. * Structure is documented below. */ safetySettings: outputs.ces.AppVersionSnapshotGuardrailModelSafetySafetySetting[]; } interface AppVersionSnapshotGuardrailModelSafetySafetySetting { /** * (Output) * The harm category. * Possible values: * HARM_CATEGORY_HATE_SPEECH * HARM_CATEGORY_DANGEROUS_CONTENT * HARM_CATEGORY_HARASSMENT * HARM_CATEGORY_SEXUALLY_EXPLICIT */ category: string; /** * (Output) * The harm block threshold. * Possible values: * BLOCK_LOW_AND_ABOVE * BLOCK_MEDIUM_AND_ABOVE * BLOCK_ONLY_HIGH * BLOCK_NONE * OFF */ threshold: string; } interface AppVersionSnapshotTool { /** * (Output) * Represents a client-side function that the agent can invoke. When the * tool is chosen by the agent, control is handed off to the client. * The client is responsible for executing the function and returning the result * as a ToolResponse to continue the interaction with the agent. * Structure is documented below. */ clientFunctions: outputs.ces.AppVersionSnapshotToolClientFunction[]; /** * (Output) * Timestamp when the toolset was created. 
*/ createTime: string; /** * (Output) * Tool to retrieve from Vertex AI Search datastore or engine for grounding. * Accepts either a datastore or an engine, but not both. * See Vertex AI Search: * https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction. * Structure is documented below. */ dataStoreTools: outputs.ces.AppVersionSnapshotToolDataStoreTool[]; /** * The display name of the app version. */ displayName: string; /** * (Output) * ETag used to ensure the object hasn't changed during a read-modify-write * operation. If the etag is empty, the update will overwrite any concurrent * changes. */ etag: string; /** * (Output) * Possible values: * SYNCHRONOUS * ASYNCHRONOUS */ executionType: string; /** * (Output) * If the tool is generated by the LLM assistant, this field contains a * descriptive summary of the generation. */ generatedSummary: string; /** * (Output) * Represents a tool to perform Google web searches for grounding. * See * https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-search. * Structure is documented below. */ googleSearchTools: outputs.ces.AppVersionSnapshotToolGoogleSearchTool[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * A remote API tool defined by an OpenAPI schema. * Structure is documented below. */ openApiTools: outputs.ces.AppVersionSnapshotToolOpenApiTool[]; /** * (Output) * A Python function tool. * Structure is documented below. */ pythonFunctions: outputs.ces.AppVersionSnapshotToolPythonFunction[]; /** * (Output) * The system tool. * Structure is documented below. */ systemTools: outputs.ces.AppVersionSnapshotToolSystemTool[]; /** * (Output) * Timestamp when the toolset was last updated. */ updateTime: string; } interface AppVersionSnapshotToolClientFunction { /** * The description of the app version. 
*/ description: string; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. */ parameters: outputs.ces.AppVersionSnapshotToolClientFunctionParameter[]; /** * (Output) * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. */ responses: outputs.ces.AppVersionSnapshotToolClientFunctionResponse[]; } interface AppVersionSnapshotToolClientFunctionParameter { /** * (Output) * Optional. Defines the schema for additional properties allowed in an object. * The value must be a valid JSON string representing the Schema object. * (Note: OpenAPI also allows a boolean, this definition expects a Schema JSON). */ additionalProperties: string; /** * (Output) * Optional. The instance value should be valid against at least one of the schemas in this list. */ anyOf: string; /** * (Output) * Optional. Default value of the data. Represents a dynamically typed value * which can be either null, a number, a string, a boolean, a struct, * or a list of values. The provided default value must be compatible * with the defined 'type' and other schema constraints. */ default: string; /** * (Output) * A map of definitions for use by ref. Only allowed at the root of the schema. */ defs: string; /** * The description of the app version. */ description: string; /** * (Output) * Possible values of the element of primitive type with enum format. * Examples: * 1. We can define direction as : * {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} * 2. We can define apartment number as : * {type:INTEGER, format:enum, enum:["101", "201", "301"]} */ enums: string[]; /** * (Output) * Schema of the elements of Type.ARRAY. */ items: string; /** * (Output) * Indicates if the value may be null. 
*/ nullable: boolean; /** * (Output) * Optional. Schemas of initial elements of Type.ARRAY. */ prefixItems: string; /** * (Output) * Properties of Type.OBJECT. */ properties: string; /** * (Output) * Allows indirect references between schema nodes. The value should be a * valid reference to a child of the root `defs`. * For example, the following schema defines a reference to a schema node * named "Pet": * type: object * properties: * pet: * ref: #/defs/Pet * defs: * Pet: * type: object * properties: * name: * type: string * The value of the "pet" property is a reference to the schema node * named "Pet". * See details in * https://json-schema.org/understanding-json-schema/structuring. */ ref: string; /** * (Output) * Required properties of Type.OBJECT. */ requireds: string[]; /** * (Output) * The type of the data store. This field is readonly and populated by the * server. * Possible values: * PUBLIC_WEB * UNSTRUCTURED * FAQ * CONNECTOR */ type: string; /** * (Output) * Indicate the items in the array must be unique. Only applies to TYPE.ARRAY. */ uniqueItems: boolean; } interface AppVersionSnapshotToolClientFunctionResponse { /** * (Output) * Optional. Defines the schema for additional properties allowed in an object. * The value must be a valid JSON string representing the Schema object. * (Note: OpenAPI also allows a boolean, this definition expects a Schema JSON). */ additionalProperties: string; /** * (Output) * Optional. The instance value should be valid against at least one of the schemas in this list. */ anyOf: string; /** * (Output) * Optional. Default value of the data. Represents a dynamically typed value * which can be either null, a number, a string, a boolean, a struct, * or a list of values. The provided default value must be compatible * with the defined 'type' and other schema constraints. */ default: string; /** * (Output) * A map of definitions for use by ref. Only allowed at the root of the schema. 
*/ defs: string; /** * The description of the app version. */ description: string; /** * (Output) * Possible values of the element of primitive type with enum format. * Examples: * 1. We can define direction as : * {type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]} * 2. We can define apartment number as : * {type:INTEGER, format:enum, enum:["101", "201", "301"]} */ enums: string[]; /** * (Output) * Schema of the elements of Type.ARRAY. */ items: string; /** * (Output) * Indicates if the value may be null. */ nullable: boolean; /** * (Output) * Optional. Schemas of initial elements of Type.ARRAY. */ prefixItems: string; /** * (Output) * Properties of Type.OBJECT. */ properties: string; /** * (Output) * Allows indirect references between schema nodes. The value should be a * valid reference to a child of the root `defs`. * For example, the following schema defines a reference to a schema node * named "Pet": * type: object * properties: * pet: * ref: #/defs/Pet * defs: * Pet: * type: object * properties: * name: * type: string * The value of the "pet" property is a reference to the schema node * named "Pet". * See details in * https://json-schema.org/understanding-json-schema/structuring. */ ref: string; /** * (Output) * Required properties of Type.OBJECT. */ requireds: string[]; /** * (Output) * The type of the data store. This field is readonly and populated by the * server. * Possible values: * PUBLIC_WEB * UNSTRUCTURED * FAQ * CONNECTOR */ type: string; /** * (Output) * Indicate the items in the array must be unique. Only applies to TYPE.ARRAY. */ uniqueItems: boolean; } interface AppVersionSnapshotToolDataStoreTool { /** * (Output) * Boost specification to boost certain documents. * Structure is documented below. */ boostSpecs: outputs.ces.AppVersionSnapshotToolDataStoreToolBoostSpec[]; /** * The description of the app version. 
*/ description: string; /** * (Output) * Configuration for searching within an Engine, potentially targeting * specific DataStores. * Structure is documented below. */ engineSources: outputs.ces.AppVersionSnapshotToolDataStoreToolEngineSource[]; /** * (Output) * Number of search results to return per query. * The default value is 10. The maximum allowed value is 10. */ maxResults: number; /** * (Output) * The modality configs for the data store. * Structure is documented below. */ modalityConfigs: outputs.ces.AppVersionSnapshotToolDataStoreToolModalityConfig[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; } interface AppVersionSnapshotToolDataStoreToolBoostSpec { /** * (Output) * The Data Store where the boosting configuration is applied. Full resource * name of DataStore, such as * projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}. */ dataStores: string[]; /** * (Output) * A list of boosting specifications. * Structure is documented below. */ specs: outputs.ces.AppVersionSnapshotToolDataStoreToolBoostSpecSpec[]; } interface AppVersionSnapshotToolDataStoreToolBoostSpecSpec { /** * (Output) * A list of boosting specifications. * Structure is documented below. */ conditionBoostSpecs: outputs.ces.AppVersionSnapshotToolDataStoreToolBoostSpecSpecConditionBoostSpec[]; } interface AppVersionSnapshotToolDataStoreToolBoostSpecSpecConditionBoostSpec { /** * (Output) * Strength of the boost, which should be in [-1, 1]. Negative boost means * demotion. Default is 0.0. * Setting to 1.0 gives the suggestions a big promotion. However, it does * not necessarily mean that the top result will be a boosted suggestion. * Setting to -1.0 gives the suggestions a big demotion. However, other * suggestions that are relevant might still be shown. * Setting to 0.0 means no boost applied. The boosting condition is * ignored. 
*/ boost: number; /** * (Output) * Specification for custom ranking based on customer specified attribute * value. It provides more controls for customized ranking than the simple * (condition, boost) combination above. * Structure is documented below. */ boostControlSpecs: outputs.ces.AppVersionSnapshotToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpec[]; /** * (Output) * An expression which specifies a boost condition. The syntax is the same * as filter expression syntax. Currently, the only supported condition is * a list of BCP-47 lang codes. * Example: To boost suggestions in languages en or fr: * (lang_code: ANY("en", "fr")) */ condition: string; } interface AppVersionSnapshotToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpec { /** * (Output) * The attribute type to be used to determine the boost amount. The * attribute value can be derived from the field value of the specified * field_name. In the case of numerical it is straightforward i.e. * attributeValue = numerical_field_value. In the case of freshness * however, attributeValue = (time.now() - datetime_field_value). * Possible values: * NUMERICAL * FRESHNESS */ attributeType: string; /** * (Output) * The control points used to define the curve. The monotonic function * (defined through the interpolationType above) passes through the * control points listed here. * Structure is documented below. */ controlPoints: outputs.ces.AppVersionSnapshotToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpecControlPoint[]; /** * (Output) * The name of the field whose value will be used to determine the * boost amount. */ fieldName: string; /** * (Output) * The interpolation type to be applied to connect the control points * listed below. * Possible values: * LINEAR */ interpolationType: string; } interface AppVersionSnapshotToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpecControlPoint { /** * (Output) * Can be one of: * 1. The numerical field value. * 2. 
The duration spec for freshness: * The value must be formatted as an XSD `dayTimeDuration` value (a * restricted subset of an ISO 8601 duration value). The pattern for * this is: `nDnM`. */ attributeValue: string; /** * (Output) * The value between -1 to 1 by which to boost the score if the * attributeValue evaluates to the value specified above. */ boostAmount: number; } interface AppVersionSnapshotToolDataStoreToolEngineSource { /** * (Output) * Use to target specific DataStores within the Engine. * If empty, the search applies to all DataStores associated with the * Engine. * Structure is documented below. */ dataStoreSources: outputs.ces.AppVersionSnapshotToolDataStoreToolEngineSourceDataStoreSource[]; /** * (Output) * Full resource name of the Engine. * Format: * `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` */ engine: string; /** * (Output) * Filter specification for the DataStore. * See: * https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata */ filter: string; } interface AppVersionSnapshotToolDataStoreToolEngineSourceDataStoreSource { /** * (Output) * A DataStore resource in Vertex AI Search. * Structure is documented below. */ dataStores: outputs.ces.AppVersionSnapshotToolDataStoreToolEngineSourceDataStoreSourceDataStore[]; /** * (Output) * Filter specification for the DataStore. * See: * https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata */ filter: string; } interface AppVersionSnapshotToolDataStoreToolEngineSourceDataStoreSourceDataStore { /** * (Output) * The connector config for the data store connection. * Structure is documented below. */ connectorConfigs: outputs.ces.AppVersionSnapshotToolDataStoreToolEngineSourceDataStoreSourceDataStoreConnectorConfig[]; /** * (Output) * Timestamp when the toolset was created. */ createTime: string; /** * The display name of the app version. 
*/ displayName: string; /** * (Output) * The document processing mode for the data store connection. * Only set for PUBLIC_WEB and UNSTRUCTURED data stores. * Possible values: * DOCUMENTS * CHUNKS */ documentProcessingMode: string; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * The type of the data store. This field is readonly and populated by the * server. * Possible values: * PUBLIC_WEB * UNSTRUCTURED * FAQ * CONNECTOR */ type: string; } interface AppVersionSnapshotToolDataStoreToolEngineSourceDataStoreSourceDataStoreConnectorConfig { /** * (Output) * Resource name of the collection the data store belongs to. */ collection: string; /** * (Output) * Display name of the collection the data store belongs to. */ collectionDisplayName: string; /** * (Output) * The name of the data source. * Example: `salesforce`, `jira`, `confluence`, `bigquery`. */ dataSource: string; } interface AppVersionSnapshotToolDataStoreToolModalityConfig { /** * (Output) * Grounding configuration. * Structure is documented below. */ groundingConfigs: outputs.ces.AppVersionSnapshotToolDataStoreToolModalityConfigGroundingConfig[]; /** * (Output) * The modality type. * Possible values: * TEXT * AUDIO */ modalityType: string; /** * (Output) * Rewriter configuration. * Structure is documented below. */ rewriterConfigs: outputs.ces.AppVersionSnapshotToolDataStoreToolModalityConfigRewriterConfig[]; /** * (Output) * Summarization configuration. * Structure is documented below. */ summarizationConfigs: outputs.ces.AppVersionSnapshotToolDataStoreToolModalityConfigSummarizationConfig[]; } interface AppVersionSnapshotToolDataStoreToolModalityConfigGroundingConfig { /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * The groundedness threshold of the answer based on the retrieved sources. 
* The value has a configurable range of [1, 5]. The level is used to * threshold the groundedness of the answer, meaning that all responses with * a groundedness score below the threshold will fall back to returning * relevant snippets only. * For example, a level of 3 means that the groundedness score must be * 3 or higher for the response to be returned. */ groundingLevel: number; } interface AppVersionSnapshotToolDataStoreToolModalityConfigRewriterConfig { /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings: outputs.ces.AppVersionSnapshotToolDataStoreToolModalityConfigRewriterConfigModelSetting[]; /** * (Output) * The prompt definition. If not set, default prompt will be used. */ prompt: string; } interface AppVersionSnapshotToolDataStoreToolModalityConfigRewriterConfigModelSetting { /** * (Output) * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model: string; /** * (Output) * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature: number; } interface AppVersionSnapshotToolDataStoreToolModalityConfigSummarizationConfig { /** * (Output) * Whether summarization is disabled. */ disabled: boolean; /** * (Output) * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings: outputs.ces.AppVersionSnapshotToolDataStoreToolModalityConfigSummarizationConfigModelSetting[]; /** * (Output) * The prompt definition. If not set, default prompt will be used. 
*/ prompt: string; } interface AppVersionSnapshotToolDataStoreToolModalityConfigSummarizationConfigModelSetting { /** * (Output) * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model: string; /** * (Output) * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature: number; } interface AppVersionSnapshotToolGoogleSearchTool { /** * The description of the app version. */ description: string; /** * (Output) * List of domains to be excluded from the search results. * Example: "example.com". * A maximum of 2000 domains can be excluded. */ excludeDomains: string[]; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; } interface AppVersionSnapshotToolOpenApiTool { /** * (Output) * Authentication information required for API calls. * Structure is documented below. */ apiAuthentications: outputs.ces.AppVersionSnapshotToolOpenApiToolApiAuthentication[]; /** * The description of the app version. */ description: string; /** * (Output) * If true, the agent will ignore unknown fields in the API response for all * operations defined in the OpenAPI schema. */ ignoreUnknownFields: boolean; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * The OpenAPI schema of the toolset. */ openApiSchema: string; /** * (Output) * Configuration for tools using Service Directory. * Structure is documented below. */ serviceDirectoryConfigs: outputs.ces.AppVersionSnapshotToolOpenApiToolServiceDirectoryConfig[]; /** * (Output) * The TLS configuration. 
* Structure is documented below. */ tlsConfigs: outputs.ces.AppVersionSnapshotToolOpenApiToolTlsConfig[]; /** * (Output) * The server URL of the Open API schema. * This field is only set in toolsets in the environment dependencies * during the export process if the schema contains a server url. * During the import process, if this url is present in the environment dependencies * and the schema has the $env_var placeholder, * it will replace the placeholder in the schema. */ url: string; } interface AppVersionSnapshotToolOpenApiToolApiAuthentication { /** * (Output) * Configurations for authentication with API key. * Structure is documented below. */ apiKeyConfigs: outputs.ces.AppVersionSnapshotToolOpenApiToolApiAuthenticationApiKeyConfig[]; /** * (Output) * Configurations for authentication with OAuth. * Structure is documented below. */ oauthConfigs: outputs.ces.AppVersionSnapshotToolOpenApiToolApiAuthenticationOauthConfig[]; /** * (Output) * Configurations for authentication using a custom service account. * Structure is documented below. */ serviceAccountAuthConfigs: outputs.ces.AppVersionSnapshotToolOpenApiToolApiAuthenticationServiceAccountAuthConfig[]; /** * (Output) * Configurations for authentication with [ID * token](https://cloud.google.com/docs/authentication/token-types#id) generated * from service agent. */ serviceAgentIdTokenAuthConfigs: outputs.ces.AppVersionSnapshotToolOpenApiToolApiAuthenticationServiceAgentIdTokenAuthConfig[]; } interface AppVersionSnapshotToolOpenApiToolApiAuthenticationApiKeyConfig { /** * (Output) * The name of the SecretManager secret version resource storing the API key. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ apiKeySecretVersion: string; /** * (Output) * The parameter name or the header name of the API key. 
* E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * (Output) * Key location in the request. * Possible values: * HEADER * QUERY_STRING */ requestLocation: string; } interface AppVersionSnapshotToolOpenApiToolApiAuthenticationOauthConfig { /** * (Output) * The client ID from the OAuth provider. */ clientId: string; /** * (Output) * The name of the SecretManager secret version resource storing the * client secret. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ clientSecretVersion: string; /** * (Output) * OAuth grant types. * Possible values: * CLIENT_CREDENTIAL */ oauthGrantType: string; /** * (Output) * The OAuth scopes to grant. */ scopes: string[]; /** * (Output) * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface AppVersionSnapshotToolOpenApiToolApiAuthenticationServiceAccountAuthConfig { /** * (Output) * The email address of the service account used for authentication. CES * uses this service account to exchange an access token and the access token * is then sent in the `Authorization` header of the request. * The service account must have the * `roles/iam.serviceAccountTokenCreator` role granted to the * CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ serviceAccount: string; } interface AppVersionSnapshotToolOpenApiToolApiAuthenticationServiceAgentIdTokenAuthConfig { } interface AppVersionSnapshotToolOpenApiToolServiceDirectoryConfig { /** * (Output) * The name of [Service * Directory](https://cloud.google.com/service-directory) service. * Format: * `projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}`. * Location of the service directory must be the same as the location of the * app. 
*/ service: string; } interface AppVersionSnapshotToolOpenApiToolTlsConfig { /** * (Output) * Specifies a list of allowed custom CA certificates for HTTPS * verification. * Structure is documented below. */ caCerts: outputs.ces.AppVersionSnapshotToolOpenApiToolTlsConfigCaCert[]; } interface AppVersionSnapshotToolOpenApiToolTlsConfigCaCert { /** * (Output) * The allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. If this * is empty or unspecified, CES will use Google's default trust * store to verify certificates. N.B. Make sure the HTTPS server * certificates are signed with "subject alt name". For instance a * certificate can be self-signed using the following command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") */ cert: string; /** * The display name of the app version. */ displayName: string; } interface AppVersionSnapshotToolPythonFunction { /** * The description of the app version. */ description: string; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * The Python code to execute for the tool. */ pythonCode: string; } interface AppVersionSnapshotToolSystemTool { /** * The description of the app version. */ description: string; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; } interface AppVersionSnapshotToolset { /** * (Output) * Timestamp when the toolset was created. */ createTime: string; /** * The description of the app version. */ description: string; /** * The display name of the app version. */ displayName: string; /** * (Output) * ETag used to ensure the object hasn't changed during a read-modify-write * operation. 
If the etag is empty, the update will overwrite any concurrent * changes. */ etag: string; /** * (Output) * Possible values: * SYNCHRONOUS * ASYNCHRONOUS */ executionType: string; /** * (Output) * Identifier. The unique identifier of the toolset. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ name: string; /** * (Output) * A toolset that contains a list of tools that are defined by an OpenAPI * schema. * Structure is documented below. */ openApiToolsets: outputs.ces.AppVersionSnapshotToolsetOpenApiToolset[]; /** * (Output) * Timestamp when the toolset was last updated. */ updateTime: string; } interface AppVersionSnapshotToolsetOpenApiToolset { /** * (Output) * Authentication information required for API calls. * Structure is documented below. */ apiAuthentications: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetApiAuthentication[]; /** * (Output) * If true, the agent will ignore unknown fields in the API response for all * operations defined in the OpenAPI schema. */ ignoreUnknownFields: boolean; /** * (Output) * The OpenAPI schema of the toolset. */ openApiSchema: string; /** * (Output) * Configuration for tools using Service Directory. * Structure is documented below. */ serviceDirectoryConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetServiceDirectoryConfig[]; /** * (Output) * The TLS configuration. * Structure is documented below. */ tlsConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetTlsConfig[]; /** * (Output) * The server URL of the Open API schema. * This field is only set in toolsets in the environment dependencies * during the export process if the schema contains a server url. * During the import process, if this url is present in the environment dependencies * and the schema has the $env_var placeholder, * it will replace the placeholder in the schema. 
*/ url: string; } interface AppVersionSnapshotToolsetOpenApiToolsetApiAuthentication { /** * (Output) * Configurations for authentication with API key. * Structure is documented below. */ apiKeyConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationApiKeyConfig[]; /** * (Output) * Configurations for authentication with a bearer token. * Structure is documented below. */ bearerTokenConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationBearerTokenConfig[]; /** * (Output) * Configurations for authentication with OAuth. * Structure is documented below. */ oauthConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationOauthConfig[]; /** * (Output) * Configurations for authentication using a custom service account. * Structure is documented below. */ serviceAccountAuthConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationServiceAccountAuthConfig[]; /** * (Output) * Configurations for authentication with [ID * token](https://cloud.google.com/docs/authentication/token-types#id) generated * from service agent. */ serviceAgentIdTokenAuthConfigs: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationServiceAgentIdTokenAuthConfig[]; } interface AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationApiKeyConfig { /** * (Output) * The name of the SecretManager secret version resource storing the API key. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ apiKeySecretVersion: string; /** * (Output) * The parameter name or the header name of the API key. * E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * (Output) * Key location in the request. 
* Possible values: * HEADER * QUERY_STRING */ requestLocation: string; } interface AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationBearerTokenConfig { /** * (Output) */ token: string; } interface AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationOauthConfig { /** * (Output) * The client ID from the OAuth provider. */ clientId: string; /** * (Output) * The name of the SecretManager secret version resource storing the * client secret. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ clientSecretVersion: string; /** * (Output) * OAuth grant types. * Possible values: * CLIENT_CREDENTIAL */ oauthGrantType: string; /** * (Output) * The OAuth scopes to grant. */ scopes: string[]; /** * (Output) * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationServiceAccountAuthConfig { /** * (Output) * The email address of the service account used for authentication. CES * uses this service account to exchange an access token and the access token * is then sent in the `Authorization` header of the request. * The service account must have the * `roles/iam.serviceAccountTokenCreator` role granted to the * CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ serviceAccount: string; } interface AppVersionSnapshotToolsetOpenApiToolsetApiAuthenticationServiceAgentIdTokenAuthConfig { } interface AppVersionSnapshotToolsetOpenApiToolsetServiceDirectoryConfig { /** * (Output) * The name of [Service * Directory](https://cloud.google.com/service-directory) service. * Format: * `projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}`. * Location of the service directory must be the same as the location of the * app. 
*/ service: string; } interface AppVersionSnapshotToolsetOpenApiToolsetTlsConfig { /** * (Output) * Specifies a list of allowed custom CA certificates for HTTPS * verification. * Structure is documented below. */ caCerts: outputs.ces.AppVersionSnapshotToolsetOpenApiToolsetTlsConfigCaCert[]; } interface AppVersionSnapshotToolsetOpenApiToolsetTlsConfigCaCert { /** * (Output) * The allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. If this * is empty or unspecified, CES will use Google's default trust * store to verify certificates. N.B. Make sure the HTTPS server * certificates are signed with "subject alt name". For instance a * certificate can be self-signed using the following command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") */ cert: string; /** * The display name of the app version. */ displayName: string; } interface DeploymentChannelProfile { /** * The type of the channel profile. * Possible values: * UNKNOWN * WEB_UI * API * TWILIO * GOOGLE_TELEPHONY_PLATFORM * CONTACT_CENTER_AS_A_SERVICE */ channelType?: string; /** * Whether to disable user barge-in control in the conversation. * - **true**: User interruptions are disabled while the agent is speaking. * - **false**: The agent retains automatic control over when the user can * interrupt. */ disableBargeInControl?: boolean; /** * Whether to disable DTMF (dual-tone multi-frequency). */ disableDtmf?: boolean; /** * Represents the persona property of a channel. * Structure is documented below. */ personaProperty?: outputs.ces.DeploymentChannelProfilePersonaProperty; /** * The unique identifier of the channel profile. */ profileId?: string; /** * Message for configuration for the web widget. * Structure is documented below. 
*/ webWidgetConfig?: outputs.ces.DeploymentChannelProfileWebWidgetConfig; } interface DeploymentChannelProfilePersonaProperty { /** * The persona of the channel. * Possible values: * UNKNOWN * CONCISE * CHATTY */ persona?: string; } interface DeploymentChannelProfileWebWidgetConfig { /** * The modality of the web widget. * Possible values: * UNKNOWN_MODALITY * CHAT_AND_VOICE * VOICE_ONLY * CHAT_ONLY */ modality?: string; /** * The theme of the web widget. * Possible values: * UNKNOWN_THEME * LIGHT * DARK */ theme?: string; /** * The title of the web widget. */ webWidgetTitle?: string; } interface ExampleMessage { /** * Content of the message as a series of chunks. * Structure is documented below. */ chunks?: outputs.ces.ExampleMessageChunk[]; /** * The role within the conversation, e.g., user, agent. */ role?: string; } interface ExampleMessageChunk { /** * Represents an event indicating the transfer of a conversation to a different * agent. * Structure is documented below. */ agentTransfer?: outputs.ces.ExampleMessageChunkAgentTransfer; /** * Represents an image input or output in the conversation. * Structure is documented below. */ image?: outputs.ces.ExampleMessageChunkImage; /** * Text data. */ text?: string; /** * Request for the client or the agent to execute the specified tool. * Structure is documented below. */ toolCall?: outputs.ces.ExampleMessageChunkToolCall; /** * The execution result of a specific tool from the client or the agent. * Structure is documented below. */ toolResponse?: outputs.ces.ExampleMessageChunkToolResponse; /** * A struct represents variables that were updated in the conversation, * keyed by variable names. */ updatedVariables?: string; } interface ExampleMessageChunkAgentTransfer { /** * (Output) * Display name of the agent. */ displayName: string; /** * The agent to which the conversation is being transferred. The agent will * handle the conversation from this point forward. 
* Format: `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ targetAgent: string; } interface ExampleMessageChunkImage { /** * Raw bytes of the image. */ data: string; /** * The IANA standard MIME type of the source data. * Supported image types includes: * * image/png * * image/jpeg * * image/webp */ mimeType: string; } interface ExampleMessageChunkToolCall { /** * The input parameters and values for the tool in JSON object format. */ args?: string; /** * (Output) * Display name of the tool. */ displayName: string; /** * The unique identifier of the tool call. If populated, the client should * return the execution result with the matching ID in * ToolResponse. */ id?: string; /** * The name of the tool to execute. * Format: `projects/{project}/locations/{location}/apps/{app}/tools/{tool}` */ tool?: string; /** * A tool that is created from a toolset. * Structure is documented below. */ toolsetTool?: outputs.ces.ExampleMessageChunkToolCallToolsetTool; } interface ExampleMessageChunkToolCallToolsetTool { /** * The tool ID to filter the tools to retrieve the schema for. */ toolId?: string; /** * The resource name of the Toolset from which this tool is derived. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ toolset: string; } interface ExampleMessageChunkToolResponse { /** * (Output) * Display name of the tool. */ displayName: string; /** * The matching ID of the tool call the response is for. */ id?: string; /** * The tool execution result in JSON object format. * Use "output" key to specify tool response and "error" key to specify * error details (if any). If "output" and "error" keys are not specified, * then whole "response" is treated as tool execution result. */ response: string; /** * The name of the tool to execute. * Format: `projects/{project}/locations/{location}/apps/{app}/tools/{tool}` */ tool?: string; /** * A tool that is created from a toolset. * Structure is documented below. 
*/ toolsetTool?: outputs.ces.ExampleMessageChunkToolResponseToolsetTool; } interface ExampleMessageChunkToolResponseToolsetTool { /** * The tool ID to filter the tools to retrieve the schema for. */ toolId?: string; /** * The resource name of the Toolset from which this tool is derived. * Format: * `projects/{project}/locations/{location}/apps/{app}/toolsets/{toolset}` */ toolset: string; } interface GuardrailAction { /** * The agent will immediately respond with a generative answer. * Structure is documented below. */ generativeAnswer?: outputs.ces.GuardrailActionGenerativeAnswer; /** * The agent will immediately respond with a preconfigured response. * Structure is documented below. */ respondImmediately?: outputs.ces.GuardrailActionRespondImmediately; /** * The agent will transfer the conversation to a different agent. * Structure is documented below. */ transferAgent?: outputs.ces.GuardrailActionTransferAgent; } interface GuardrailActionGenerativeAnswer { /** * The prompt to use for the generative answer. */ prompt: string; } interface GuardrailActionRespondImmediately { /** * The canned responses for the agent to choose from. The response is chosen * randomly. * Structure is documented below. */ responses: outputs.ces.GuardrailActionRespondImmediatelyResponse[]; } interface GuardrailActionRespondImmediatelyResponse { /** * Whether the response is disabled. Disabled responses are not used by the * agent. */ disabled?: boolean; /** * Text for the agent to respond with. */ text: string; } interface GuardrailActionTransferAgent { /** * The name of the agent to transfer the conversation to. The agent must be * in the same app as the current agent. * Format: * `projects/{project}/locations/{location}/apps/{app}/agents/{agent}` */ agent: string; } interface GuardrailCodeCallback { /** * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. 
*/ afterAgentCallback?: outputs.ces.GuardrailCodeCallbackAfterAgentCallback; /** * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. */ afterModelCallback?: outputs.ces.GuardrailCodeCallbackAfterModelCallback; /** * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. */ beforeAgentCallback?: outputs.ces.GuardrailCodeCallbackBeforeAgentCallback; /** * A callback defines the custom logic to be executed at various stages of * agent interaction. * Structure is documented below. */ beforeModelCallback?: outputs.ces.GuardrailCodeCallbackBeforeModelCallback; } interface GuardrailCodeCallbackAfterAgentCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface GuardrailCodeCallbackAfterModelCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface GuardrailCodeCallbackBeforeAgentCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. */ pythonCode: string; } interface GuardrailCodeCallbackBeforeModelCallback { /** * Human-readable description of the callback. */ description?: string; /** * Whether the callback is disabled. Disabled callbacks are ignored by the * agent. */ disabled?: boolean; /** * The python code to execute for the callback. 
*/ pythonCode: string; } interface GuardrailContentFilter { /** * List of banned phrases. Applies to both user inputs and agent responses. */ bannedContents?: string[]; /** * List of banned phrases. Applies only to agent responses. */ bannedContentsInAgentResponses?: string[]; /** * List of banned phrases. Applies only to user inputs. */ bannedContentsInUserInputs?: string[]; /** * If true, diacritics are ignored during matching. */ disregardDiacritics?: boolean; /** * Match type for the content filter. * Possible values: * SIMPLE_STRING_MATCH * WORD_BOUNDARY_STRING_MATCH * REGEXP_MATCH */ matchType: string; } interface GuardrailLlmPolicy { /** * By default, the LLM policy check is bypassed for short utterances. * Enabling this setting applies the policy check to all utterances, * including those that would normally be skipped. */ allowShortUtterance?: boolean; /** * If an error occurs during the policy check, fail open and do not trigger * the guardrail. */ failOpen?: boolean; /** * When checking this policy, consider the last 'n' messages in the * conversation. * When not set a default value of 10 will be used. */ maxConversationMessages?: number; /** * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings?: outputs.ces.GuardrailLlmPolicyModelSettings; /** * Defines when to apply the policy check during the conversation. If set to * `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the user input. * When applying the policy to the agent response, additional latency will * be introduced before the agent can respond. * Possible values: * USER_QUERY * AGENT_RESPONSE * USER_QUERY_AND_AGENT_RESPONSE * Possible values are: `USER_QUERY`, `AGENT_RESPONSE`, `USER_QUERY_AND_AGENT_RESPONSE`. */ policyScope: string; /** * Policy prompt. */ prompt: string; } interface GuardrailLlmPolicyModelSettings { /** * The LLM model that the agent should use. 
* If not set, the agent will inherit the model from its parent agent. */ model?: string; /** * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature?: number; } interface GuardrailLlmPromptSecurity { /** * Guardrail that blocks the conversation if the LLM response is considered * violating the policy based on the LLM classification. * Structure is documented below. */ customPolicy?: outputs.ces.GuardrailLlmPromptSecurityCustomPolicy; /** * Configuration for default system security settings. * Structure is documented below. */ defaultSettings?: outputs.ces.GuardrailLlmPromptSecurityDefaultSettings; } interface GuardrailLlmPromptSecurityCustomPolicy { /** * By default, the LLM policy check is bypassed for short utterances. * Enabling this setting applies the policy check to all utterances, * including those that would normally be skipped. */ allowShortUtterance?: boolean; /** * If an error occurs during the policy check, fail open and do not trigger * the guardrail. */ failOpen?: boolean; /** * When checking this policy, consider the last 'n' messages in the * conversation. * When not set a default value of 10 will be used. */ maxConversationMessages?: number; /** * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings?: outputs.ces.GuardrailLlmPromptSecurityCustomPolicyModelSettings; /** * Defines when to apply the policy check during the conversation. If set to * `POLICY_SCOPE_UNSPECIFIED`, the policy will be applied to the user input. * When applying the policy to the agent response, additional latency will * be introduced before the agent can respond. * Possible values: * USER_QUERY * AGENT_RESPONSE * USER_QUERY_AND_AGENT_RESPONSE */ policyScope: string; /** * Policy prompt. 
*/ prompt: string; } interface GuardrailLlmPromptSecurityCustomPolicyModelSettings { /** * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model?: string; /** * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature?: number; } interface GuardrailLlmPromptSecurityDefaultSettings { /** * (Output) * The default prompt template used by the system. * This field is for display purposes to show the user what prompt * the system uses by default. It is OUTPUT_ONLY. */ defaultPromptTemplate: string; } interface GuardrailModelSafety { /** * List of safety settings. * Structure is documented below. */ safetySettings: outputs.ces.GuardrailModelSafetySafetySetting[]; } interface GuardrailModelSafetySafetySetting { /** * The harm category. * Possible values: * HARM_CATEGORY_HATE_SPEECH * HARM_CATEGORY_DANGEROUS_CONTENT * HARM_CATEGORY_HARASSMENT * HARM_CATEGORY_SEXUALLY_EXPLICIT * Possible values are: `HARM_CATEGORY_HATE_SPEECH`, `HARM_CATEGORY_DANGEROUS_CONTENT`, `HARM_CATEGORY_HARASSMENT`, `HARM_CATEGORY_SEXUALLY_EXPLICIT`. */ category: string; /** * The harm block threshold. * Possible values: * BLOCK_LOW_AND_ABOVE * BLOCK_MEDIUM_AND_ABOVE * BLOCK_ONLY_HIGH * BLOCK_NONE * OFF * Possible values are: `BLOCK_LOW_AND_ABOVE`, `BLOCK_MEDIUM_AND_ABOVE`, `BLOCK_ONLY_HIGH`, `BLOCK_NONE`, `OFF`. */ threshold: string; } interface ToolClientFunction { /** * The function description. */ description?: string; /** * The function name. */ name: string; /** * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. */ parameters?: outputs.ces.ToolClientFunctionParameters; /** * Represents a select subset of an OpenAPI 3.0 schema object. * Structure is documented below. 
*/ response?: outputs.ces.ToolClientFunctionResponse; } interface ToolClientFunctionParameters { /** * Defines the schema for additional properties allowed in an object. * The value must be a valid JSON string representing the Schema object. * (Note: OpenAPI also allows a boolean, this definition expects a Schema JSON). */ additionalProperties?: string; /** * The instance value should be valid against at least one of the schemas in this list. */ anyOf?: string; /** * Default value of the data. Represents a dynamically typed value * which can be either null, a number, a string, a boolean, a struct, * or a list of values. The provided default value must be compatible * with the defined 'type' and other schema constraints. */ default?: string; /** * A map of definitions for use by ref. Only allowed at the root of the schema. */ defs?: string; /** * The description of the data. */ description?: string; /** * Possible values of the element of primitive type with enum format. * Examples: * 1. We can define direction as : * {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} * 2. We can define apartment number as : * {type:INTEGER, format:enum, enum:["101", "201", "301"]} */ enums?: string[]; /** * Schema of the elements of Type.ARRAY. */ items?: string; /** * Maximum number of the elements for Type.ARRAY. (int64 format) */ maxItems?: number; /** * Maximum value for Type.INTEGER and Type.NUMBER. */ maximum?: number; /** * Minimum number of the elements for Type.ARRAY. (int64 format) */ minItems?: number; /** * Minimum value for Type.INTEGER and Type.NUMBER. */ minimum?: number; /** * Indicates if the value may be null. */ nullable?: boolean; /** * Schemas of initial elements of Type.ARRAY. */ prefixItems?: string; /** * Properties of Type.OBJECT. */ properties?: string; /** * Allows indirect references between schema nodes. The value should be a * valid reference to a child of the root `defs`. 
* For example, the following schema defines a reference to a schema node * named "Pet": * type: object * properties: * pet: * ref: #/defs/Pet * defs: * Pet: * type: object * properties: * name: * type: string * The value of the "pet" property is a reference to the schema node * named "Pet". * See details in * https://json-schema.org/understanding-json-schema/structuring. */ ref?: string; /** * Required properties of Type.OBJECT. */ requireds?: string[]; /** * The title of the schema. */ title?: string; /** * The type of the data. * Possible values: * STRING * INTEGER * NUMBER * BOOLEAN * OBJECT * ARRAY */ type: string; /** * Indicate the items in the array must be unique. Only applies to TYPE.ARRAY. */ uniqueItems?: boolean; } interface ToolClientFunctionResponse { /** * Defines the schema for additional properties allowed in an object. * The value must be a valid JSON string representing the Schema object. * (Note: OpenAPI also allows a boolean, this definition expects a Schema JSON). */ additionalProperties?: string; /** * The instance value should be valid against at least one of the schemas in this list. */ anyOf?: string; /** * Default value of the data. Represents a dynamically typed value * which can be either null, a number, a string, a boolean, a struct, * or a list of values. The provided default value must be compatible * with the defined 'type' and other schema constraints. */ default?: string; /** * A map of definitions for use by ref. Only allowed at the root of the schema. */ defs?: string; /** * The description of the data. */ description?: string; /** * Possible values of the element of primitive type with enum format. * Examples: * 1. We can define direction as : * {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} * 2. We can define apartment number as : * {type:INTEGER, format:enum, enum:["101", "201", "301"]} */ enums?: string[]; /** * Schema of the elements of Type.ARRAY. 
*/ items?: string; /** * Maximum number of the elements for Type.ARRAY. (int64 format) */ maxItems?: number; /** * Maximum value for Type.INTEGER and Type.NUMBER. */ maximum?: number; /** * Minimum number of the elements for Type.ARRAY. (int64 format) */ minItems?: number; /** * Minimum value for Type.INTEGER and Type.NUMBER. */ minimum?: number; /** * Indicates if the value may be null. */ nullable?: boolean; /** * Schemas of initial elements of Type.ARRAY. */ prefixItems?: string; /** * Properties of Type.OBJECT. */ properties?: string; /** * Allows indirect references between schema nodes. The value should be a * valid reference to a child of the root `defs`. * For example, the following schema defines a reference to a schema node * named "Pet": * type: object * properties: * pet: * ref: #/defs/Pet * defs: * Pet: * type: object * properties: * name: * type: string * The value of the "pet" property is a reference to the schema node * named "Pet". * See details in * https://json-schema.org/understanding-json-schema/structuring. */ ref?: string; /** * Required properties of Type.OBJECT. */ requireds?: string[]; /** * The title of the schema. */ title?: string; /** * The type of the data. * Possible values: * STRING * INTEGER * NUMBER * BOOLEAN * OBJECT * ARRAY */ type: string; /** * Indicate the items in the array must be unique. Only applies to TYPE.ARRAY. */ uniqueItems?: boolean; } interface ToolDataStoreTool { /** * Boost specification to boost certain documents. * Structure is documented below. */ boostSpecs?: outputs.ces.ToolDataStoreToolBoostSpec[]; /** * The tool description. */ description?: string; /** * Configuration for searching within an Engine, potentially targeting * specific DataStores. * Structure is documented below. */ engineSource?: outputs.ces.ToolDataStoreToolEngineSource; /** * Number of search results to return per query. * The default value is 10. The maximum allowed value is 10. 
*/ maxResults?: number; /** * The modality configs for the data store. * Structure is documented below. */ modalityConfigs?: outputs.ces.ToolDataStoreToolModalityConfig[]; /** * The data store tool name. */ name: string; } interface ToolDataStoreToolBoostSpec { /** * The Data Store where the boosting configuration is applied. Full resource * name of DataStore, such as * projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}. */ dataStores: string[]; /** * A list of boosting specifications. * Structure is documented below. */ specs: outputs.ces.ToolDataStoreToolBoostSpecSpec[]; } interface ToolDataStoreToolBoostSpecSpec { /** * A list of boosting specifications. * Structure is documented below. */ conditionBoostSpecs: outputs.ces.ToolDataStoreToolBoostSpecSpecConditionBoostSpec[]; } interface ToolDataStoreToolBoostSpecSpecConditionBoostSpec { /** * Strength of the boost, which should be in [-1, 1]. Negative boost means * demotion. Default is 0.0. * Setting to 1.0 gives the suggestions a big promotion. However, it does * not necessarily mean that the top result will be a boosted suggestion. * Setting to -1.0 gives the suggestions a big demotion. However, other * suggestions that are relevant might still be shown. * Setting to 0.0 means no boost applied. The boosting condition is * ignored. */ boost?: number; /** * Specification for custom ranking based on customer specified attribute * value. It provides more controls for customized ranking than the simple * (condition, boost) combination above. * Structure is documented below. */ boostControlSpec?: outputs.ces.ToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpec; /** * An expression which specifies a boost condition. The syntax is the same * as filter expression syntax. Currently, the only supported condition is * a list of BCP-47 lang codes. 
* Example: To boost suggestions in languages en or fr: * (lang_code: ANY("en", "fr")) */ condition: string; } interface ToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpec { /** * The attribute type to be used to determine the boost amount. The * attribute value can be derived from the field value of the specified * field_name. In the case of numerical it is straightforward i.e. * attributeValue = numerical_field_value. In the case of freshness * however, attributeValue = (time.now() - datetime_field_value). * Possible values: * NUMERICAL * FRESHNESS */ attributeType?: string; /** * The control points used to define the curve. The monotonic function * (defined through the interpolationType above) passes through the * control points listed here. * Structure is documented below. */ controlPoints?: outputs.ces.ToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpecControlPoint[]; /** * The name of the field whose value will be used to determine the * boost amount. */ fieldName?: string; /** * The interpolation type to be applied to connect the control points * listed below. * Possible values: * LINEAR */ interpolationType?: string; } interface ToolDataStoreToolBoostSpecSpecConditionBoostSpecBoostControlSpecControlPoint { /** * Can be one of: * 1. The numerical field value. * 2. The duration spec for freshness: * The value must be formatted as an XSD `dayTimeDuration` value (a * restricted subset of an ISO 8601 duration value). The pattern for * this is: `nDnM]`. */ attributeValue?: string; /** * The value between -1 to 1 by which to boost the score if the * attributeValue evaluates to the value specified above. */ boostAmount?: number; } interface ToolDataStoreToolEngineSource { /** * Use to target specific DataStores within the Engine. * If empty, the search applies to all DataStores associated with the * Engine. * Structure is documented below. 
*/ dataStoreSources?: outputs.ces.ToolDataStoreToolEngineSourceDataStoreSource[]; /** * Full resource name of the Engine. * Format: * `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` */ engine: string; /** * A filter applied to the search across the Engine. Not relevant and not * used if 'data_store_sources' is provided. * See: * https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata */ filter?: string; } interface ToolDataStoreToolEngineSourceDataStoreSource { /** * A DataStore resource in Vertex AI Search. * Structure is documented below. */ dataStore?: outputs.ces.ToolDataStoreToolEngineSourceDataStoreSourceDataStore; /** * Filter specification for the DataStore. * See: * https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata */ filter?: string; } interface ToolDataStoreToolEngineSourceDataStoreSourceDataStore { /** * (Output) * The connector config for the data store connection. * Structure is documented below. */ connectorConfigs: outputs.ces.ToolDataStoreToolEngineSourceDataStoreSourceDataStoreConnectorConfig[]; /** * (Output) * Timestamp when the data store was created. */ createTime: string; /** * (Output) * The display name of the data store. */ displayName: string; /** * (Output) * The document processing mode for the data store connection. * Only set for PUBLIC_WEB and UNSTRUCTURED data stores. * Possible values: * DOCUMENTS * CHUNKS */ documentProcessingMode: string; /** * Full resource name of the DataStore. * Format: * `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` */ name: string; /** * (Output) * The type of the data store. This field is readonly and populated by the * server. 
* Possible values: * PUBLIC_WEB * UNSTRUCTURED * FAQ * CONNECTOR * * * The `connectorConfig` block contains: */ type: string; } interface ToolDataStoreToolEngineSourceDataStoreSourceDataStoreConnectorConfig { /** * Resource name of the collection the data store belongs to. */ collection: string; /** * Display name of the collection the data store belongs to. */ collectionDisplayName: string; /** * The name of the data source. * Example: 'salesforce', 'jira', 'confluence', 'bigquery'. */ dataSource: string; } interface ToolDataStoreToolModalityConfig { /** * Grounding configuration. * Structure is documented below. */ groundingConfig?: outputs.ces.ToolDataStoreToolModalityConfigGroundingConfig; /** * The modality type. * Possible values: * TEXT * AUDIO */ modalityType: string; /** * Rewriter configuration. * Structure is documented below. */ rewriterConfig?: outputs.ces.ToolDataStoreToolModalityConfigRewriterConfig; /** * Summarization configuration. * Structure is documented below. */ summarizationConfig?: outputs.ces.ToolDataStoreToolModalityConfigSummarizationConfig; } interface ToolDataStoreToolModalityConfigGroundingConfig { /** * Whether grounding is disabled. */ disabled?: boolean; /** * The groundedness threshold of the answer based on the retrieved sources. * The value has a configurable range of [1, 5]. The level is used to * threshold the groundedness of the answer, meaning that all responses with * a groundedness score below the threshold will fall back to returning * relevant snippets only. * For example, a level of 3 means that the groundedness score must be * 3 or higher for the response to be returned. */ groundingLevel?: number; } interface ToolDataStoreToolModalityConfigRewriterConfig { /** * Whether the rewriter is disabled. */ disabled?: boolean; /** * Model settings contains various configurations for the LLM model. * Structure is documented below. 
*/ modelSettings: outputs.ces.ToolDataStoreToolModalityConfigRewriterConfigModelSettings; /** * The prompt definition. If not set, default prompt will be used. */ prompt?: string; } interface ToolDataStoreToolModalityConfigRewriterConfigModelSettings { /** * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model?: string; /** * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature?: number; } interface ToolDataStoreToolModalityConfigSummarizationConfig { /** * Whether summarization is disabled. */ disabled?: boolean; /** * Model settings contains various configurations for the LLM model. * Structure is documented below. */ modelSettings?: outputs.ces.ToolDataStoreToolModalityConfigSummarizationConfigModelSettings; /** * The prompt definition. If not set, default prompt will be used. */ prompt?: string; } interface ToolDataStoreToolModalityConfigSummarizationConfigModelSettings { /** * The LLM model that the agent should use. * If not set, the agent will inherit the model from its parent agent. */ model?: string; /** * If set, this temperature will be used for the LLM model. Temperature * controls the randomness of the model's responses. Lower temperatures * produce responses that are more predictable. Higher temperatures produce * responses that are more creative. */ temperature?: number; } interface ToolGoogleSearchTool { /** * Content will be fetched directly from these URLs for context and grounding. * More details: https://cloud.google.com/vertex-ai/generative-ai/docs/url-context. * Example: "https://example.com/path.html". A maximum of 20 URLs are allowed. */ contextUrls?: string[]; /** * Description of the tool's purpose. 
*/ description?: string; /** * List of domains to be excluded from the search results. * Example: "example.com". * A maximum of 2000 domains can be excluded. */ excludeDomains?: string[]; /** * The name of the tool. */ name: string; /** * Specifies domain names to guide the search. * The model will be instructed to prioritize these domains * when formulating queries for google search. * This is a best-effort hint and these domains may or may * not be exclusively reflected in the final search results. * Example: "example.com", "another.site". * A maximum of 20 domains can be specified. */ preferredDomains?: string[]; } interface ToolOpenApiTool { /** * (Output) * Authentication information required for API calls. * Structure is documented below. */ apiAuthentications: outputs.ces.ToolOpenApiToolApiAuthentication[]; /** * (Output) * The description of the system tool. */ description: string; /** * (Output) * If true, the agent will ignore unknown fields in the API response. */ ignoreUnknownFields: boolean; /** * (Output) * The name of the system tool. */ name: string; /** * (Output) * The OpenAPI schema in JSON or YAML format. */ openApiSchema: string; /** * (Output) * Configuration for tools using Service Directory. * Structure is documented below. */ serviceDirectoryConfigs: outputs.ces.ToolOpenApiToolServiceDirectoryConfig[]; /** * (Output) * The TLS configuration. * Structure is documented below. */ tlsConfigs: outputs.ces.ToolOpenApiToolTlsConfig[]; /** * (Output) * The server URL of the Open API schema. This field is only set in tools in the * environment dependencies during the export process if the schema contains a * server url. During the import process, if this url is present in the environment * dependencies and the schema has the $env_var placeholder, it will replace the * placeholder in the schema. */ url: string; } interface ToolOpenApiToolApiAuthentication { /** * (Output) * Configurations for authentication with API key. 
* Structure is documented below. */ apiKeyConfigs: outputs.ces.ToolOpenApiToolApiAuthenticationApiKeyConfig[]; /** * (Output) * Configurations for authentication with a bearer token. * Structure is documented below. */ bearerTokenConfigs: outputs.ces.ToolOpenApiToolApiAuthenticationBearerTokenConfig[]; /** * (Output) * Configurations for authentication with OAuth. * Structure is documented below. */ oauthConfigs: outputs.ces.ToolOpenApiToolApiAuthenticationOauthConfig[]; /** * (Output) * Configurations for authentication using a custom service account. * Structure is documented below. */ serviceAccountAuthConfigs: outputs.ces.ToolOpenApiToolApiAuthenticationServiceAccountAuthConfig[]; /** * (Output) * Configurations for authentication with [ID * token](https://cloud.google.com/docs/authentication/token-types#id) generated * from service agent. */ serviceAgentIdTokenAuthConfigs: outputs.ces.ToolOpenApiToolApiAuthenticationServiceAgentIdTokenAuthConfig[]; } interface ToolOpenApiToolApiAuthenticationApiKeyConfig { /** * (Output) * The name of the SecretManager secret version resource storing the API key. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ apiKeySecretVersion: string; /** * (Output) * The parameter name or the header name of the API key. * E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * (Output) * Key location in the request. * Possible values: * HEADER * QUERY_STRING */ requestLocation: string; } interface ToolOpenApiToolApiAuthenticationBearerTokenConfig { /** * (Output) * The bearer token. Must be in the format $context.variables.. */ token: string; } interface ToolOpenApiToolApiAuthenticationOauthConfig { /** * (Output) * The client ID from the OAuth provider. 
*/ clientId: string; /** * (Output) * The name of the SecretManager secret version resource storing the * client secret. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ clientSecretVersion: string; /** * (Output) * OAuth grant types. * Possible values: * CLIENT_CREDENTIAL */ oauthGrantType: string; /** * (Output) * The OAuth scopes to grant. */ scopes: string[]; /** * (Output) * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface ToolOpenApiToolApiAuthenticationServiceAccountAuthConfig { /** * (Output) * The email address of the service account used for authenticatation. CES * uses this service account to exchange an access token and the access token * is then sent in the `Authorization` header of the request. * The service account must have the * `roles/iam.serviceAccountTokenCreator` role granted to the * CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ serviceAccount: string; } interface ToolOpenApiToolApiAuthenticationServiceAgentIdTokenAuthConfig { } interface ToolOpenApiToolServiceDirectoryConfig { /** * (Output) * The name of [Service * Directory](https://cloud.google.com/service-directory) service. * Format: * `projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}`. * Location of the service directory must be the same as the location of the * app. */ service: string; } interface ToolOpenApiToolTlsConfig { /** * (Output) * Specifies a list of allowed custom CA certificates for HTTPS * verification. * Structure is documented below. */ caCerts: outputs.ces.ToolOpenApiToolTlsConfigCaCert[]; } interface ToolOpenApiToolTlsConfigCaCert { /** * (Output) * The allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. 
If this * is empty or unspecified, CES will use Google's default trust * store to verify certificates. N.B. Make sure the HTTPS server * certificates are signed with "subject alt name". For instance a * certificate can be self-signed using the following command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") * A base64-encoded string. */ cert: string; /** * (Output) * The name of the allowed custom CA certificates. This * can be used to disambiguate the custom CA certificates. */ displayName: string; } interface ToolPythonFunction { /** * (Output) * The description of the Python function, parsed from the python code's * docstring. */ description: string; /** * The name of the Python function to execute. Must match a Python function * name defined in the python code. Case sensitive. If the name is not * provided, the first function defined in the python code will be used. */ name?: string; /** * The Python code to execute for the tool. */ pythonCode?: string; } interface ToolSystemTool { /** * (Output) * The description of the system tool. */ description: string; /** * (Output) * The name of the system tool. */ name: string; } interface ToolsetMcpToolset { /** * Authentication information required to access tools and execute a tool * against the MCP server. For API key auth, the API key can only be sent in * the request header; sending it via query parameters is not supported. * Structure is documented below. */ apiAuthentication?: outputs.ces.ToolsetMcpToolsetApiAuthentication; /** * The address of the MCP server, for example, "https://example.com/mcp/". If * the server is built with the MCP SDK, the url should be suffixed with * "/mcp/". Only Streamable HTTP transport based servers are supported. See * https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http * for more details. 
*/ serverAddress: string; /** * Service Directory configuration for VPC-SC, used to resolve service names * within a perimeter. * Structure is documented below. */ serviceDirectoryConfig?: outputs.ces.ToolsetMcpToolsetServiceDirectoryConfig; /** * The TLS configuration. Includes the custom server certificates that the * client should trust. * Structure is documented below. */ tlsConfig?: outputs.ces.ToolsetMcpToolsetTlsConfig; } interface ToolsetMcpToolsetApiAuthentication { /** * Configurations for authentication with API key. * Structure is documented below. */ apiKeyConfig?: outputs.ces.ToolsetMcpToolsetApiAuthenticationApiKeyConfig; /** * Configurations for authentication with a bearer token. * Structure is documented below. */ bearerTokenConfig?: outputs.ces.ToolsetMcpToolsetApiAuthenticationBearerTokenConfig; /** * Configurations for authentication with OAuth. * Structure is documented below. */ oauthConfig?: outputs.ces.ToolsetMcpToolsetApiAuthenticationOauthConfig; /** * Configurations for authentication using a custom service account. * Structure is documented below. */ serviceAccountAuthConfig?: outputs.ces.ToolsetMcpToolsetApiAuthenticationServiceAccountAuthConfig; /** * Configurations for authentication with [ID * token](https://cloud.google.com/docs/authentication/token-types#id) generated * from service agent. */ serviceAgentIdTokenAuthConfig?: outputs.ces.ToolsetMcpToolsetApiAuthenticationServiceAgentIdTokenAuthConfig; } interface ToolsetMcpToolsetApiAuthenticationApiKeyConfig { /** * The name of the SecretManager secret version resource storing the API key. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ apiKeySecretVersion: string; /** * The parameter name or the header name of the API key. 
* E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * Key location in the request. For API key auth on MCP toolsets, * the API key can only be sent in the request header. * Possible values: * HEADER */ requestLocation: string; } interface ToolsetMcpToolsetApiAuthenticationBearerTokenConfig { /** * (Optional) */ token?: string; } interface ToolsetMcpToolsetApiAuthenticationOauthConfig { /** * The client ID from the OAuth provider. */ clientId: string; /** * The name of the SecretManager secret version resource storing the * client secret. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ clientSecretVersion: string; /** * OAuth grant types. * Possible values: * CLIENT_CREDENTIAL */ oauthGrantType: string; /** * The OAuth scopes to grant. */ scopes?: string[]; /** * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface ToolsetMcpToolsetApiAuthenticationServiceAccountAuthConfig { /** * The email address of the service account used for authenticatation. CES * uses this service account to exchange an access token and the access token * is then sent in the `Authorization` header of the request. * The service account must have the * `roles/iam.serviceAccountTokenCreator` role granted to the * CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ serviceAccount: string; } interface ToolsetMcpToolsetApiAuthenticationServiceAgentIdTokenAuthConfig { } interface ToolsetMcpToolsetServiceDirectoryConfig { /** * The name of [Service * Directory](https://cloud.google.com/service-directory) service. * Format: * `projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}`. 
* Location of the service directory must be the same as the location of the * app. */ service: string; } interface ToolsetMcpToolsetTlsConfig { /** * Specifies a list of allowed custom CA certificates for HTTPS * verification. * Structure is documented below. */ caCerts: outputs.ces.ToolsetMcpToolsetTlsConfigCaCert[]; } interface ToolsetMcpToolsetTlsConfigCaCert { /** * The allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. If this * is empty or unspecified, CES will use Google's default trust * store to verify certificates. N.B. Make sure the HTTPS server * certificates are signed with "subject alt name". For instance a * certificate can be self-signed using the following command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") */ cert: string; /** * The name of the allowed custom CA certificates. This * can be used to disambiguate the custom CA certificates. */ displayName: string; } interface ToolsetOpenApiToolset { /** * Authentication information required for API calls. * Structure is documented below. */ apiAuthentication?: outputs.ces.ToolsetOpenApiToolsetApiAuthentication; /** * If true, the agent will ignore unknown fields in the API response for all * operations defined in the OpenAPI schema. */ ignoreUnknownFields?: boolean; /** * The OpenAPI schema of the toolset. */ openApiSchema: string; /** * Configuration for tools using Service Directory. * Structure is documented below. */ serviceDirectoryConfig?: outputs.ces.ToolsetOpenApiToolsetServiceDirectoryConfig; /** * The TLS configuration. * Structure is documented below. */ tlsConfig?: outputs.ces.ToolsetOpenApiToolsetTlsConfig; /** * (Output) * The server URL of the Open API schema. * This field is only set in toolsets in the environment dependencies * during the export process if the schema contains a server url. 
* During the import process, if this url is present in the environment dependencies * and the schema has the $env_var placeholder, * it will replace the placeholder in the schema. */ url: string; } interface ToolsetOpenApiToolsetApiAuthentication { /** * Configurations for authentication with API key. * Structure is documented below. */ apiKeyConfig?: outputs.ces.ToolsetOpenApiToolsetApiAuthenticationApiKeyConfig; /** * Configurations for authentication with a bearer token. * Structure is documented below. */ bearerTokenConfig?: outputs.ces.ToolsetOpenApiToolsetApiAuthenticationBearerTokenConfig; /** * Configurations for authentication with OAuth. * Structure is documented below. */ oauthConfig?: outputs.ces.ToolsetOpenApiToolsetApiAuthenticationOauthConfig; /** * Configurations for authentication using a custom service account. * Structure is documented below. */ serviceAccountAuthConfig?: outputs.ces.ToolsetOpenApiToolsetApiAuthenticationServiceAccountAuthConfig; /** * Configurations for authentication with [ID * token](https://cloud.google.com/docs/authentication/token-types#id) generated * from service agent. */ serviceAgentIdTokenAuthConfig?: outputs.ces.ToolsetOpenApiToolsetApiAuthenticationServiceAgentIdTokenAuthConfig; } interface ToolsetOpenApiToolsetApiAuthenticationApiKeyConfig { /** * The name of the SecretManager secret version resource storing the API key. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ apiKeySecretVersion: string; /** * The parameter name or the header name of the API key. * E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * Key location in the request. For API key auth on MCP toolsets, * the API key can only be sent in the request header. 
* Possible values: * HEADER */ requestLocation: string; } interface ToolsetOpenApiToolsetApiAuthenticationBearerTokenConfig { /** * (Optional) */ token?: string; } interface ToolsetOpenApiToolsetApiAuthenticationOauthConfig { /** * The client ID from the OAuth provider. */ clientId: string; /** * The name of the SecretManager secret version resource storing the * client secret. * Format: `projects/{project}/secrets/{secret}/versions/{version}` * Note: You should grant `roles/secretmanager.secretAccessor` role to the CES * service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ clientSecretVersion: string; /** * OAuth grant types. * Possible values: * CLIENT_CREDENTIAL */ oauthGrantType: string; /** * The OAuth scopes to grant. */ scopes?: string[]; /** * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface ToolsetOpenApiToolsetApiAuthenticationServiceAccountAuthConfig { /** * The email address of the service account used for authenticatation. CES * uses this service account to exchange an access token and the access token * is then sent in the `Authorization` header of the request. * The service account must have the * `roles/iam.serviceAccountTokenCreator` role granted to the * CES service agent * `service-@gcp-sa-ces.iam.gserviceaccount.com`. */ serviceAccount: string; } interface ToolsetOpenApiToolsetApiAuthenticationServiceAgentIdTokenAuthConfig { } interface ToolsetOpenApiToolsetServiceDirectoryConfig { /** * The name of [Service * Directory](https://cloud.google.com/service-directory) service. * Format: * `projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}`. * Location of the service directory must be the same as the location of the * app. */ service: string; } interface ToolsetOpenApiToolsetTlsConfig { /** * Specifies a list of allowed custom CA certificates for HTTPS * verification. * Structure is documented below. 
*/ caCerts: outputs.ces.ToolsetOpenApiToolsetTlsConfigCaCert[]; } interface ToolsetOpenApiToolsetTlsConfigCaCert { /** * The allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. If this * is empty or unspecified, CES will use Google's default trust * store to verify certificates. N.B. Make sure the HTTPS server * certificates are signed with "subject alt name". For instance a * certificate can be self-signed using the following command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") */ cert: string; /** * The name of the allowed custom CA certificates. This * can be used to disambiguate the custom CA certificates. */ displayName: string; } } export declare namespace chronicle { interface DataAccessScopeAllowedDataAccessLabel { /** * The asset namespace configured in the forwarder * of the customer's events. */ assetNamespace?: string; /** * The name of the data access label. */ dataAccessLabel?: string; /** * (Output) * Output only. The display name of the label. * Data access label and log types's name * will match the display name of the resource. * The asset namespace will match the namespace itself. * The ingestion key value pair will match the key of the tuple. */ displayName: string; /** * Representation of an ingestion label type. * Structure is documented below. */ ingestionLabel?: outputs.chronicle.DataAccessScopeAllowedDataAccessLabelIngestionLabel; /** * The name of the log type. */ logType?: string; } interface DataAccessScopeAllowedDataAccessLabelIngestionLabel { /** * Required. The key of the ingestion label. Always required. */ ingestionLabelKey: string; /** * Optional. The value of the ingestion label. Optional. An object * with no provided value and some key provided would match * against the given key and ANY value. 
*/ ingestionLabelValue?: string; } interface DataAccessScopeDeniedDataAccessLabel { /** * The asset namespace configured in the forwarder * of the customer's events. */ assetNamespace?: string; /** * The name of the data access label. */ dataAccessLabel?: string; /** * (Output) * Output only. The display name of the label. * Data access label and log types's name * will match the display name of the resource. * The asset namespace will match the namespace itself. * The ingestion key value pair will match the key of the tuple. */ displayName: string; /** * Representation of an ingestion label type. * Structure is documented below. */ ingestionLabel?: outputs.chronicle.DataAccessScopeDeniedDataAccessLabelIngestionLabel; /** * The name of the log type. */ logType?: string; } interface DataAccessScopeDeniedDataAccessLabelIngestionLabel { /** * Required. The key of the ingestion label. Always required. */ ingestionLabelKey: string; /** * Optional. The value of the ingestion label. Optional. An object * with no provided value and some key provided would match * against the given key and ANY value. */ ingestionLabelValue?: string; } interface ReferenceListEntry { /** * Required. The value of the entry. Maximum length is 512 characters. */ value: string; } interface ReferenceListScopeInfo { /** * ReferenceListScope specifies the list of scope names of the reference list. * Structure is documented below. */ referenceListScope?: outputs.chronicle.ReferenceListScopeInfoReferenceListScope; } interface ReferenceListScopeInfoReferenceListScope { /** * Optional. The list of scope names of the reference list. The scope names should be * full resource names and should be of the format: * "projects/{project}/locations/{location}/instances/{instance}/dataAccessScopes/{scope_name}". */ scopeNames?: string[]; } interface RetrohuntExecutionInterval { /** * Optional. Exclusive end of the interval. * If specified, a Timestamp matching this interval will have to be before the * end. 
*/ endTime?: string; /** * Optional. Inclusive start of the interval. * If specified, a Timestamp matching this interval will have to be the same * or after the start. */ startTime?: string; } interface RetrohuntProcessInterval { /** * Exclusive end of the interval. */ endTime: string; /** * Inclusive start of the interval. */ startTime: string; } interface RuleCompilationDiagnostic { /** * (Output) * Output only. The diagnostic message. */ message: string; /** * CompilationPosition represents the location of a compilation diagnostic in * rule text. * Structure is documented below. */ position?: outputs.chronicle.RuleCompilationDiagnosticPosition; /** * (Output) * Output only. The severity of a rule's compilation diagnostic. * Possible values: * SEVERITY_UNSPECIFIED * WARNING * ERROR */ severity: string; /** * (Output) * Output only. Link to documentation that describes a diagnostic in more detail. */ uri: string; } interface RuleCompilationDiagnosticPosition { /** * (Output) * Output only. End column number, beginning at 1. */ endColumn: number; /** * (Output) * Output only. End line number, beginning at 1. */ endLine: number; /** * (Output) * Output only. Start column number, beginning at 1. */ startColumn: number; /** * (Output) * Output only. Start line number, beginning at 1. */ startLine: number; } interface RuleSeverity { /** * The display name of the severity level. Extracted from the meta section of * the rule text. */ displayName?: string; } interface WatchlistEntityCount { /** * (Output) * Output only. Count of asset type entities in the watchlist. */ asset: number; /** * (Output) * Output only. Count of user type entities in the watchlist. */ user: number; } interface WatchlistEntityPopulationMechanism { /** * Entities are added manually. */ manual: outputs.chronicle.WatchlistEntityPopulationMechanismManual; } interface WatchlistEntityPopulationMechanismManual { } interface WatchlistWatchlistUserPreferences { /** * Optional. 
Whether the watchlist is pinned on the dashboard. */ pinned?: boolean; } } export declare namespace cloudasset { interface FolderFeedCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file * name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface FolderFeedFeedOutputConfig { /** * Destination on Cloud Pubsub. * Structure is documented below. */ pubsubDestination: outputs.cloudasset.FolderFeedFeedOutputConfigPubsubDestination; } interface FolderFeedFeedOutputConfigPubsubDestination { /** * Destination on Cloud Pubsub topic. */ topic: string; } interface GetResourcesSearchAllResult { /** * Additional searchable attributes of this resource. Informational only. The exact set of attributes is subject to change. For example: project id, DNS name etc. */ additionalAttributes: string[]; /** * The type of this resource. */ assetType: string; /** * One or more paragraphs of text description of this resource. Maximum length could be up to 1M bytes. */ description: string; /** * The display name of this resource. */ displayName: string; /** * Labels associated with this resource. */ labels: { [key: string]: string; }; /** * Location can be `global`, regional like `us-east1`, or zonal like `us-west1-b`. */ location: string; /** * The full resource name. See [Resource Names](https://cloud.google.com/apis/design/resource_names#full_resource_name) for more information. */ name: string; /** * Network tags associated with this resource. 
*/ networkTags: string[]; /** * The project that this resource belongs to, in the form of `projects/{project_number}`. */ project: string; } interface GetSearchAllResourcesResult { /** * The type of this resource. */ assetType: string; /** * The create timestamp of this resource, at which the resource was created. */ createTime: string; /** * One or more paragraphs of text description of this resource. Maximum length could be up to 1M bytes. */ description: string; /** * The display name of this resource. */ displayName: string; /** * The folder(s) that this resource belongs to, in the form of `folders/{FOLDER_NUMBER}`. This field is available when the resource belongs to one or more folders. */ folders: string[]; /** * The Cloud KMS CryptoKey names or CryptoKeyVersion names. This field is available only when the resource's Protobuf contains it. */ kmsKeys: string[]; /** * Labels associated with this resource. */ labels: { [key: string]: string; }; /** * Location can be `global`, regional like `us-east1`, or zonal like `us-west1-b`. */ location: string; /** * The full resource name of this resource.. See [Resource Names](https://cloud.google.com/apis/design/resource_names#full_resource_name) for more information. */ name: string; /** * Network tags associated with this resource. */ networkTags: string[]; /** * The organization that this resource belongs to, in the form of `organizations/{ORGANIZATION_NUMBER}`. This field is available when the resource belongs to an organization. */ organization: string; /** * The type of this resource's immediate parent, if there is one. */ parentAssetType: string; /** * The full resource name of this resource's parent, if it has one. */ parentFullResourceName: string; /** * The project that this resource belongs to, in the form of `projects/{project_number}`. */ project: string; /** * The state of this resource. */ state: string; /** * The last update timestamp of this resource, at which the resource was last modified or deleted. 
*/ updateTime: string; } interface OrganizationFeedCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file * name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface OrganizationFeedFeedOutputConfig { /** * Destination on Cloud Pubsub. * Structure is documented below. */ pubsubDestination: outputs.cloudasset.OrganizationFeedFeedOutputConfigPubsubDestination; } interface OrganizationFeedFeedOutputConfigPubsubDestination { /** * Destination on Cloud Pubsub topic. */ topic: string; } interface ProjectFeedCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file * name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ProjectFeedFeedOutputConfig { /** * Destination on Cloud Pubsub. * Structure is documented below. */ pubsubDestination: outputs.cloudasset.ProjectFeedFeedOutputConfigPubsubDestination; } interface ProjectFeedFeedOutputConfigPubsubDestination { /** * Destination on Cloud Pubsub topic. 
*/ topic: string; } } export declare namespace cloudbuild { interface BitbucketServerConfigConnectedRepository { /** * Identifier for the project storing the repository. */ projectKey: string; /** * Identifier for the repository. */ repoSlug: string; } interface BitbucketServerConfigSecrets { /** * The resource name for the admin access token's secret version. */ adminAccessTokenVersionName: string; /** * The resource name for the read access token's secret version. */ readAccessTokenVersionName: string; /** * Immutable. The resource name for the webhook secret's secret version. Once this field has been set, it cannot be changed. * Changing this field will result in deleting/ recreating the resource. */ webhookSecretVersionName: string; } interface GetTriggerApprovalConfig { /** * Whether or not approval is needed. If this is set on a build, it will become pending when run, * and will need to be explicitly approved to start. */ approvalRequired: boolean; } interface GetTriggerBitbucketServerTriggerConfig { /** * The Bitbucket server config resource that this trigger config maps to. */ bitbucketServerConfigResource: string; /** * Key of the project that the repo is in. For example: The key for https://mybitbucket.server/projects/TEST/repos/test-repo is "TEST". */ projectKey: string; /** * Filter to match changes in pull requests. */ pullRequests: outputs.cloudbuild.GetTriggerBitbucketServerTriggerConfigPullRequest[]; /** * Filter to match changes in refs like branches, tags. */ pushes: outputs.cloudbuild.GetTriggerBitbucketServerTriggerConfigPush[]; /** * Slug of the repository. A repository slug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL. * For example, if the repository name is 'test repo', in the URL it would become 'test-repo' as in https://mybitbucket.server/projects/TEST/repos/test-repo. 
*/ repoSlug: string; } interface GetTriggerBitbucketServerTriggerConfigPullRequest { /** * Regex of branches to match. * The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax */ branch: string; /** * Configure builds to run whether a repository owner or collaborator need to comment /gcbrun. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"] */ commentControl: string; /** * If true, branches that do NOT match the gitRef will trigger a build. */ invertRegex: boolean; } interface GetTriggerBitbucketServerTriggerConfigPush { /** * Regex of branches to match. Specify only one of branch or tag. */ branch: string; /** * When true, only trigger a build if the revision regex does NOT match the gitRef regex. */ invertRegex: boolean; /** * Regex of tags to match. Specify only one of branch or tag. */ tag: string; } interface GetTriggerBuild { /** * Artifacts produced by the build that should be uploaded upon successful completion of all build steps. */ artifacts: outputs.cloudbuild.GetTriggerBuildArtifact[]; /** * Secrets and secret environment variables. */ availableSecrets: outputs.cloudbuild.GetTriggerBuildAvailableSecret[]; /** * A list of images to be pushed upon the successful completion of all build steps. * The images are pushed using the builder service account's credentials. * The digests of the pushed images will be stored in the Build resource's results field. * If any of the images fail to be pushed, the build status is marked FAILURE. */ images: string[]; /** * Google Cloud Storage bucket where logs should be written. * Logs file names will be of the format ${logsBucket}/log-${build_id}.txt. */ logsBucket: string; /** * Special options for this build. */ options: outputs.cloudbuild.GetTriggerBuildOption[]; /** * TTL in queue for this build. 
If provided and the build is enqueued longer than this value, * the build will expire and the build status will be EXPIRED. * The TTL starts ticking from createTime. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ queueTtl: string; /** * Secrets to decrypt using Cloud Key Management Service. */ secrets: outputs.cloudbuild.GetTriggerBuildSecret[]; /** * The location of the source files to build. * * One of 'storageSource' or 'repoSource' must be provided. */ sources: outputs.cloudbuild.GetTriggerBuildSource[]; /** * The operations to be performed on the workspace. */ steps: outputs.cloudbuild.GetTriggerBuildStep[]; /** * Substitutions data for Build resource. */ substitutions: { [key: string]: string; }; /** * Tags for annotation of a Build. These are not docker tags. */ tags: string[]; /** * Amount of time that this build should be allowed to run, to second granularity. * If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. * This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. * The expected format is the number of seconds followed by s. * Default time is ten minutes (600s). */ timeout: string; } interface GetTriggerBuildArtifact { /** * A list of images to be pushed upon the successful completion of all build steps. * * The images will be pushed using the builder service account's credentials. * * The digests of the pushed images will be stored in the Build resource's results field. * * If any of the images fail to be pushed, the build is marked FAILURE. */ images: string[]; /** * A Maven artifact to upload to Artifact Registry upon successful completion of all build steps. * * The location and generation of the uploaded objects will be stored in the Build resource's results field. * * If any objects fail to be pushed, the build is marked FAILURE. 
*/ mavenArtifacts: outputs.cloudbuild.GetTriggerBuildArtifactMavenArtifact[]; /** * Npm package to upload to Artifact Registry upon successful completion of all build steps. * * The location and generation of the uploaded objects will be stored in the Build resource's results field. * * If any objects fail to be pushed, the build is marked FAILURE. */ npmPackages: outputs.cloudbuild.GetTriggerBuildArtifactNpmPackage[]; /** * A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. * * Files in the workspace matching specified paths globs will be uploaded to the * Cloud Storage location using the builder service account's credentials. * * The location and generation of the uploaded objects will be stored in the Build resource's results field. * * If any objects fail to be pushed, the build is marked FAILURE. */ objects: outputs.cloudbuild.GetTriggerBuildArtifactObject[]; /** * Python package to upload to Artifact Registry upon successful completion of all build steps. A package can encapsulate multiple objects to be uploaded to a single repository. * * The location and generation of the uploaded objects will be stored in the Build resource's results field. * * If any objects fail to be pushed, the build is marked FAILURE. */ pythonPackages: outputs.cloudbuild.GetTriggerBuildArtifactPythonPackage[]; } interface GetTriggerBuildArtifactMavenArtifact { /** * Maven artifactId value used when uploading the artifact to Artifact Registry. */ artifactId: string; /** * Maven groupId value used when uploading the artifact to Artifact Registry. */ groupId: string; /** * Path to an artifact in the build's workspace to be uploaded to Artifact Registry. This can be either an absolute path, e.g. /workspace/my-app/target/my-app-1.0.SNAPSHOT.jar or a relative path from /workspace, e.g. my-app/target/my-app-1.0.SNAPSHOT.jar. 
*/ path: string; /** * Artifact Registry repository, in the form "https://$REGION-maven.pkg.dev/$PROJECT/$REPOSITORY" * * Artifact in the workspace specified by path will be uploaded to Artifact Registry with this location as a prefix. */ repository: string; /** * Maven version value used when uploading the artifact to Artifact Registry. */ version: string; } interface GetTriggerBuildArtifactNpmPackage { /** * Path to the package.json. e.g. workspace/path/to/package */ packagePath: string; /** * Artifact Registry repository, in the form "https://$REGION-npm.pkg.dev/$PROJECT/$REPOSITORY" * * Npm package in the workspace specified by path will be zipped and uploaded to Artifact Registry with this location as a prefix. */ repository: string; } interface GetTriggerBuildArtifactObject { /** * The Cloud Build location for the trigger. * * - - - */ location: string; /** * Path globs used to match files in the build's workspace. */ paths: string[]; /** * Output only. Stores timing information for pushing all artifact objects. */ timings: outputs.cloudbuild.GetTriggerBuildArtifactObjectTiming[]; } interface GetTriggerBuildArtifactObjectTiming { /** * End of time span. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to * nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ endTime: string; /** * Start of time span. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to * nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ startTime: string; } interface GetTriggerBuildArtifactPythonPackage { /** * Path globs used to match files in the build's workspace. For Python/ Twine, this is usually dist/*, and sometimes additionally an .asc file. 
*/ paths: string[]; /** * Artifact Registry repository, in the form "https://$REGION-python.pkg.dev/$PROJECT/$REPOSITORY" * * Files in the workspace matching any path pattern will be uploaded to Artifact Registry with this location as a prefix. */ repository: string; } interface GetTriggerBuildAvailableSecret { /** * Pairs a secret environment variable with a SecretVersion in Secret Manager. */ secretManagers: outputs.cloudbuild.GetTriggerBuildAvailableSecretSecretManager[]; } interface GetTriggerBuildAvailableSecretSecretManager { /** * Environment variable name to associate with the secret. Secret environment * variables must be unique across all of a build's secrets, and must be used * by at least one build step. */ env: string; /** * Resource name of the SecretVersion. In format: projects/*/secrets/*/versions/* */ versionName: string; } interface GetTriggerBuildOption { /** * Requested disk size for the VM that runs the build. Note that this is NOT "disk free"; * some of the space will be used by the operating system and build utilities. * Also note that this is the minimum disk size that will be allocated for the build -- * the build may run with a larger disk than requested. At present, the maximum disk size * is 1000GB; builds that request more than the maximum are rejected with an error. */ diskSizeGb: number; /** * Option to specify whether or not to apply bash style string operations to the substitutions. * * NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file. */ dynamicSubstitutions: boolean; /** * A list of global environment variable definitions that will exist for all build steps * in this build. If a variable is defined in both globally and in a build step, * the variable will use the build step value. * * The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE". 
*/ envs: string[]; /** * Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"] */ logStreamingOption: string; /** * Option to specify the logging mode, which determines if and where build logs are stored. Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE"] */ logging: string; /** * Compute Engine machine type on which to run the build. */ machineType: string; /** * Requested verifiability options. Possible values: ["NOT_VERIFIED", "VERIFIED"] */ requestedVerifyOption: string; /** * A list of global environment variables, which are encrypted using a Cloud Key Management * Service crypto key. These values must be specified in the build's Secret. These variables * will be available to all build steps in this build. */ secretEnvs: string[]; /** * Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"] */ sourceProvenanceHashes: string[]; /** * Option to specify behavior when there is an error in the substitution checks. * * NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden * in the build configuration file. Possible values: ["MUST_MATCH", "ALLOW_LOOSE"] */ substitutionOption: string; /** * Global list of volumes to mount for ALL build steps * * Each volume is created as an empty volume prior to starting the build process. * Upon completion of the build, volumes and their contents are discarded. Global * volume names and paths cannot conflict with the volumes defined a build step. * * Using a global volume in a build with only one step is not valid as it is indicative * of a build request with an incorrect configuration. */ volumes: outputs.cloudbuild.GetTriggerBuildOptionVolume[]; /** * Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} * * This field is experimental. 
*/ workerPool: string; } interface GetTriggerBuildOptionVolume { /** * Name of the volume to mount. * * Volume names must be unique per build step and must be valid names for Docker volumes. * Each named volume must be used by at least two build steps. */ name: string; /** * Path at which to mount the volume. * * Paths must be absolute and cannot conflict with other volume paths on the same * build step or with certain reserved volume paths. */ path: string; } interface GetTriggerBuildSecret { /** * Cloud KMS key name to use to decrypt these envs. */ kmsKeyName: string; /** * Map of environment variable name to its encrypted value. * Secret environment variables must be unique across all of a build's secrets, * and must be used by at least one build step. Values can be at most 64 KB in size. * There can be at most 100 secret values across all of a build's secrets. */ secretEnv: { [key: string]: string; }; } interface GetTriggerBuildSource { /** * Location of the source in a Google Cloud Source Repository. */ repoSources: outputs.cloudbuild.GetTriggerBuildSourceRepoSource[]; /** * Location of the source in an archive file in Google Cloud Storage. */ storageSources: outputs.cloudbuild.GetTriggerBuildSourceStorageSource[]; } interface GetTriggerBuildSourceRepoSource { /** * Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. * The syntax of the regular expressions accepted is the syntax accepted by RE2 and * described at https://github.com/google/re2/wiki/Syntax */ branchName: string; /** * Explicit commit SHA to build. Exactly one a of branch name, tag, or commit SHA must be provided. */ commitSha: string; /** * Directory, relative to the source root, in which to run the build. * This must be a relative path. If a step's dir is specified and is an absolute path, * this value is ignored for that step's execution. */ dir: string; /** * Only trigger a build if the revision regex does NOT match the revision regex. 
*/ invertRegex: boolean; /** * ID of the project that owns the Cloud Source Repository. * If omitted, the project ID requesting the build is assumed. */ projectId: string; /** * Name of the Cloud Source Repository. */ repoName: string; /** * Substitutions to use in a triggered build. Should only be used with triggers.run */ substitutions: { [key: string]: string; }; /** * Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. * The syntax of the regular expressions accepted is the syntax accepted by RE2 and * described at https://github.com/google/re2/wiki/Syntax */ tagName: string; } interface GetTriggerBuildSourceStorageSource { /** * Google Cloud Storage bucket containing the source. */ bucket: string; /** * Google Cloud Storage generation for the object. * If the generation is omitted, the latest generation will be used */ generation: string; /** * Google Cloud Storage object containing the source. * This object must be a gzipped archive file (.tar.gz) containing source to build. */ object: string; } interface GetTriggerBuildStep { /** * Allow this build step to fail without failing the entire build if and * only if the exit code is one of the specified codes. * * If 'allowFailure' is also specified, this field will take precedence. */ allowExitCodes: number[]; /** * Allow this build step to fail without failing the entire build. * If false, the entire build will fail if this step fails. Otherwise, the * build will succeed, but this step will still have a failure status. * Error information will be reported in the 'failureDetail' field. * * 'allowExitCodes' takes precedence over this field. */ allowFailure: boolean; /** * A list of arguments that will be presented to the step when it is started. * * If the image used to run the step's container has an entrypoint, the args * are used as arguments to that entrypoint. 
If the image does not define an * entrypoint, the first element in args is used as the entrypoint, and the * remainder will be used as arguments. */ args: string[]; /** * Working directory to use when running this step's container. * * If this value is a relative path, it is relative to the build's working * directory. If this value is absolute, it may be outside the build's working * directory, in which case the contents of the path may not be persisted * across build step executions, unless a 'volume' for that path is specified. * * If the build specifies a 'RepoSource' with 'dir' and a step with a * 'dir', * which specifies an absolute path, the 'RepoSource' 'dir' is ignored * for the step's execution. */ dir: string; /** * Entrypoint to be used instead of the build step image's * default entrypoint. * If unset, the image's default entrypoint is used */ entrypoint: string; /** * A list of environment variable definitions to be used when * running a step. * * The elements are of the form "KEY=VALUE" for the environment variable * "KEY" being given the value "VALUE". */ envs: string[]; /** * Unique identifier for this build step, used in 'wait_for' to * reference this build step as a dependency. */ id: string; /** * The name of the container image that will run this particular build step. * * If the image is available in the host's Docker daemon's cache, it will be * run directly. If not, the host will attempt to pull the image first, using * the builder service account's credentials if necessary. * * The Docker daemon's cache will already have the latest versions of all of * the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders * for images and examples). * The Docker daemon will also have cached many of the layers for some popular * images, like "ubuntu", "debian", but they will be refreshed at the time * you attempt to use them. 
* * If you built an image in a previous build step, it will be stored in the * host's Docker daemon's cache and is available to use as the name for a * later build step. */ name: string; /** * A shell script to be executed in the step. * When script is provided, the user cannot specify the entrypoint or args. */ script: string; /** * A list of environment variables which are encrypted using * a Cloud Key * Management Service crypto key. These values must be specified in * the build's 'Secret'. */ secretEnvs: string[]; /** * Time limit for executing this build step. If not defined, * the step has no * time limit and will be allowed to continue to run until either it * completes or the build itself times out. */ timeout: string; /** * Output only. Stores timing information for executing this * build step. */ timing: string; /** * List of volumes to mount into the build step. * * Each volume is created as an empty volume prior to execution of the * build step. Upon completion of the build, volumes and their contents * are discarded. * * Using a named volume in only one step is not valid as it is * indicative of a build request with an incorrect configuration. */ volumes: outputs.cloudbuild.GetTriggerBuildStepVolume[]; /** * The ID(s) of the step(s) that this build step depends on. * * This build step will not start until all the build steps in 'wait_for' * have completed successfully. If 'wait_for' is empty, this build step * will start when all previous build steps in the 'Build.Steps' list * have completed successfully. */ waitFors: string[]; } interface GetTriggerBuildStepVolume { /** * Name of the volume to mount. * * Volume names must be unique per build step and must be valid names for * Docker volumes. Each named volume must be used by at least two build steps. */ name: string; /** * Path at which to mount the volume. * * Paths must be absolute and cannot conflict with other volume paths on * the same build step or with certain reserved volume paths. 
*/ path: string; } interface GetTriggerDeveloperConnectEventConfig { /** * The Developer Connect Git repository link, formatted as 'projects/*/locations/*/connections/*/gitRepositoryLink/*'. */ gitRepositoryLink: string; /** * The type of DeveloperConnect GitRepositoryLink. */ gitRepositoryLinkType: string; /** * Filter to match changes in pull requests. */ pullRequests: outputs.cloudbuild.GetTriggerDeveloperConnectEventConfigPullRequest[]; /** * Filter to match changes in refs like branches and tags. */ pushes: outputs.cloudbuild.GetTriggerDeveloperConnectEventConfigPush[]; } interface GetTriggerDeveloperConnectEventConfigPullRequest { /** * Regex of branches to match. */ branch: string; /** * Configure builds to run whether a repository owner or collaborator need to comment '/gcbrun'. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"] */ commentControl: string; /** * If true, branches that do NOT match the gitRef will trigger a build. */ invertRegex: boolean; } interface GetTriggerDeveloperConnectEventConfigPush { /** * Regex of branches to match. */ branch: string; /** * If true, only trigger a build if the revision regex does NOT match the gitRef regex. */ invertRegex: boolean; /** * Regex of tags to match. */ tag: string; } interface GetTriggerGitFileSource { /** * The full resource name of the bitbucket server config. * Format: projects/{project}/locations/{location}/bitbucketServerConfigs/{id}. */ bitbucketServerConfig: string; /** * The full resource name of the github enterprise config. * Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}. projects/{project}/githubEnterpriseConfigs/{id}. */ githubEnterpriseConfig: string; /** * The path of the file, with the repo root as the root of the path. */ path: string; /** * The type of the repo, since it may not be explicit from the repo field (e.g from a URL). 
* Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"] */ repoType: string; /** * The fully qualified resource name of the Repo API repository. The fully qualified resource name of the Repo API repository. * If unspecified, the repo from which the trigger invocation originated is assumed to be the repo from which to read the specified path. */ repository: string; /** * The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the * filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions * If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path. */ revision: string; /** * The URI of the repo (optional). If unspecified, the repo from which the trigger * invocation originated is assumed to be the repo from which to read the specified path. */ uri: string; } interface GetTriggerGithub { /** * The resource name of the github enterprise config that should be applied to this installation. * For example: "projects/{$projectId}/locations/{$locationId}/githubEnterpriseConfigs/{$configId}" */ enterpriseConfigResourceName: string; /** * Name of the repository. For example: The name for * https://github.com/googlecloudplatform/cloud-builders is "cloud-builders". */ name: string; /** * Owner of the repository. For example: The owner for * https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform". */ owner: string; /** * filter to match changes in pull requests. Specify only one of 'pull_request' or 'push'. */ pullRequests: outputs.cloudbuild.GetTriggerGithubPullRequest[]; /** * filter to match changes in refs, like branches or tags. Specify only one of 'pull_request' or 'push'. 
*/ pushes: outputs.cloudbuild.GetTriggerGithubPush[]; } interface GetTriggerGithubPullRequest { /** * Regex of branches to match. */ branch: string; /** * Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"] */ commentControl: string; /** * If true, branches that do NOT match the gitRef will trigger a build. */ invertRegex: boolean; } interface GetTriggerGithubPush { /** * Regex of branches to match. Specify only one of branch or tag. */ branch: string; /** * When true, only trigger a build if the revision regex does NOT match the gitRef regex. */ invertRegex: boolean; /** * Regex of tags to match. Specify only one of branch or tag. */ tag: string; } interface GetTriggerPubsubConfig { /** * Service account that will make the push request. */ serviceAccountEmail: string; /** * Potential issues with the underlying Pub/Sub subscription configuration. * Only populated on get requests. */ state: string; /** * Output only. Name of the subscription. */ subscription: string; /** * The name of the topic from which this subscription is receiving messages. */ topic: string; } interface GetTriggerRepositoryEventConfig { /** * Contains filter properties for matching Pull Requests. */ pullRequests: outputs.cloudbuild.GetTriggerRepositoryEventConfigPullRequest[]; /** * Contains filter properties for matching git pushes. */ pushes: outputs.cloudbuild.GetTriggerRepositoryEventConfigPush[]; /** * The resource name of the Repo API resource. */ repository: string; } interface GetTriggerRepositoryEventConfigPullRequest { /** * Regex of branches to match. * * The syntax of the regular expressions accepted is the syntax accepted by * RE2 and described at https://github.com/google/re2/wiki/Syntax */ branch: string; /** * Configure builds to run whether a repository owner or collaborator need to comment '/gcbrun'. 
Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"] */ commentControl: string; /** * If true, branches that do NOT match the gitRef will trigger a build. */ invertRegex: boolean; } interface GetTriggerRepositoryEventConfigPush { /** * Regex of branches to match. * * The syntax of the regular expressions accepted is the syntax accepted by * RE2 and described at https://github.com/google/re2/wiki/Syntax */ branch: string; /** * If true, only trigger a build if the revision regex does NOT match the gitRef regex. */ invertRegex: boolean; /** * Regex of tags to match. * * The syntax of the regular expressions accepted is the syntax accepted by * RE2 and described at https://github.com/google/re2/wiki/Syntax */ tag: string; } interface GetTriggerSourceToBuild { /** * The full resource name of the bitbucket server config. * Format: projects/{project}/locations/{location}/bitbucketServerConfigs/{id}. */ bitbucketServerConfig: string; /** * The full resource name of the github enterprise config. * Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}. projects/{project}/githubEnterpriseConfigs/{id}. */ githubEnterpriseConfig: string; /** * The branch or tag to use. Must start with "refs/" (required). */ ref: string; /** * The type of the repo, since it may not be explicit from the repo field (e.g from a URL). * Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"] */ repoType: string; /** * The qualified resource name of the Repo API repository. * Either uri or repository can be specified and is required. */ repository: string; /** * The URI of the repo. */ uri: string; } interface GetTriggerTriggerTemplate { /** * Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. * This field is a regular expression. 
*/ branchName: string; /** * Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided. */ commitSha: string; /** * Directory, relative to the source root, in which to run the build. * * This must be a relative path. If a step's dir is specified and * is an absolute path, this value is ignored for that step's * execution. */ dir: string; /** * Only trigger a build if the revision regex does NOT match the revision regex. */ invertRegex: boolean; /** * ID of the project that owns the Cloud Source Repository. If * omitted, the project ID requesting the build is assumed. */ projectId: string; /** * Name of the Cloud Source Repository. If omitted, the name "default" is assumed. */ repoName: string; /** * Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. * This field is a regular expression. */ tagName: string; } interface GetTriggerWebhookConfig { /** * Resource name for the secret required as a URL parameter. */ secret: string; /** * Potential issues with the underlying Pub/Sub subscription configuration. * Only populated on get requests. */ state: string; } interface TriggerApprovalConfig { /** * Whether or not approval is needed. If this is set on a build, it will become pending when run, * and will need to be explicitly approved to start. */ approvalRequired?: boolean; } interface TriggerBitbucketServerTriggerConfig { /** * The Bitbucket server config resource that this trigger config maps to. */ bitbucketServerConfigResource: string; /** * Key of the project that the repo is in. For example: The key for https://mybitbucket.server/projects/TEST/repos/test-repo is "TEST". */ projectKey: string; /** * Filter to match changes in pull requests. * Structure is documented below. */ pullRequest?: outputs.cloudbuild.TriggerBitbucketServerTriggerConfigPullRequest; /** * Filter to match changes in refs like branches, tags. * Structure is documented below. 
*/ push?: outputs.cloudbuild.TriggerBitbucketServerTriggerConfigPush; /** * Slug of the repository. A repository slug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL. * For example, if the repository name is 'test repo', in the URL it would become 'test-repo' as in https://mybitbucket.server/projects/TEST/repos/test-repo. */ repoSlug: string; } interface TriggerBitbucketServerTriggerConfigPullRequest { /** * Regex of branches to match. */ branch: string; /** * Configure builds to run whether a repository owner or collaborator need to comment `/gcbrun`. * Possible values are: `COMMENTS_DISABLED`, `COMMENTS_ENABLED`, `COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY`. */ commentControl?: string; /** * If true, branches that do NOT match the gitRef will trigger a build. */ invertRegex?: boolean; } interface TriggerBitbucketServerTriggerConfigPush { /** * Regex of branches to match. */ branch?: string; /** * If true, only trigger a build if the revision regex does NOT match the gitRef regex. */ invertRegex?: boolean; /** * Regex of tags to match. */ tag?: string; } interface TriggerBuild { /** * Artifacts produced by the build that should be uploaded upon successful completion of all build steps. * Structure is documented below. */ artifacts?: outputs.cloudbuild.TriggerBuildArtifacts; /** * Secrets and secret environment variables. * Structure is documented below. */ availableSecrets?: outputs.cloudbuild.TriggerBuildAvailableSecrets; /** * A list of images to be pushed upon the successful completion of all build steps. * The images are pushed using the builder service account's credentials. * The digests of the pushed images will be stored in the Build resource's results field. * If any of the images fail to be pushed, the build status is marked FAILURE. */ images?: string[]; /** * Google Cloud Storage bucket where logs should be written. * Logs file names will be of the format ${logsBucket}/log-${build_id}.txt. 
*/ logsBucket?: string; /** * Special options for this build. * Structure is documented below. */ options?: outputs.cloudbuild.TriggerBuildOptions; /** * TTL in queue for this build. If provided and the build is enqueued longer than this value, * the build will expire and the build status will be EXPIRED. * The TTL starts ticking from createTime. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ queueTtl?: string; /** * Secrets to decrypt using Cloud Key Management Service. * Structure is documented below. */ secrets?: outputs.cloudbuild.TriggerBuildSecret[]; /** * The location of the source files to build. * One of `storageSource` or `repoSource` must be provided. * Structure is documented below. */ source?: outputs.cloudbuild.TriggerBuildSource; /** * The operations to be performed on the workspace. * Structure is documented below. */ steps: outputs.cloudbuild.TriggerBuildStep[]; /** * Substitutions data for Build resource. */ substitutions?: { [key: string]: string; }; /** * Tags for annotation of a Build. These are not docker tags. */ tags?: string[]; /** * Amount of time that this build should be allowed to run, to second granularity. * If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. * This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. * The expected format is the number of seconds followed by s. * Default time is ten minutes (600s). */ timeout?: string; } interface TriggerBuildArtifacts { /** * A list of images to be pushed upon the successful completion of all build steps. * The images will be pushed using the builder service account's credentials. * The digests of the pushed images will be stored in the Build resource's results field. * If any of the images fail to be pushed, the build is marked FAILURE. 
*/ images?: string[]; /** * A Maven artifact to upload to Artifact Registry upon successful completion of all build steps. * The location and generation of the uploaded objects will be stored in the Build resource's results field. * If any objects fail to be pushed, the build is marked FAILURE. * Structure is documented below. */ mavenArtifacts?: outputs.cloudbuild.TriggerBuildArtifactsMavenArtifact[]; /** * Npm package to upload to Artifact Registry upon successful completion of all build steps. * The location and generation of the uploaded objects will be stored in the Build resource's results field. * If any objects fail to be pushed, the build is marked FAILURE. * Structure is documented below. */ npmPackages?: outputs.cloudbuild.TriggerBuildArtifactsNpmPackage[]; /** * A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. * Files in the workspace matching specified paths globs will be uploaded to the * Cloud Storage location using the builder service account's credentials. * The location and generation of the uploaded objects will be stored in the Build resource's results field. * If any objects fail to be pushed, the build is marked FAILURE. * Structure is documented below. */ objects?: outputs.cloudbuild.TriggerBuildArtifactsObjects; /** * Python package to upload to Artifact Registry upon successful completion of all build steps. A package can encapsulate multiple objects to be uploaded to a single repository. * The location and generation of the uploaded objects will be stored in the Build resource's results field. * If any objects fail to be pushed, the build is marked FAILURE. * Structure is documented below. */ pythonPackages?: outputs.cloudbuild.TriggerBuildArtifactsPythonPackage[]; } interface TriggerBuildArtifactsMavenArtifact { /** * Maven artifactId value used when uploading the artifact to Artifact Registry. 
*/ artifactId?: string; /** * Maven groupId value used when uploading the artifact to Artifact Registry. */ groupId?: string; /** * Path to an artifact in the build's workspace to be uploaded to Artifact Registry. This can be either an absolute path, e.g. /workspace/my-app/target/my-app-1.0.SNAPSHOT.jar or a relative path from /workspace, e.g. my-app/target/my-app-1.0.SNAPSHOT.jar. */ path?: string; /** * Artifact Registry repository, in the form "https://$REGION-maven.pkg.dev/$PROJECT/$REPOSITORY" * Artifact in the workspace specified by path will be uploaded to Artifact Registry with this location as a prefix. */ repository?: string; /** * Maven version value used when uploading the artifact to Artifact Registry. */ version?: string; } interface TriggerBuildArtifactsNpmPackage { /** * Path to the package.json. e.g. workspace/path/to/package */ packagePath?: string; /** * Artifact Registry repository, in the form "https://$REGION-npm.pkg.dev/$PROJECT/$REPOSITORY" * Npm package in the workspace specified by path will be zipped and uploaded to Artifact Registry with this location as a prefix. */ repository?: string; } interface TriggerBuildArtifactsObjects { /** * Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". * Files in the workspace matching any path pattern will be uploaded to Cloud Storage with * this location as a prefix. */ location?: string; /** * Path globs used to match files in the build's workspace. */ paths?: string[]; /** * (Output) * Output only. Stores timing information for pushing all artifact objects. * Structure is documented below. * * * The `timing` block contains: */ timings: outputs.cloudbuild.TriggerBuildArtifactsObjectsTiming[]; } interface TriggerBuildArtifactsObjectsTiming { /** * End of time span. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to * nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
*/ endTime?: string; /** * Start of time span. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to * nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ startTime?: string; } interface TriggerBuildArtifactsPythonPackage { /** * Path globs used to match files in the build's workspace. For Python/ Twine, this is usually dist/*, and sometimes additionally an .asc file. */ paths?: string[]; /** * Artifact Registry repository, in the form "https://$REGION-python.pkg.dev/$PROJECT/$REPOSITORY" * Files in the workspace matching any path pattern will be uploaded to Artifact Registry with this location as a prefix. */ repository?: string; } interface TriggerBuildAvailableSecrets { /** * Pairs a secret environment variable with a SecretVersion in Secret Manager. * Structure is documented below. */ secretManagers: outputs.cloudbuild.TriggerBuildAvailableSecretsSecretManager[]; } interface TriggerBuildAvailableSecretsSecretManager { /** * Environment variable name to associate with the secret. Secret environment * variables must be unique across all of a build's secrets, and must be used * by at least one build step. */ env: string; /** * Resource name of the SecretVersion. In format: projects/*/secrets/*/versions/* */ versionName: string; } interface TriggerBuildOptions { /** * Requested disk size for the VM that runs the build. Note that this is NOT "disk free"; * some of the space will be used by the operating system and build utilities. * Also note that this is the minimum disk size that will be allocated for the build -- * the build may run with a larger disk than requested. At present, the maximum disk size * is 1000GB; builds that request more than the maximum are rejected with an error. */ diskSizeGb?: number; /** * Option to specify whether or not to apply bash style string operations to the substitutions. 
* NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file. */ dynamicSubstitutions?: boolean; /** * A list of global environment variable definitions that will exist for all build steps * in this build. If a variable is defined in both globally and in a build step, * the variable will use the build step value. * The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE". */ envs?: string[]; /** * Option to define build log streaming behavior to Google Cloud Storage. * Possible values are: `STREAM_DEFAULT`, `STREAM_ON`, `STREAM_OFF`. */ logStreamingOption?: string; /** * Option to specify the logging mode, which determines if and where build logs are stored. * Possible values are: `LOGGING_UNSPECIFIED`, `LEGACY`, `GCS_ONLY`, `STACKDRIVER_ONLY`, `CLOUD_LOGGING_ONLY`, `NONE`. */ logging?: string; /** * Compute Engine machine type on which to run the build. */ machineType?: string; /** * Requested verifiability options. * Possible values are: `NOT_VERIFIED`, `VERIFIED`. */ requestedVerifyOption?: string; /** * A list of global environment variables, which are encrypted using a Cloud Key Management * Service crypto key. These values must be specified in the build's Secret. These variables * will be available to all build steps in this build. */ secretEnvs?: string[]; /** * Requested hash for SourceProvenance. * Each value may be one of: `NONE`, `SHA256`, `MD5`. */ sourceProvenanceHashes?: string[]; /** * Option to specify behavior when there is an error in the substitution checks. * NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden * in the build configuration file. * Possible values are: `MUST_MATCH`, `ALLOW_LOOSE`. */ substitutionOption?: string; /** * Global list of volumes to mount for ALL build steps * Each volume is created as an empty volume prior to starting the build process. 
* Upon completion of the build, volumes and their contents are discarded. Global * volume names and paths cannot conflict with the volumes defined a build step. * Using a global volume in a build with only one step is not valid as it is indicative * of a build request with an incorrect configuration. * Structure is documented below. */ volumes?: outputs.cloudbuild.TriggerBuildOptionsVolume[]; /** * Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} * This field is experimental. */ workerPool?: string; } interface TriggerBuildOptionsVolume { /** * Name of the volume to mount. * Volume names must be unique per build step and must be valid names for Docker volumes. * Each named volume must be used by at least two build steps. */ name?: string; /** * Path at which to mount the volume. * Paths must be absolute and cannot conflict with other volume paths on the same * build step or with certain reserved volume paths. */ path?: string; } interface TriggerBuildSecret { /** * Cloud KMS key name to use to decrypt these envs. */ kmsKeyName: string; /** * Map of environment variable name to its encrypted value. * Secret environment variables must be unique across all of a build's secrets, * and must be used by at least one build step. Values can be at most 64 KB in size. * There can be at most 100 secret values across all of a build's secrets. */ secretEnv?: { [key: string]: string; }; } interface TriggerBuildSource { /** * Location of the source in a Google Cloud Source Repository. * Structure is documented below. */ repoSource?: outputs.cloudbuild.TriggerBuildSourceRepoSource; /** * Location of the source in an archive file in Google Cloud Storage. * Structure is documented below. */ storageSource?: outputs.cloudbuild.TriggerBuildSourceStorageSource; } interface TriggerBuildSourceRepoSource { /** * Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
* The syntax of the regular expressions accepted is the syntax accepted by RE2 and * described at https://github.com/google/re2/wiki/Syntax */ branchName?: string; /** * Explicit commit SHA to build. Exactly one a of branch name, tag, or commit SHA must be provided. */ commitSha?: string; /** * Directory, relative to the source root, in which to run the build. * This must be a relative path. If a step's dir is specified and is an absolute path, * this value is ignored for that step's execution. */ dir?: string; /** * Only trigger a build if the revision regex does NOT match the revision regex. */ invertRegex?: boolean; /** * ID of the project that owns the Cloud Source Repository. * If omitted, the project ID requesting the build is assumed. */ projectId?: string; /** * Name of the Cloud Source Repository. */ repoName: string; /** * Substitutions to use in a triggered build. Should only be used with triggers.run */ substitutions?: { [key: string]: string; }; /** * Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. * The syntax of the regular expressions accepted is the syntax accepted by RE2 and * described at https://github.com/google/re2/wiki/Syntax */ tagName?: string; } interface TriggerBuildSourceStorageSource { /** * Google Cloud Storage bucket containing the source. */ bucket: string; /** * Google Cloud Storage generation for the object. * If the generation is omitted, the latest generation will be used */ generation?: string; /** * Google Cloud Storage object containing the source. * This object must be a gzipped archive file (.tar.gz) containing source to build. */ object: string; } interface TriggerBuildStep { /** * Allow this build step to fail without failing the entire build if and * only if the exit code is one of the specified codes. * If `allowFailure` is also specified, this field will take precedence. */ allowExitCodes?: number[]; /** * Allow this build step to fail without failing the entire build. 
 * If false, the entire build will fail if this step fails. Otherwise, the
 * build will succeed, but this step will still have a failure status.
 * Error information will be reported in the `failureDetail` field.
 * `allowExitCodes` takes precedence over this field.
 */
allowFailure?: boolean;
/**
 * A list of arguments that will be presented to the step when it is started.
 * If the image used to run the step's container has an entrypoint, the args
 * are used as arguments to that entrypoint. If the image does not define an
 * entrypoint, the first element in args is used as the entrypoint, and the
 * remainder will be used as arguments.
 */
args?: string[];
/**
 * Working directory to use when running this step's container.
 * If this value is a relative path, it is relative to the build's working
 * directory. If this value is absolute, it may be outside the build's working
 * directory, in which case the contents of the path may not be persisted
 * across build step executions, unless a `volume` for that path is specified.
 * If the build specifies a `RepoSource` with `dir` and a step with a `dir`,
 * which specifies an absolute path, the `RepoSource` `dir` is ignored
 * for the step's execution.
 */
dir?: string;
/**
 * Entrypoint to be used instead of the build step image's
 * default entrypoint.
 * If unset, the image's default entrypoint is used.
 */
entrypoint?: string;
/**
 * A list of environment variable definitions to be used when
 * running a step.
 * The elements are of the form "KEY=VALUE" for the environment variable
 * "KEY" being given the value "VALUE".
 */
envs?: string[];
/**
 * Unique identifier for this build step, used in `waitFor` to
 * reference this build step as a dependency.
 */
id?: string;
/**
 * The name of the container image that will run this particular build step.
 * If the image is available in the host's Docker daemon's cache, it will be
 * run directly. If not, the host will attempt to pull the image first, using
 * the builder service account's credentials if necessary.
 * The Docker daemon's cache will already have the latest versions of all of
 * the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders
 * for images and examples).
 * The Docker daemon will also have cached many of the layers for some popular
 * images, like "ubuntu", "debian", but they will be refreshed at the time
 * you attempt to use them.
 * If you built an image in a previous build step, it will be stored in the
 * host's Docker daemon's cache and is available to use as the name for a
 * later build step.
 */
name: string;
/**
 * A shell script to be executed in the step.
 * When script is provided, the user cannot specify the entrypoint or args.
 */
script?: string;
/**
 * A list of environment variables which are encrypted using
 * a Cloud Key
 * Management Service crypto key. These values must be specified in
 * the build's `Secret`.
 */
secretEnvs?: string[];
/**
 * Time limit for executing this build step. If not defined,
 * the step has no
 * time limit and will be allowed to continue to run until either it
 * completes or the build itself times out.
 */
timeout?: string;
/**
 * Output only. Stores timing information for executing this
 * build step.
 */
timing?: string;
/**
 * List of volumes to mount into the build step.
 * Each volume is created as an empty volume prior to execution of the
 * build step. Upon completion of the build, volumes and their contents
 * are discarded.
 * Using a named volume in only one step is not valid as it is
 * indicative of a build request with an incorrect configuration.
 * Structure is documented below.
 */
volumes?: outputs.cloudbuild.TriggerBuildStepVolume[];
/**
 * The ID(s) of the step(s) that this build step depends on.
 * This build step will not start until all the build steps in `waitFor`
 * have completed successfully. If `waitFor` is empty, this build step
 * will start when all previous build steps in the `Build.Steps` list
 * have completed successfully.
 */
waitFors?: string[];
}
interface TriggerBuildStepVolume {
/**
 * Name of the volume to mount.
 * Volume names must be unique per build step and must be valid names for Docker volumes.
 * Each named volume must be used by at least two build steps.
 */
name: string;
/**
 * Path at which to mount the volume.
 * Paths must be absolute and cannot conflict with other volume paths on the same
 * build step or with certain reserved volume paths.
 */
path: string;
}
interface TriggerDeveloperConnectEventConfig {
/**
 * The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`.
 */
gitRepositoryLink: string;
/**
 * (Output)
 * The type of DeveloperConnect GitRepositoryLink.
 */
gitRepositoryLinkType: string;
/**
 * Filter to match changes in pull requests.
 * Structure is documented below.
 */
pullRequest?: outputs.cloudbuild.TriggerDeveloperConnectEventConfigPullRequest;
/**
 * Filter to match changes in refs like branches and tags.
 * Structure is documented below.
 */
push?: outputs.cloudbuild.TriggerDeveloperConnectEventConfigPush;
}
interface TriggerDeveloperConnectEventConfigPullRequest {
/**
 * Regex of branches to match.
 */
branch?: string;
/**
 * Configure builds to run whether a repository owner or collaborator needs to comment `/gcbrun`.
 * Possible values are: `COMMENTS_DISABLED`, `COMMENTS_ENABLED`, `COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY`.
 */
commentControl?: string;
/**
 * If true, branches that do NOT match the gitRef will trigger a build.
 */
invertRegex?: boolean;
}
interface TriggerDeveloperConnectEventConfigPush {
/**
 * Regex of branches to match.
 */
branch?: string;
/**
 * If true, only trigger a build if the revision regex does NOT match the gitRef regex.
 */
invertRegex?: boolean;
/**
 * Regex of tags to match.
 */
tag?: string;
}
interface TriggerGitFileSource {
/**
 * The full resource name of the bitbucket server config.
 * Format: projects/{project}/locations/{location}/bitbucketServerConfigs/{id}.
 */
bitbucketServerConfig?: string;
/**
 * The full resource name of the github enterprise config.
 * Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id} or projects/{project}/githubEnterpriseConfigs/{id}.
 */
githubEnterpriseConfig?: string;
/**
 * The path of the file, with the repo root as the root of the path.
 */
path: string;
/**
 * The type of the repo, since it may not be explicit from the repo field (e.g from a URL).
 * Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER
 * Possible values are: `UNKNOWN`, `CLOUD_SOURCE_REPOSITORIES`, `GITHUB`, `BITBUCKET_SERVER`.
 */
repoType: string;
/**
 * The fully qualified resource name of the Repo API repository.
 * If unspecified, the repo from which the trigger invocation originated is assumed to be the repo from which to read the specified path.
 */
repository?: string;
/**
 * The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the
 * filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions
 * If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.
 */
revision?: string;
/**
 * The URI of the repo (optional). If unspecified, the repo from which the trigger
 * invocation originated is assumed to be the repo from which to read the specified path.
 */
uri?: string;
}
interface TriggerGithub {
/**
 * The resource name of the github enterprise config that should be applied to this installation.
 * For example: "projects/{$projectId}/locations/{$locationId}/githubEnterpriseConfigs/{$configId}"
 */
enterpriseConfigResourceName?: string;
/**
 * Name of the repository. For example: The name for
 * https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".
 */
name?: string;
/**
 * Owner of the repository. For example: The owner for
 * https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".
 */
owner?: string;
/**
 * Filter to match changes in pull requests. Specify only one of `pullRequest` or `push`.
 * Structure is documented below.
 */
pullRequest?: outputs.cloudbuild.TriggerGithubPullRequest;
/**
 * Filter to match changes in refs, like branches or tags. Specify only one of `pullRequest` or `push`.
 * Structure is documented below.
 */
push?: outputs.cloudbuild.TriggerGithubPush;
}
interface TriggerGithubPullRequest {
/**
 * Regex of branches to match.
 */
branch: string;
/**
 * Configure builds to run whether a repository owner or collaborator needs to comment `/gcbrun`.
 * Possible values are: `COMMENTS_DISABLED`, `COMMENTS_ENABLED`, `COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY`.
 */
commentControl?: string;
/**
 * If true, branches that do NOT match the gitRef will trigger a build.
 */
invertRegex?: boolean;
}
interface TriggerGithubPush {
/**
 * Regex of branches to match.
 */
branch?: string;
/**
 * If true, only trigger a build if the revision regex does NOT match the gitRef regex.
 */
invertRegex?: boolean;
/**
 * Regex of tags to match.
 */
tag?: string;
}
interface TriggerPubsubConfig {
/**
 * Service account that will make the push request.
 */
serviceAccountEmail?: string;
/**
 * (Output)
 * Potential issues with the underlying Pub/Sub subscription configuration.
 * Only populated on get requests.
 */
state: string;
/**
 * (Output)
 * Output only. Name of the subscription.
 */
subscription: string;
/**
 * The name of the topic from which this subscription is receiving messages.
 */
topic: string;
}
interface TriggerRepositoryEventConfig {
/**
 * Contains filter properties for matching Pull Requests.
 * Structure is documented below.
 */
pullRequest?: outputs.cloudbuild.TriggerRepositoryEventConfigPullRequest;
/**
 * Contains filter properties for matching git pushes.
 * Structure is documented below.
 */
push?: outputs.cloudbuild.TriggerRepositoryEventConfigPush;
/**
 * The resource name of the Repo API resource.
 */
repository?: string;
}
interface TriggerRepositoryEventConfigPullRequest {
/**
 * Regex of branches to match.
 */
branch?: string;
/**
 * Configure builds to run whether a repository owner or collaborator needs to comment `/gcbrun`.
 * Possible values are: `COMMENTS_DISABLED`, `COMMENTS_ENABLED`, `COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY`.
 */
commentControl?: string;
/**
 * If true, branches that do NOT match the gitRef will trigger a build.
 */
invertRegex?: boolean;
}
interface TriggerRepositoryEventConfigPush {
/**
 * Regex of branches to match.
 */
branch?: string;
/**
 * If true, only trigger a build if the revision regex does NOT match the gitRef regex.
 */
invertRegex?: boolean;
/**
 * Regex of tags to match.
 */
tag?: string;
}
interface TriggerSourceToBuild {
/**
 * The full resource name of the bitbucket server config.
 * Format: projects/{project}/locations/{location}/bitbucketServerConfigs/{id}.
 */
bitbucketServerConfig?: string;
/**
 * The full resource name of the github enterprise config.
 * Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id} or projects/{project}/githubEnterpriseConfigs/{id}.
 */
githubEnterpriseConfig?: string;
/**
 * The branch or tag to use. Must start with "refs/" (required).
 */
ref: string;
/**
 * The type of the repo, since it may not be explicit from the repo field (e.g from a URL).
 * Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER
 * Possible values are: `UNKNOWN`, `CLOUD_SOURCE_REPOSITORIES`, `GITHUB`, `BITBUCKET_SERVER`.
 */
repoType: string;
/**
 * The qualified resource name of the Repo API repository.
 * One of `uri` or `repository` must be specified.
 */
repository?: string;
/**
 * The URI of the repo.
 */
uri?: string;
}
interface TriggerTriggerTemplate {
/**
 * Name of the branch to build. Exactly one of a branch name, tag, or commit SHA must be provided.
 * This field is a regular expression.
 */
branchName?: string;
/**
 * Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided.
 */
commitSha?: string;
/**
 * Directory, relative to the source root, in which to run the build.
 * This must be a relative path. If a step's dir is specified and
 * is an absolute path, this value is ignored for that step's
 * execution.
 */
dir?: string;
/**
 * Only trigger a build if the revision does NOT match the revision regex.
 */
invertRegex?: boolean;
/**
 * ID of the project that owns the Cloud Source Repository. If
 * omitted, the project ID requesting the build is assumed.
 */
projectId: string;
/**
 * Name of the Cloud Source Repository. If omitted, the name "default" is assumed.
 */
repoName?: string;
/**
 * Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided.
 * This field is a regular expression.
 */
tagName?: string;
}
interface TriggerWebhookConfig {
/**
 * Resource name for the secret required as a URL parameter.
 */
secret: string;
/**
 * (Output)
 * Potential issues with the underlying Pub/Sub subscription configuration.
 * Only populated on get requests.
 */
state: string;
}
interface WorkerPoolNetworkConfig {
/**
 * Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See (https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)
 */
peeredNetwork: string;
/**
 * Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.
 */
peeredNetworkIpRange?: string;
}
interface WorkerPoolPrivateServiceConnect {
/**
 * Required. Immutable. The network attachment that the worker network interface is connected to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)
 */
networkAttachment: string;
/**
 * Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.
 */
routeAllTraffic?: boolean;
}
interface WorkerPoolWorkerConfig {
/**
 * Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.
 */
diskSizeGb?: number;
/**
 * Enable nested virtualization on the worker, if supported by the machine type. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will set this to false.
 */
enableNestedVirtualization?: boolean;
/**
 * Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`.
 */
machineType?: string;
/**
 * If true, workers are created without any public address, which prevents network egress to public IPs.
 */
noExternalIp: boolean;
}
}
export declare namespace cloudbuildv2 {
interface ConnectionBitbucketCloudConfig {
/**
 * Required. An access token with the `webhook`, `repository`, `repository:admin` and `pullrequest` scope access. It can be either a workspace, project or repository access token. It's recommended to use a system account to generate these credentials.
 * Structure is documented below.
 */
authorizerCredential: outputs.cloudbuildv2.ConnectionBitbucketCloudConfigAuthorizerCredential;
/**
 * Required. An access token with the `repository` access. It can be either a workspace, project or repository access token. It's recommended to use a system account to generate the credentials.
 * Structure is documented below.
 */
readAuthorizerCredential: outputs.cloudbuildv2.ConnectionBitbucketCloudConfigReadAuthorizerCredential;
/**
 * Required. Immutable. SecretManager resource containing the webhook secret used to verify webhook events, formatted as `projects/*/secrets/*/versions/*`.
 */
webhookSecretSecretVersion: string;
/**
 * The Bitbucket Cloud Workspace ID to be connected to Google Cloud Platform.
 */
workspace: string;
}
interface ConnectionBitbucketCloudConfigAuthorizerCredential {
/**
 * Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
userTokenSecretVersion: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionBitbucketCloudConfigReadAuthorizerCredential {
/**
 * Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
userTokenSecretVersion: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionBitbucketDataCenterConfig {
/**
 * Required. A http access token with the `REPO_ADMIN` scope access.
 * Structure is documented below.
 */
authorizerCredential: outputs.cloudbuildv2.ConnectionBitbucketDataCenterConfigAuthorizerCredential;
/**
 * The URI of the Bitbucket Data Center host this connection is for.
 */
hostUri: string;
/**
 * Required. A http access token with the `REPO_READ` access.
 * Structure is documented below.
 */
readAuthorizerCredential: outputs.cloudbuildv2.ConnectionBitbucketDataCenterConfigReadAuthorizerCredential;
/**
 * (Output)
 * Output only. Version of the Bitbucket Data Center running on the `hostUri`.
 */
serverVersion: string;
/**
 * Configuration for using Service Directory to privately connect to a Bitbucket Data Center. This should only be set if the Bitbucket Data Center is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the Bitbucket Data Center will be made over the public internet.
 * Structure is documented below.
 */
serviceDirectoryConfig?: outputs.cloudbuildv2.ConnectionBitbucketDataCenterConfigServiceDirectoryConfig;
/**
 * SSL certificate to use for requests to the Bitbucket Data Center.
 */
sslCa?: string;
/**
 * Required. Immutable. SecretManager resource containing the webhook secret used to verify webhook events, formatted as `projects/*/secrets/*/versions/*`.
 */
webhookSecretSecretVersion: string;
}
interface ConnectionBitbucketDataCenterConfigAuthorizerCredential {
/**
 * Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
userTokenSecretVersion: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionBitbucketDataCenterConfigReadAuthorizerCredential {
/**
 * Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
userTokenSecretVersion: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionBitbucketDataCenterConfigServiceDirectoryConfig {
/**
 * Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.
 */
service: string;
}
interface ConnectionGithubConfig {
/**
 * GitHub App installation id.
 */
appInstallationId?: number;
/**
 * OAuth credential of the account that authorized the Cloud Build GitHub App. It is recommended to use a robot account instead of a human user account. The OAuth token must be tied to the Cloud Build GitHub App.
 * Structure is documented below.
 */
authorizerCredential?: outputs.cloudbuildv2.ConnectionGithubConfigAuthorizerCredential;
}
interface ConnectionGithubConfigAuthorizerCredential {
/**
 * A SecretManager resource containing the OAuth token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
oauthTokenSecretVersion?: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionGithubEnterpriseConfig {
/**
 * ID of the GitHub App created from the manifest.
 */
appId?: number;
/**
 * ID of the installation of the GitHub App.
 */
appInstallationId?: number;
/**
 * The URL-friendly name of the GitHub App.
 */
appSlug?: string;
/**
 * Required. The URI of the GitHub Enterprise host this connection is for.
 */
hostUri: string;
/**
 * SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*`.
 */
privateKeySecretVersion?: string;
/**
 * Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet.
 * Structure is documented below.
 */
serviceDirectoryConfig?: outputs.cloudbuildv2.ConnectionGithubEnterpriseConfigServiceDirectoryConfig;
/**
 * SSL certificate to use for requests to GitHub Enterprise.
 */
sslCa?: string;
/**
 * SecretManager resource containing the webhook secret of the GitHub App, formatted as `projects/*/secrets/*/versions/*`.
 */
webhookSecretSecretVersion?: string;
}
interface ConnectionGithubEnterpriseConfigServiceDirectoryConfig {
/**
 * Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.
 */
service: string;
}
interface ConnectionGitlabConfig {
/**
 * Required. A GitLab personal access token with the `api` scope access.
 * Structure is documented below.
 */
authorizerCredential: outputs.cloudbuildv2.ConnectionGitlabConfigAuthorizerCredential;
/**
 * The URI of the GitLab Enterprise host this connection is for. If not specified, the default value is https://gitlab.com.
 */
hostUri: string;
/**
 * Required. A GitLab personal access token with the minimum `readApi` scope access.
 * Structure is documented below.
 */
readAuthorizerCredential: outputs.cloudbuildv2.ConnectionGitlabConfigReadAuthorizerCredential;
/**
 * (Output)
 * Output only. Version of the GitLab Enterprise server running on the `hostUri`.
 */
serverVersion: string;
/**
 * Configuration for using Service Directory to privately connect to a GitLab Enterprise server. This should only be set if the GitLab Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitLab Enterprise server will be made over the public internet.
 * Structure is documented below.
 */
serviceDirectoryConfig?: outputs.cloudbuildv2.ConnectionGitlabConfigServiceDirectoryConfig;
/**
 * SSL certificate to use for requests to GitLab Enterprise.
 */
sslCa?: string;
/**
 * Required. Immutable. SecretManager resource containing the webhook secret of a GitLab Enterprise project, formatted as `projects/*/secrets/*/versions/*`.
 */
webhookSecretSecretVersion: string;
}
interface ConnectionGitlabConfigAuthorizerCredential {
/**
 * Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
userTokenSecretVersion: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionGitlabConfigReadAuthorizerCredential {
/**
 * Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.
 */
userTokenSecretVersion: string;
/**
 * (Output)
 * Output only. The username associated with this token.
 */
username: string;
}
interface ConnectionGitlabConfigServiceDirectoryConfig {
/**
 * Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.
 */
service: string;
}
interface ConnectionIAMBindingCondition {
description?: string;
expression: string;
title: string;
}
interface ConnectionIAMMemberCondition {
description?: string;
expression: string;
title: string;
}
interface ConnectionInstallationState {
/**
 * (Output)
 * Output only. Link to follow for next action. Empty string if the installation is already complete.
 */
actionUri: string;
/**
 * (Output)
 * Output only.
Message of what the user should do next to continue the installation. Empty string if the installation is already complete.
 */
message: string;
/**
 * (Output)
 * Output only. Current step of the installation process.
 */
stage: string;
}
}
export declare namespace clouddeploy {
interface AutomationRule {
/**
 * Optional. The `AdvanceRolloutRule` will automatically advance a successful Rollout.
 * Structure is documented below.
 */
advanceRolloutRule?: outputs.clouddeploy.AutomationRuleAdvanceRolloutRule;
/**
 * Optional. `PromoteReleaseRule` will automatically promote a release from the current target to a specified target.
 * Structure is documented below.
 */
promoteReleaseRule?: outputs.clouddeploy.AutomationRulePromoteReleaseRule;
/**
 * Optional. The RepairRolloutRule will automatically repair a failed rollout.
 * Structure is documented below.
 */
repairRolloutRule?: outputs.clouddeploy.AutomationRuleRepairRolloutRule;
/**
 * Optional. The `TimedPromoteReleaseRule` will automatically promote a release from the current target(s) to the specified target(s) on a configured schedule.
 * Structure is documented below.
 */
timedPromoteReleaseRule?: outputs.clouddeploy.AutomationRuleTimedPromoteReleaseRule;
}
interface AutomationRuleAdvanceRolloutRule {
/**
 * Required. ID of the rule. This id must be unique in the `Automation` resource to which this rule belongs. The format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.
 */
id: string;
/**
 * Optional. Proceeds only after phase name matched any one in the list. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.
 */
sourcePhases?: string[];
/**
 * Optional. How long to wait after a rollout is finished.
 */
wait?: string;
}
interface AutomationRulePromoteReleaseRule {
/**
 * Optional. The starting phase of the rollout created by this operation. Default to the first phase.
 */
destinationPhase?: string;
/**
 * Optional. The ID of the stage in the pipeline to which this `Release` is deploying. If unspecified, default it to the next stage in the promotion flow. The value of this field could be one of the following:
 * The last segment of a target name. It only needs the ID to determine if the target is one of the stages in the promotion sequence defined in the pipeline.
 * "@next", the next target in the promotion sequence.
 */
destinationTargetId?: string;
/**
 * Required. ID of the rule. This id must be unique in the `Automation` resource to which this rule belongs. The format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.
 */
id: string;
/**
 * Optional. How long the release needs to be paused until being promoted to the next target.
 */
wait?: string;
}
interface AutomationRuleRepairRolloutRule {
/**
 * Required. ID of the rule. This id must be unique in the `Automation` resource to which this rule belongs. The format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.
 */
id: string;
/**
 * Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in sourcePhase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.
 */
jobs?: string[];
/**
 * Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.
 */
phases?: string[];
/**
 * Optional. Proceeds only after phase name matched any one in the list. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.
 * Structure is documented below.
 */
repairPhases?: outputs.clouddeploy.AutomationRuleRepairRolloutRuleRepairPhase[];
}
interface AutomationRuleRepairRolloutRuleRepairPhase {
/**
 * Optional. Retries a failed job.
 * Structure is documented below.
 */
retry?: outputs.clouddeploy.AutomationRuleRepairRolloutRuleRepairPhaseRetry;
/**
 * Optional. Rolls back a Rollout.
 * Structure is documented below.
 */
rollback?: outputs.clouddeploy.AutomationRuleRepairRolloutRuleRepairPhaseRollback;
}
interface AutomationRuleRepairRolloutRuleRepairPhaseRetry {
/**
 * Required. Total number of retries. Retry is skipped if set to 0; the minimum value is 1, and the maximum value is 10.
 */
attempts: string;
/**
 * Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if wait is 0.
 * Possible values are: `BACKOFF_MODE_UNSPECIFIED`, `BACKOFF_MODE_LINEAR`, `BACKOFF_MODE_EXPONENTIAL`.
 */
backoffMode?: string;
/**
 * Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. A duration in seconds with up to nine fractional digits, ending with 's'. Example: `3.5s`.
 */
wait?: string;
}
interface AutomationRuleRepairRolloutRuleRepairPhaseRollback {
/**
 * Optional. The starting phase ID for the Rollout. If unspecified, the Rollout will start in the stable phase.
 */
destinationPhase?: string;
/**
 * Optional. If pending rollout exists on the target, the rollback operation will be aborted.
 */
disableRollbackIfRolloutPending?: boolean;
}
interface AutomationRuleTimedPromoteReleaseRule {
/**
 * Optional. The starting phase of the rollout created by this rule. Default to the first phase.
 */
destinationPhase?: string;
/**
 * Optional. The ID of the stage in the pipeline to which this Release is deploying.
If unspecified, default it to the next stage in the promotion flow. The value of this field could be one of the following: * - The last segment of a target name * - "@next", the next target in the promotion sequence */ destinationTargetId?: string; /** * Required. ID of the rule. This id must be unique in the `Automation` resource to which this rule belongs. The format is `[a-z]([a-z0-9-]){0,62}`. */ id: string; /** * Required. Schedule in crontab format. e.g. `0 9 * * 1` for every Monday at 9am. */ schedule: string; /** * Required. The time zone in IANA format (IANA Time Zone Database, e.g. America/New_York). */ timeZone: string; } interface AutomationSelector { /** * Contains attributes about a target. * Structure is documented below. */ targets: outputs.clouddeploy.AutomationSelectorTarget[]; } interface AutomationSelectorTarget { /** * ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name. It only needs the ID to determine which target is being referred to * "*", all targets in a location. */ id?: string; /** * Target labels. */ labels: { [key: string]: string; }; } interface CustomTargetTypeCustomActions { /** * The Skaffold custom action responsible for deploy operations. */ deployAction: string; /** * List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose. * Structure is documented below. */ includeSkaffoldModules?: outputs.clouddeploy.CustomTargetTypeCustomActionsIncludeSkaffoldModule[]; /** * The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`. */ renderAction?: string; } interface CustomTargetTypeCustomActionsIncludeSkaffoldModule { /** * The Skaffold Config modules to use from the specified source. */ configs?: string[]; /** * Remote git repository containing the Skaffold Config modules. * Structure is documented below. 
*/ git?: outputs.clouddeploy.CustomTargetTypeCustomActionsIncludeSkaffoldModuleGit; /** * Cloud Build 2nd gen repository containing the Skaffold Config modules. * Structure is documented below. */ googleCloudBuildRepo?: outputs.clouddeploy.CustomTargetTypeCustomActionsIncludeSkaffoldModuleGoogleCloudBuildRepo; /** * Cloud Storage bucket containing Skaffold Config modules. * Structure is documented below. */ googleCloudStorage?: outputs.clouddeploy.CustomTargetTypeCustomActionsIncludeSkaffoldModuleGoogleCloudStorage; } interface CustomTargetTypeCustomActionsIncludeSkaffoldModuleGit { /** * Relative path from the repository root to the Skaffold file. */ path?: string; /** * Git ref the package should be cloned from. */ ref?: string; /** * Git repository the package should be cloned from. */ repo: string; } interface CustomTargetTypeCustomActionsIncludeSkaffoldModuleGoogleCloudBuildRepo { /** * Relative path from the repository root to the Skaffold file. */ path?: string; /** * Branch or tag to use when cloning the repository. */ ref?: string; /** * Cloud Build 2nd gen repository in the format of 'projects/{project}/locations/{location}/connections/{connection}/repositories/{repository}'. */ repository: string; } interface CustomTargetTypeCustomActionsIncludeSkaffoldModuleGoogleCloudStorage { /** * Relative path from the source to the Skaffold file. */ path?: string; /** * Cloud Storage source paths to copy recursively. For example, providing `gs://my-bucket/dir/configs/*` will result in Skaffold copying all files within the `dir/configs` directory in the bucket `my-bucket`. */ source: string; } interface CustomTargetTypeIamBindingCondition { description?: string; expression: string; title: string; } interface CustomTargetTypeIamMemberCondition { description?: string; expression: string; title: string; } interface DeliveryPipelineCondition { /** * Details around the Pipeline's overall status. 
*/ pipelineReadyConditions: outputs.clouddeploy.DeliveryPipelineConditionPipelineReadyCondition[]; /** * Details around targets enumerated in the pipeline. */ targetsPresentConditions: outputs.clouddeploy.DeliveryPipelineConditionTargetsPresentCondition[]; /** * Details on whether the targets enumerated in the pipeline are of the same type. */ targetsTypeConditions: outputs.clouddeploy.DeliveryPipelineConditionTargetsTypeCondition[]; } interface DeliveryPipelineConditionPipelineReadyCondition { /** * True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline. */ status: boolean; /** * Output only. Most recent time at which the pipeline was updated. */ updateTime: string; } interface DeliveryPipelineConditionTargetsPresentCondition { /** * The list of Target names that are missing. For example, projects/{project_id}/locations/{location_name}/targets/{target_name}. */ missingTargets: string[]; /** * True if there aren't any missing Targets. */ status: boolean; /** * Output only. Most recent time at which the pipeline was updated. */ updateTime: string; } interface DeliveryPipelineConditionTargetsTypeCondition { /** * Human readable error message. */ errorDetails: string; /** * True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. This is false if some targets are Cloud Run targets and others are GKE clusters. */ status: boolean; } interface DeliveryPipelineIamBindingCondition { description?: string; expression: string; title: string; } interface DeliveryPipelineIamMemberCondition { description?: string; expression: string; title: string; } interface DeliveryPipelineSerialPipeline { /** * Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow. 
*/ stages?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStage[]; } interface DeliveryPipelineSerialPipelineStage { /** * Optional. The deploy parameters to use for the target in this stage. */ deployParameters?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageDeployParameter[]; /** * Skaffold profiles to use when rendering the manifest for this stage's `Target`. */ profiles?: string[]; /** * Optional. The strategy to use for a `Rollout` to this stage. */ strategy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategy; /** * The targetId to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`. */ targetId?: string; } interface DeliveryPipelineSerialPipelineStageDeployParameter { /** * Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target). */ matchTargetLabels?: { [key: string]: string; }; /** * Required. Values are deploy parameters in key-value pairs. */ values: { [key: string]: string; }; } interface DeliveryPipelineSerialPipelineStageStrategy { /** * Canary deployment strategy provides progressive percentage based deployments to a Target. */ canary?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanary; /** * Standard deployment strategy executes a single deploy and allows verifying the deployment. */ standard?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyStandard; } interface DeliveryPipelineSerialPipelineStageStrategyCanary { /** * Configures the progressive based deployment for a Target. 
*/ canaryDeployment?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCanaryDeployment; /** * Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments. */ customCanaryDeployment?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeployment; /** * Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. */ runtimeConfig?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfig; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCanaryDeployment { /** * Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100. */ percentages: number[]; /** * Optional. Configuration for the postdeploy job of the last phase. If this is not configured, postdeploy job will not be present. */ postdeploy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCanaryDeploymentPostdeploy; /** * Optional. Configuration for the predeploy job of the first phase. If this is not configured, predeploy job will not be present. */ predeploy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCanaryDeploymentPredeploy; /** * Whether to run verify tests after each percentage deployment. */ verify?: boolean; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCanaryDeploymentPostdeploy { /** * Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. */ actions?: string[]; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCanaryDeploymentPredeploy { /** * Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. 
*/ actions?: string[]; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeployment { /** * Required. Configuration for each phase in the canary deployment in the order executed. */ phaseConfigs: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeploymentPhaseConfig[]; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeploymentPhaseConfig { /** * Required. Percentage deployment for the phase. */ percentage: number; /** * Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. */ phaseId: string; /** * Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase. */ postdeploy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeploymentPhaseConfigPostdeploy; /** * Optional. Configuration for the predeploy job of this phase. If this is not configured, predeploy job will not be present for this phase. */ predeploy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeploymentPhaseConfigPredeploy; /** * Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage. */ profiles?: string[]; /** * Whether to run verify tests after the deployment. */ verify?: boolean; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeploymentPhaseConfigPostdeploy { /** * Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. */ actions?: string[]; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryCustomCanaryDeploymentPhaseConfigPredeploy { /** * Optional. 
A sequence of skaffold custom actions to invoke during execution of the predeploy job. */ actions?: string[]; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfig { /** * Cloud Run runtime configuration. */ cloudRun?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigCloudRun; /** * Kubernetes runtime configuration. */ kubernetes?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetes; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigCloudRun { /** * Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. */ automaticTrafficControl?: boolean; /** * Optional. A list of tags that are added to the canary revision while the canary phase is in progress. */ canaryRevisionTags?: string[]; /** * Optional. A list of tags that are added to the prior revision while the canary phase is in progress. */ priorRevisionTags?: string[]; /** * Optional. A list of tags that are added to the final stable revision when the stable phase is applied. */ stableRevisionTags?: string[]; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetes { /** * Kubernetes Gateway API service mesh configuration. */ gatewayServiceMesh?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh; /** * Kubernetes Service networking configuration. */ serviceNetworking?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesServiceNetworking; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { /** * Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service. */ deployment: string; /** * Required. 
Name of the Gateway API HTTPRoute. */ httpRoute: string; /** * Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources. */ podSelectorLabel?: string; /** * Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh setups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster. */ routeDestinations?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations; /** * Optional. The time to wait for route updates to propagate. The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time. */ routeUpdateWaitTime?: string; /** * Required. Name of the Kubernetes Service. */ service: string; /** * Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time. */ stableCutbackDuration?: string; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { /** * Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and "@self" to include the Target cluster. */ destinationIds: string[]; /** * Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified. 
*/ propagateService?: boolean; } interface DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesServiceNetworking { /** * Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service. */ deployment: string; /** * Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster. */ disablePodOverprovisioning?: boolean; /** * Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment. */ podSelectorLabel?: string; /** * Required. Name of the Kubernetes Service. */ service: string; } interface DeliveryPipelineSerialPipelineStageStrategyStandard { /** * Optional. Configuration for the postdeploy job. If this is not configured, postdeploy job will not be present. */ postdeploy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyStandardPostdeploy; /** * Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present. */ predeploy?: outputs.clouddeploy.DeliveryPipelineSerialPipelineStageStrategyStandardPredeploy; /** * Whether to verify a deployment. */ verify?: boolean; } interface DeliveryPipelineSerialPipelineStageStrategyStandardPostdeploy { /** * Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. */ actions?: string[]; } interface DeliveryPipelineSerialPipelineStageStrategyStandardPredeploy { /** * Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. */ actions?: string[]; } interface DeployPolicyRule { /** * Rollout restrictions. * Structure is documented below. 
*/ rolloutRestriction?: outputs.clouddeploy.DeployPolicyRuleRolloutRestriction; } interface DeployPolicyRuleRolloutRestriction { /** * Rollout actions to be restricted as part of the policy. If left empty, all actions will be restricted. * Each value may be one of: `ADVANCE`, `APPROVE`, `CANCEL`, `CREATE`, `IGNORE_JOB`, `RETRY_JOB`, `ROLLBACK`, `TERMINATE_JOBRUN`. */ actions?: string[]; /** * ID of the rule. This id must be unique in the `DeployPolicy` resource to which this rule belongs. The format is `[a-z]([a-z0-9-]){0,62}`. */ id: string; /** * What invoked the action. If left empty, all invoker types will be restricted. * Each value may be one of: `USER`, `DEPLOY_AUTOMATION`. */ invokers?: string[]; /** * Time window within which actions are restricted. * Structure is documented below. */ timeWindows?: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindows; } interface DeployPolicyRuleRolloutRestrictionTimeWindows { /** * One-time windows within which actions are restricted. * Structure is documented below. */ oneTimeWindows?: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindow[]; /** * The time zone in IANA format (IANA Time Zone Database, e.g. America/New_York). */ timeZone: string; /** * Recurring weekly windows within which actions are restricted. * Structure is documented below. */ weeklyWindows?: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsWeeklyWindow[]; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindow { /** * End date. * Structure is documented below. */ endDate: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowEndDate; /** * End time (exclusive). You may use 24:00 for the end of the day. * Structure is documented below. */ endTime: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowEndTime; /** * Start date. * Structure is documented below. 
*/ startDate: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowStartDate; /** * Start time (inclusive). Use 00:00 for the beginning of the day. * Structure is documented below. */ startTime: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowStartTime; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowEndDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month. */ day?: number; /** * Month of a year. Must be from 1 to 12. */ month?: number; /** * Year of the date. Must be from 1 to 9999. */ year?: number; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowEndTime { /** * Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. */ minutes?: number; /** * Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. */ nanos?: number; /** * Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowStartDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsOneTimeWindowStartTime { /** * Hours of a day in 24 hour format. 
Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. */ minutes?: number; /** * Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. */ nanos?: number; /** * Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsWeeklyWindow { /** * Days of week. If left empty, all days of the week will be included. * Each value may be one of: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ daysOfWeeks?: string[]; /** * End time (exclusive). Use 24:00 to indicate midnight. If you specify endTime you must also specify startTime. If left empty, this will block for the entire day for the days specified in daysOfWeek. * Structure is documented below. */ endTime?: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsWeeklyWindowEndTime; /** * Start time (inclusive). Use 00:00 for the beginning of the day. If you specify startTime you must also specify endTime. If left empty, this will block for the entire day for the days specified in daysOfWeek. * Structure is documented below. */ startTime?: outputs.clouddeploy.DeployPolicyRuleRolloutRestrictionTimeWindowsWeeklyWindowStartTime; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsWeeklyWindowEndTime { /** * Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. 
*/ minutes?: number; /** * Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. */ nanos?: number; /** * Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface DeployPolicyRuleRolloutRestrictionTimeWindowsWeeklyWindowStartTime { /** * Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. */ minutes?: number; /** * Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. */ nanos?: number; /** * Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface DeployPolicySelector { /** * Contains attributes about a delivery pipeline. * Structure is documented below. */ deliveryPipeline?: outputs.clouddeploy.DeployPolicySelectorDeliveryPipeline; /** * Contains attributes about a target. * Structure is documented below. */ target?: outputs.clouddeploy.DeployPolicySelectorTarget; } interface DeployPolicySelectorDeliveryPipeline { /** * ID of the DeliveryPipeline. The value of this field could be one of the following: * - The last segment of a pipeline name * - "*", all delivery pipelines in a location */ id?: string; /** * DeliveryPipeline labels. */ labels: { [key: string]: string; }; } interface DeployPolicySelectorTarget { /** * ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name. 
It only needs the ID to determine which target is being referred to * "*", all targets in a location. */ id?: string; /** * Target labels. */ labels: { [key: string]: string; }; } interface TargetAnthosCluster { /** * Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. */ membership?: string; } interface TargetAssociatedEntity { /** * Optional. Information specifying Anthos clusters as associated entities. */ anthosClusters?: outputs.clouddeploy.TargetAssociatedEntityAnthosCluster[]; /** * The name for the key in the map for which this object is mapped to in the API */ entityId: string; /** * Optional. Information specifying GKE clusters as associated entities. */ gkeClusters?: outputs.clouddeploy.TargetAssociatedEntityGkeCluster[]; } interface TargetAssociatedEntityAnthosCluster { /** * Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. */ membership?: string; } interface TargetAssociatedEntityGkeCluster { /** * Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. */ cluster?: string; /** * Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). */ internalIp?: boolean; /** * Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server. 
*/ proxyUrl?: string; } interface TargetCustomTarget { /** * Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`. */ customTargetType: string; } interface TargetExecutionConfig { /** * Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket ("gs://my-bucket") or a path within a bucket ("gs://my-bucket/my-dir"). If unspecified, a default bucket located in the same region will be used. */ artifactStorage: string; /** * Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used. */ executionTimeout: string; /** * Optional. Google service account to use for execution. If unspecified, the project execution service account ({project_number}-compute@developer.gserviceaccount.com) is used. */ serviceAccount: string; /** * Required. Usages when this configuration should be applied. */ usages: string[]; /** * Optional. If true, additional logging will be enabled when running builds in this execution environment. */ verbose?: boolean; /** * Optional. The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used. */ workerPool?: string; } interface TargetGke { /** * Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. */ cluster?: string; /** * Optional. If set, the cluster will be accessed using the DNS endpoint. Note that both `dnsEndpoint` and `internalIp` cannot be set to true. */ dnsEndpoint?: boolean; /** * Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. 
The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). */ internalIp?: boolean; /** * Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server. */ proxyUrl?: string; } interface TargetIamBindingCondition { description?: string; expression: string; title: string; } interface TargetIamMemberCondition { description?: string; expression: string; title: string; } interface TargetMultiTarget { /** * Required. The targetIds of this multiTarget. */ targetIds: string[]; } interface TargetRun { /** * Required. The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`. */ location: string; } } export declare namespace clouddomains { interface RegistrationContactSettings { /** * Caution: Anyone with access to this email address, phone number, and/or postal address can take control of the domain. * Warning: For new Registrations, the registrant receives an email confirmation that they must complete within 15 days to * avoid domain suspension. * Structure is documented below. */ adminContact: outputs.clouddomains.RegistrationContactSettingsAdminContact; /** * Required. Privacy setting for the contacts associated with the Registration. * Values are PUBLIC_CONTACT_DATA, PRIVATE_CONTACT_DATA, and REDACTED_CONTACT_DATA */ privacy: string; /** * Caution: Anyone with access to this email address, phone number, and/or postal address can take control of the domain. * Warning: For new Registrations, the registrant receives an email confirmation that they must complete within 15 days to * avoid domain suspension. * Structure is documented below. 
*/ registrantContact: outputs.clouddomains.RegistrationContactSettingsRegistrantContact; /** * Caution: Anyone with access to this email address, phone number, and/or postal address can take control of the domain. * Warning: For new Registrations, the registrant receives an email confirmation that they must complete within 15 days to * avoid domain suspension. * Structure is documented below. */ technicalContact: outputs.clouddomains.RegistrationContactSettingsTechnicalContact; } interface RegistrationContactSettingsAdminContact { /** * Required. Email address of the contact. */ email: string; /** * Fax number of the contact in international format. For example, "+1-800-555-0123". */ faxNumber?: string; /** * Required. Phone number of the contact in international format. For example, "+1-800-555-0123". */ phoneNumber: string; /** * Required. Postal address of the contact. * Structure is documented below. */ postalAddress: outputs.clouddomains.RegistrationContactSettingsAdminContactPostalAddress; } interface RegistrationContactSettingsAdminContactPostalAddress { /** * Unstructured address lines describing the lower levels of an address. * Because values in addressLines do not have type information and may sometimes contain multiple values in a single * field (e.g. "Austin, TX"), it is important that the line order is clear. The order of address lines should be * "envelope order" for the country/region of the address. In places where this can vary (e.g. Japan), addressLanguage * is used to make it explicit (e.g. "ja" for large-to-small ordering and "ja-Latn" or "en" for small-to-large). This way, * the most specific line of an address can be selected based on the language. */ addressLines?: string[]; /** * Highest administrative subdivision which is used for postal addresses of a country or region. For example, this can be a state, * a province, an oblast, or a prefecture. Specifically, for Spain this is the province and not the autonomous community * (e.g. 
"Barcelona" and not "Catalonia"). Many countries don't use an administrative area in postal addresses. E.g. in Switzerland * this should be left unpopulated. */ administrativeArea?: string; /** * Generally refers to the city/town portion of the address. Examples: US city, IT comune, UK post town. In regions of the world * where localities are not well defined or do not fit into this structure well, leave locality empty and use addressLines. */ locality?: string; /** * The name of the organization at the address. */ organization?: string; /** * Postal code of the address. Not all countries use or require postal codes to be present, but where they are used, * they may trigger additional validation with other parts of the address (e.g. state/zip validation in the U.S.A.). */ postalCode?: string; /** * The recipient at the address. This field may, under certain circumstances, contain multiline information. For example, * it might contain "care of" information. */ recipients?: string[]; /** * Required. CLDR region code of the country/region of the address. This is never inferred and it is up to the user to * ensure the value is correct. See https://cldr.unicode.org/ and * https://www.unicode.org/cldr/charts/30/supplemental/territory_information.html for details. Example: "CH" for Switzerland. */ regionCode: string; } interface RegistrationContactSettingsRegistrantContact { /** * Required. Email address of the contact. */ email: string; /** * Fax number of the contact in international format. For example, "+1-800-555-0123". */ faxNumber?: string; /** * Required. Phone number of the contact in international format. For example, "+1-800-555-0123". */ phoneNumber: string; /** * Required. Postal address of the contact. * Structure is documented below. 
*/ postalAddress: outputs.clouddomains.RegistrationContactSettingsRegistrantContactPostalAddress; } interface RegistrationContactSettingsRegistrantContactPostalAddress { /** * Unstructured address lines describing the lower levels of an address. * Because values in addressLines do not have type information and may sometimes contain multiple values in a single * field (e.g. "Austin, TX"), it is important that the line order is clear. The order of address lines should be * "envelope order" for the country/region of the address. In places where this can vary (e.g. Japan), addressLanguage * is used to make it explicit (e.g. "ja" for large-to-small ordering and "ja-Latn" or "en" for small-to-large). This way, * the most specific line of an address can be selected based on the language. */ addressLines?: string[]; /** * Highest administrative subdivision which is used for postal addresses of a country or region. For example, this can be a state, * a province, an oblast, or a prefecture. Specifically, for Spain this is the province and not the autonomous community * (e.g. "Barcelona" and not "Catalonia"). Many countries don't use an administrative area in postal addresses. E.g. in Switzerland * this should be left unpopulated. */ administrativeArea?: string; /** * Generally refers to the city/town portion of the address. Examples: US city, IT comune, UK post town. In regions of the world * where localities are not well defined or do not fit into this structure well, leave locality empty and use addressLines. */ locality?: string; /** * The name of the organization at the address. */ organization?: string; /** * Postal code of the address. Not all countries use or require postal codes to be present, but where they are used, * they may trigger additional validation with other parts of the address (e.g. state/zip validation in the U.S.A.). */ postalCode?: string; /** * The recipient at the address. This field may, under certain circumstances, contain multiline information. 
For example, * it might contain "care of" information. */ recipients?: string[]; /** * Required. CLDR region code of the country/region of the address. This is never inferred and it is up to the user to * ensure the value is correct. See https://cldr.unicode.org/ and * https://www.unicode.org/cldr/charts/30/supplemental/territory_information.html for details. Example: "CH" for Switzerland. */ regionCode: string; } interface RegistrationContactSettingsTechnicalContact { /** * Required. Email address of the contact. */ email: string; /** * Fax number of the contact in international format. For example, "+1-800-555-0123". */ faxNumber?: string; /** * Required. Phone number of the contact in international format. For example, "+1-800-555-0123". */ phoneNumber: string; /** * Required. Postal address of the contact. * Structure is documented below. */ postalAddress: outputs.clouddomains.RegistrationContactSettingsTechnicalContactPostalAddress; } interface RegistrationContactSettingsTechnicalContactPostalAddress { /** * Unstructured address lines describing the lower levels of an address. * Because values in addressLines do not have type information and may sometimes contain multiple values in a single * field (e.g. "Austin, TX"), it is important that the line order is clear. The order of address lines should be * "envelope order" for the country/region of the address. In places where this can vary (e.g. Japan), addressLanguage * is used to make it explicit (e.g. "ja" for large-to-small ordering and "ja-Latn" or "en" for small-to-large). This way, * the most specific line of an address can be selected based on the language. */ addressLines?: string[]; /** * Highest administrative subdivision which is used for postal addresses of a country or region. For example, this can be a state, * a province, an oblast, or a prefecture. Specifically, for Spain this is the province and not the autonomous community * (e.g. "Barcelona" and not "Catalonia"). 
Many countries don't use an administrative area in postal addresses. E.g. in Switzerland * this should be left unpopulated. */ administrativeArea?: string; /** * Generally refers to the city/town portion of the address. Examples: US city, IT comune, UK post town. In regions of the world * where localities are not well defined or do not fit into this structure well, leave locality empty and use addressLines. */ locality?: string; /** * The name of the organization at the address. */ organization?: string; /** * Postal code of the address. Not all countries use or require postal codes to be present, but where they are used, * they may trigger additional validation with other parts of the address (e.g. state/zip validation in the U.S.A.). */ postalCode?: string; /** * The recipient at the address. This field may, under certain circumstances, contain multiline information. For example, * it might contain "care of" information. */ recipients?: string[]; /** * Required. CLDR region code of the country/region of the address. This is never inferred and it is up to the user to * ensure the value is correct. See https://cldr.unicode.org/ and * https://www.unicode.org/cldr/charts/30/supplemental/territory_information.html for details. Example: "CH" for Switzerland. */ regionCode: string; } interface RegistrationDnsSettings { /** * Configuration for an arbitrary DNS provider. * Structure is documented below. */ customDns?: outputs.clouddomains.RegistrationDnsSettingsCustomDns; /** * The list of glue records for this Registration. Commonly empty. * Structure is documented below. */ glueRecords?: outputs.clouddomains.RegistrationDnsSettingsGlueRecord[]; } interface RegistrationDnsSettingsCustomDns { /** * The list of DS records for this domain, which are used to enable DNSSEC. The domain's DNS provider can provide * the values to set here. If this field is empty, DNSSEC is disabled. * Structure is documented below. 
*/ dsRecords?: outputs.clouddomains.RegistrationDnsSettingsCustomDnsDsRecord[]; /** * Required. A list of name servers that store the DNS zone for this domain. Each name server is a domain * name, with Unicode domain names expressed in Punycode format. */ nameServers: string[]; } interface RegistrationDnsSettingsCustomDnsDsRecord { /** * The algorithm used to generate the referenced DNSKEY. */ algorithm?: string; /** * The digest generated from the referenced DNSKEY. */ digest?: string; /** * The hash function used to generate the digest of the referenced DNSKEY. */ digestType?: string; /** * The key tag of the record. Must be set in range 0 -- 65535. */ keyTag?: number; } interface RegistrationDnsSettingsGlueRecord { /** * Required. Domain name of the host in Punycode format. */ hostName: string; /** * List of IPv4 addresses corresponding to this host in the standard decimal format (e.g. 198.51.100.1). * At least one of ipv4Address and ipv6Address must be set. */ ipv4Addresses?: string[]; /** * List of IPv4 addresses corresponding to this host in the standard decimal format (e.g. 198.51.100.1). * At least one of ipv4Address and ipv6Address must be set. */ ipv6Addresses?: string[]; } interface RegistrationManagementSettings { /** * The desired renewal method for this Registration. The actual renewalMethod is automatically updated to reflect this choice. * If unset or equal to RENEWAL_METHOD_UNSPECIFIED, the actual renewalMethod is treated as if it were set to AUTOMATIC_RENEWAL. * You cannot use RENEWAL_DISABLED during resource creation, and you can update the renewal status only when the Registration * resource has state ACTIVE or SUSPENDED. * When preferredRenewalMethod is set to AUTOMATIC_RENEWAL, the actual renewalMethod can be set to RENEWAL_DISABLED in case of * problems with the billing account or reported domain abuse. In such cases, check the issues field on the Registration. 
After * the problem is resolved, the renewalMethod is automatically updated to preferredRenewalMethod in a few hours. */ preferredRenewalMethod: string; /** * (Output) * Output only. The actual renewal method for this Registration. When preferredRenewalMethod is set to AUTOMATIC_RENEWAL, * the actual renewalMethod can be equal to RENEWAL_DISABLED—for example, when there are problems with the billing account * or reported domain abuse. In such cases, check the issues field on the Registration. After the problem is resolved, the * renewalMethod is automatically updated to preferredRenewalMethod in a few hours. */ renewalMethod: string; /** * Controls whether the domain can be transferred to another registrar. Values are UNLOCKED or LOCKED. */ transferLockState: string; } interface RegistrationYearlyPrice { /** * The three-letter currency code defined in ISO 4217. */ currencyCode?: string; /** * The whole units of the amount. For example if currencyCode is "USD", then 1 unit is one US dollar. */ units?: string; } } export declare namespace cloudfunctions { interface FunctionAutomaticUpdatePolicy { } interface FunctionEventTrigger { /** * The type of event to observe. For example: `"google.storage.object.finalize"`. * See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/) for a * full reference of accepted triggers. */ eventType: string; /** * Specifies policy for failed executions. Structure is documented below. */ failurePolicy: outputs.cloudfunctions.FunctionEventTriggerFailurePolicy; /** * Required. The name or partial URI of the resource from * which to observe events. For example, `"myBucket"` or `"projects/my-project/topics/my-topic"` */ resource: string; } interface FunctionEventTriggerFailurePolicy { /** * Whether the function should be retried on failure. Defaults to `false`. 
*/ retry: boolean; } interface FunctionIamBindingCondition { description?: string; expression: string; title: string; } interface FunctionIamMemberCondition { description?: string; expression: string; title: string; } interface FunctionOnDeployUpdatePolicy { /** * The runtime version which was used during latest function deployment. */ runtimeVersion: string; } interface FunctionSecretEnvironmentVariable { /** * Name of the environment variable. */ key: string; /** * Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as of the function. */ projectId: string; /** * ID of the secret in secret manager (not the full resource name). */ secret: string; /** * Version of the secret (version number or the string "latest"). It is recommended to use a numeric version for secret environment variables as any updates to the secret value is not reflected until new clones start. */ version: string; } interface FunctionSecretVolume { /** * The path within the container to mount the secret volume. For example, setting the mountPath as "/etc/secrets" would mount the secret value files under the "/etc/secrets" directory. This directory will also be completely shadowed and unavailable to mount any other secrets. Recommended mount paths: "/etc/secrets" Restricted mount paths: "/cloudsql", "/dev/log", "/pod", "/proc", "/var/log". */ mountPath: string; /** * Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as of the function. */ projectId: string; /** * ID of the secret in secret manager (not the full resource name). */ secret: string; /** * List of secret versions to mount for this secret. 
If empty, the "latest" version of the secret will be made available in a file named after the secret under the mount point. Structure is documented below. */ versions?: outputs.cloudfunctions.FunctionSecretVolumeVersion[]; } interface FunctionSecretVolumeVersion { /** * Relative path of the file under the mount path where the secret value for this version will be fetched and made available. For example, setting the mountPath as "/etc/secrets" and path as "/secret_foo" would mount the secret value file at "/etc/secrets/secret_foo". */ path: string; /** * Version of the secret (version number or the string "latest"). It is preferable to use "latest" version with secret volumes as secret value changes are reflected immediately. */ version: string; } interface FunctionSourceRepository { /** * The URL pointing to the hosted repository where the function was defined at the time of deployment. */ deployedUrl: string; /** * The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats: * * * To refer to a specific commit: `https://source.developers.google.com/projects/*/repos/*/revisions/*/paths/*` * * To refer to a moveable alias (branch): `https://source.developers.google.com/projects/*/repos/*/moveable-aliases/*/paths/*`. To refer to HEAD, use the `master` moveable alias. * * To refer to a specific fixed alias (tag): `https://source.developers.google.com/projects/*/repos/*/fixed-aliases/*/paths/*` */ url: string; } interface GetFunctionAutomaticUpdatePolicy { } interface GetFunctionEventTrigger { /** * The type of event to observe. For example: `"google.storage.object.finalize"`. * See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/) * for a full reference of accepted triggers. */ eventType: string; /** * Policy for failed executions. Structure is documented below. 
*/ failurePolicies: outputs.cloudfunctions.GetFunctionEventTriggerFailurePolicy[]; /** * The name of the resource whose events are being observed, for example, `"myBucket"` */ resource: string; } interface GetFunctionEventTriggerFailurePolicy { /** * Whether the function should be retried on failure. */ retry: boolean; } interface GetFunctionOnDeployUpdatePolicy { /** * The runtime version which was used during latest function deployment. */ runtimeVersion: string; } interface GetFunctionSecretEnvironmentVariable { /** * Name of the environment variable. */ key: string; /** * Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as of the function. */ projectId: string; /** * ID of the secret in secret manager (not the full resource name). */ secret: string; /** * Version of the secret (version number or the string "latest"). It is recommended to use a numeric version for secret environment variables as any updates to the secret value is not reflected until new clones start. */ version: string; } interface GetFunctionSecretVolume { /** * The path within the container to mount the secret volume. For example, setting the mountPath as "/etc/secrets" would mount the secret value files under the "/etc/secrets" directory. This directory will also be completely shadowed and unavailable to mount any other secrets. Recommended mount paths: "/etc/secrets" Restricted mount paths: "/cloudsql", "/dev/log", "/pod", "/proc", "/var/log". */ mountPath: string; /** * Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as of the function. 
*/ projectId: string; /** * ID of the secret in secret manager (not the full resource name). */ secret: string; /** * List of secret versions to mount for this secret. If empty, the "latest" version of the secret will be made available in a file named after the secret under the mount point. */ versions: outputs.cloudfunctions.GetFunctionSecretVolumeVersion[]; } interface GetFunctionSecretVolumeVersion { /** * Relative path of the file under the mount path where the secret value for this version will be fetched and made available. For example, setting the mountPath as "/etc/secrets" and path as "/secret_foo" would mount the secret value file at "/etc/secrets/secret_foo". */ path: string; /** * Version of the secret (version number or the string "latest"). It is preferable to use "latest" version with secret volumes as secret value changes are reflected immediately. */ version: string; } interface GetFunctionSourceRepository { /** * The URL pointing to the hosted repository where the function was defined at the time of deployment. */ deployedUrl: string; /** * The URL pointing to the hosted repository where the function is defined. */ url: string; } } export declare namespace cloudfunctionsv2 { interface FunctionBuildConfig { /** * Security patches are applied automatically to the runtime without requiring * the function to be redeployed. */ automaticUpdatePolicy: outputs.cloudfunctionsv2.FunctionBuildConfigAutomaticUpdatePolicy; /** * (Output) * The Cloud Build name of the latest successful * deployment of the function. */ build: string; /** * User managed repository created in Artifact Registry optionally with a customer managed encryption key. */ dockerRepository: string; /** * The name of the function (as defined in source code) that will be executed. * Defaults to the resource name suffix, if not specified. For backward * compatibility, if function with given name is not found, then the system * will try to use function named "function". 
For Node.js this is name of a * function exported by the module specified in source_location. */ entryPoint?: string; /** * User-provided build-time environment variables for the function. */ environmentVariables: { [key: string]: string; }; /** * Security patches are only applied when a function is redeployed. * Structure is documented below. */ onDeployUpdatePolicy?: outputs.cloudfunctionsv2.FunctionBuildConfigOnDeployUpdatePolicy; /** * The runtime in which to run the function. Required when deploying a new * function, optional when updating an existing function. */ runtime?: string; /** * The fully-qualified name of the service account to be used for building the container. */ serviceAccount: string; /** * The location of the function source code. * Structure is documented below. */ source?: outputs.cloudfunctionsv2.FunctionBuildConfigSource; /** * Name of the Cloud Build Custom Worker Pool that should be used to build the function. */ workerPool?: string; } interface FunctionBuildConfigAutomaticUpdatePolicy { } interface FunctionBuildConfigOnDeployUpdatePolicy { /** * (Output) * The runtime version which was used during latest function deployment. */ runtimeVersion: string; } interface FunctionBuildConfigSource { /** * If provided, get the source from this location in a Cloud Source Repository. * Structure is documented below. */ repoSource?: outputs.cloudfunctionsv2.FunctionBuildConfigSourceRepoSource; /** * If provided, get the source from this location in Google Cloud Storage. * Structure is documented below. */ storageSource?: outputs.cloudfunctionsv2.FunctionBuildConfigSourceStorageSource; } interface FunctionBuildConfigSourceRepoSource { /** * Regex matching branches to build. */ branchName?: string; /** * Regex matching tags to build. */ commitSha?: string; /** * Directory, relative to the source root, in which to run the build. */ dir?: string; /** * Only trigger a build if the revision regex does * NOT match the revision regex. 
*/ invertRegex?: boolean; /** * ID of the project that owns the Cloud Source Repository. If omitted, the * project ID requesting the build is assumed. */ projectId?: string; /** * Name of the Cloud Source Repository. */ repoName?: string; /** * Regex matching tags to build. */ tagName?: string; } interface FunctionBuildConfigSourceStorageSource { /** * Google Cloud Storage bucket containing the source */ bucket?: string; /** * Google Cloud Storage generation for the object. If the generation * is omitted, the latest generation will be used. */ generation: number; /** * Google Cloud Storage object containing the source. */ object?: string; } interface FunctionEventTrigger { /** * Criteria used to filter events. * Structure is documented below. */ eventFilters?: outputs.cloudfunctionsv2.FunctionEventTriggerEventFilter[]; /** * Required. The type of event to observe. */ eventType: string; /** * The name of a Pub/Sub topic in the same project that will be used * as the transport topic for the event delivery. */ pubsubTopic: string; /** * Describes the retry policy in case of function's execution failure. * Retried execution is charged as any other execution. * Possible values are: `RETRY_POLICY_UNSPECIFIED`, `RETRY_POLICY_DO_NOT_RETRY`, `RETRY_POLICY_RETRY`. */ retryPolicy?: string; /** * Optional. The email of the trigger's service account. The service account * must have permission to invoke Cloud Run services. If empty, defaults to the * Compute Engine default service account: {project_number}-compute@developer.gserviceaccount.com. */ serviceAccountEmail: string; /** * (Output) * Output only. The resource name of the Eventarc trigger. */ trigger: string; /** * The region that the trigger will be in. The trigger will only receive * events originating in this region. It can be the same * region as the function, a different region or multi-region, or the global * region. If not provided, defaults to the same region as the function. 
*/ triggerRegion: string; } interface FunctionEventTriggerEventFilter { /** * 'Required. The name of a CloudEvents attribute. * Currently, only a subset of attributes are supported for filtering. Use the `gcloud eventarc providers describe` command to learn more about events and their attributes. * Do not filter for the 'type' attribute here, as this is already achieved by the resource's `eventType` attribute. */ attribute: string; /** * Optional. The operator used for matching the events with the value of * the filter. If not specified, only events that have an exact key-value * pair specified in the filter are matched. * The only allowed value is `match-path-pattern`. * [See documentation on path patterns here](https://cloud.google.com/eventarc/docs/path-patterns)' */ operator?: string; /** * Required. The value for the attribute. * If the operator field is set as `match-path-pattern`, this value can be a path pattern instead of an exact value. */ value: string; } interface FunctionIamBindingCondition { description?: string; expression: string; title: string; } interface FunctionIamMemberCondition { description?: string; expression: string; title: string; } interface FunctionServiceConfig { /** * Whether 100% of traffic is routed to the latest revision. Defaults to true. */ allTrafficOnLatestRevision?: boolean; /** * The number of CPUs used in a single container instance. Default value is calculated from available memory. */ availableCpu: string; /** * The amount of memory available for a function. * Defaults to 256M. Supported units are k, M, G, Mi, Gi. If no unit is * supplied the value is interpreted as bytes. */ availableMemory: string; /** * The binary authorization policy to be checked when deploying the Cloud Run service. */ binaryAuthorizationPolicy?: string; /** * (Optional, Beta) * Egress settings for direct VPC. If not provided, it defaults to VPC_EGRESS_PRIVATE_RANGES_ONLY. 
* Possible values are: `VPC_EGRESS_ALL_TRAFFIC`, `VPC_EGRESS_PRIVATE_RANGES_ONLY`. */ directVpcEgress: string; /** * (Optional, Beta) * The Direct VPC network interface for the Cloud Function. Currently only a single Direct VPC is supported. * Structure is documented below. */ directVpcNetworkInterfaces?: outputs.cloudfunctionsv2.FunctionServiceConfigDirectVpcNetworkInterface[]; /** * Environment variables that shall be available during function execution. */ environmentVariables: { [key: string]: string; }; /** * (Output) * URIs of the Service deployed */ gcfUri: string; /** * Available ingress settings. Defaults to "ALLOW_ALL" if unspecified. * Default value is `ALLOW_ALL`. * Possible values are: `ALLOW_ALL`, `ALLOW_INTERNAL_ONLY`, `ALLOW_INTERNAL_AND_GCLB`. */ ingressSettings?: string; /** * The limit on the maximum number of function instances that may coexist at a * given time. */ maxInstanceCount: number; /** * Sets the maximum number of concurrent requests that each instance can receive. Defaults to 1. */ maxInstanceRequestConcurrency: number; /** * The limit on the minimum number of function instances that may coexist at a * given time. */ minInstanceCount?: number; /** * Secret environment variables configuration. * Structure is documented below. */ secretEnvironmentVariables?: outputs.cloudfunctionsv2.FunctionServiceConfigSecretEnvironmentVariable[]; /** * Secret volumes configuration. * Structure is documented below. */ secretVolumes?: outputs.cloudfunctionsv2.FunctionServiceConfigSecretVolume[]; /** * (Output) * Name of the service associated with a Function. */ service: string; /** * The email of the service account for this function. */ serviceAccountEmail: string; /** * The function execution timeout. Execution is considered failed and * can be terminated if the function is not completed at the end of the * timeout period. Defaults to 60 seconds. */ timeoutSeconds: number; /** * (Output) * URI of the Service deployed. 
*/ uri: string; /** * The Serverless VPC Access connector that this cloud function can connect to. */ vpcConnector?: string; /** * Available egress settings. * Possible values are: `VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED`, `PRIVATE_RANGES_ONLY`, `ALL_TRAFFIC`. */ vpcConnectorEgressSettings?: string; } interface FunctionServiceConfigDirectVpcNetworkInterface { /** * The name of the VPC network to which the function will be connected. Specify either a VPC network or a subnet, or both. If you specify only a network, the subnet uses the same name as the network. */ network?: string; /** * The name of the VPC subnetwork that the Cloud Function resource will get IPs from. Specify either a VPC network or a subnet, or both. If both network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the subnetwork with the same name with the network will be used. */ subnetwork?: string; /** * Network tags applied to this Cloud Function resource. */ tags?: string[]; } interface FunctionServiceConfigSecretEnvironmentVariable { /** * Name of the environment variable. */ key: string; /** * Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. */ projectId: string; /** * Name of the secret in secret manager (not the full resource name). */ secret: string; /** * Version of the secret (version number or the string 'latest'). It is recommended to use a numeric version for secret environment variables as any updates to the secret value is not reflected until new instances start. */ version: string; } interface FunctionServiceConfigSecretVolume { /** * The path within the container to mount the secret volume. 
For example, setting the mountPath as /etc/secrets would mount the secret value files under the /etc/secrets directory. This directory will also be completely shadowed and unavailable to mount any other secrets. Recommended mount path: /etc/secrets */ mountPath: string; /** * Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. */ projectId: string; /** * Name of the secret in secret manager (not the full resource name). */ secret: string; /** * List of secret versions to mount for this secret. If empty, the latest version of the secret will be made available in a file named after the secret under the mount point.' * Structure is documented below. */ versions: outputs.cloudfunctionsv2.FunctionServiceConfigSecretVolumeVersion[]; } interface FunctionServiceConfigSecretVolumeVersion { /** * Relative path of the file under the mount path where the secret value for this version will be fetched and made available. For example, setting the mountPath as '/etc/secrets' and path as secretFoo would mount the secret value file at /etc/secrets/secret_foo. */ path: string; /** * Version of the secret (version number or the string 'latest'). It is preferable to use latest version with secret volumes as secret value changes are reflected immediately. */ version: string; } interface GetFunctionBuildConfig { /** * Security patches are applied automatically to the runtime without requiring * the function to be redeployed. */ automaticUpdatePolicies: outputs.cloudfunctionsv2.GetFunctionBuildConfigAutomaticUpdatePolicy[]; /** * The Cloud Build name of the latest successful * deployment of the function. */ build: string; /** * User managed repository created in Artifact Registry optionally with a customer managed encryption key. 
*/ dockerRepository: string; /** * The name of the function (as defined in source code) that will be executed. * Defaults to the resource name suffix, if not specified. For backward * compatibility, if function with given name is not found, then the system * will try to use function named "function". For Node.js this is name of a * function exported by the module specified in source_location. */ entryPoint: string; /** * User-provided build-time environment variables for the function. */ environmentVariables: { [key: string]: string; }; /** * Security patches are only applied when a function is redeployed. */ onDeployUpdatePolicies: outputs.cloudfunctionsv2.GetFunctionBuildConfigOnDeployUpdatePolicy[]; /** * The runtime in which to run the function. Required when deploying a new * function, optional when updating an existing function. */ runtime: string; /** * The fully-qualified name of the service account to be used for building the container. */ serviceAccount: string; /** * The location of the function source code. */ sources: outputs.cloudfunctionsv2.GetFunctionBuildConfigSource[]; /** * Name of the Cloud Build Custom Worker Pool that should be used to build the function. */ workerPool: string; } interface GetFunctionBuildConfigAutomaticUpdatePolicy { } interface GetFunctionBuildConfigOnDeployUpdatePolicy { /** * The runtime version which was used during latest function deployment. */ runtimeVersion: string; } interface GetFunctionBuildConfigSource { /** * If provided, get the source from this location in a Cloud Source Repository. */ repoSources: outputs.cloudfunctionsv2.GetFunctionBuildConfigSourceRepoSource[]; /** * If provided, get the source from this location in Google Cloud Storage. */ storageSources: outputs.cloudfunctionsv2.GetFunctionBuildConfigSourceStorageSource[]; } interface GetFunctionBuildConfigSourceRepoSource { /** * Regex matching branches to build. */ branchName: string; /** * Regex matching tags to build. 
*/ commitSha: string; /** * Directory, relative to the source root, in which to run the build. */ dir: string; /** * Only trigger a build if the revision regex does * NOT match the revision regex. */ invertRegex: boolean; /** * ID of the project that owns the Cloud Source Repository. If omitted, the * project ID requesting the build is assumed. */ projectId: string; /** * Name of the Cloud Source Repository. */ repoName: string; /** * Regex matching tags to build. */ tagName: string; } interface GetFunctionBuildConfigSourceStorageSource { /** * Google Cloud Storage bucket containing the source */ bucket: string; /** * Google Cloud Storage generation for the object. If the generation * is omitted, the latest generation will be used. */ generation: number; /** * Google Cloud Storage object containing the source. */ object: string; } interface GetFunctionEventTrigger { /** * Criteria used to filter events. */ eventFilters: outputs.cloudfunctionsv2.GetFunctionEventTriggerEventFilter[]; /** * Required. The type of event to observe. */ eventType: string; /** * The name of a Pub/Sub topic in the same project that will be used * as the transport topic for the event delivery. */ pubsubTopic: string; /** * Describes the retry policy in case of function's execution failure. * Retried execution is charged as any other execution. Possible values: ["RETRY_POLICY_UNSPECIFIED", "RETRY_POLICY_DO_NOT_RETRY", "RETRY_POLICY_RETRY"] */ retryPolicy: string; /** * Optional. The email of the trigger's service account. The service account * must have permission to invoke Cloud Run services. If empty, defaults to the * Compute Engine default service account: {project_number}-compute@developer.gserviceaccount.com. */ serviceAccountEmail: string; /** * Output only. The resource name of the Eventarc trigger. */ trigger: string; /** * The region that the trigger will be in. The trigger will only receive * events originating in this region. 
It can be the same * region as the function, a different region or multi-region, or the global * region. If not provided, defaults to the same region as the function. */ triggerRegion: string; } interface GetFunctionEventTriggerEventFilter { /** * 'Required. The name of a CloudEvents attribute. * Currently, only a subset of attributes are supported for filtering. Use the 'gcloud eventarc providers describe' command to learn more about events and their attributes. * Do not filter for the 'type' attribute here, as this is already achieved by the resource's 'event_type' attribute. */ attribute: string; /** * Optional. The operator used for matching the events with the value of * the filter. If not specified, only events that have an exact key-value * pair specified in the filter are matched. * The only allowed value is 'match-path-pattern'. * [See documentation on path patterns here](https://cloud.google.com/eventarc/docs/path-patterns)' */ operator: string; /** * Required. The value for the attribute. * If the operator field is set as 'match-path-pattern', this value can be a path pattern instead of an exact value. */ value: string; } interface GetFunctionServiceConfig { /** * Whether 100% of traffic is routed to the latest revision. Defaults to true. */ allTrafficOnLatestRevision: boolean; /** * The number of CPUs used in a single container instance. Default value is calculated from available memory. */ availableCpu: string; /** * The amount of memory available for a function. * Defaults to 256M. Supported units are k, M, G, Mi, Gi. If no unit is * supplied the value is interpreted as bytes. */ availableMemory: string; /** * The binary authorization policy to be checked when deploying the Cloud Run service. */ binaryAuthorizationPolicy: string; /** * Egress settings for direct VPC. If not provided, it defaults to VPC_EGRESS_PRIVATE_RANGES_ONLY. 
Possible values: ["VPC_EGRESS_ALL_TRAFFIC", "VPC_EGRESS_PRIVATE_RANGES_ONLY"] */ directVpcEgress: string; /** * The Direct VPC network interface for the Cloud Function. Currently only a single Direct VPC is supported. */ directVpcNetworkInterfaces: outputs.cloudfunctionsv2.GetFunctionServiceConfigDirectVpcNetworkInterface[]; /** * Environment variables that shall be available during function execution. */ environmentVariables: { [key: string]: string; }; /** * URIs of the Service deployed */ gcfUri: string; /** * Available ingress settings. Defaults to "ALLOW_ALL" if unspecified. Default value: "ALLOW_ALL" Possible values: ["ALLOW_ALL", "ALLOW_INTERNAL_ONLY", "ALLOW_INTERNAL_AND_GCLB"] */ ingressSettings: string; /** * The limit on the maximum number of function instances that may coexist at a * given time. */ maxInstanceCount: number; /** * Sets the maximum number of concurrent requests that each instance can receive. Defaults to 1. */ maxInstanceRequestConcurrency: number; /** * The limit on the minimum number of function instances that may coexist at a * given time. */ minInstanceCount: number; /** * Secret environment variables configuration. */ secretEnvironmentVariables: outputs.cloudfunctionsv2.GetFunctionServiceConfigSecretEnvironmentVariable[]; /** * Secret volumes configuration. */ secretVolumes: outputs.cloudfunctionsv2.GetFunctionServiceConfigSecretVolume[]; /** * Name of the service associated with a Function. */ service: string; /** * The email of the service account for this function. */ serviceAccountEmail: string; /** * The function execution timeout. Execution is considered failed and * can be terminated if the function is not completed at the end of the * timeout period. Defaults to 60 seconds. */ timeoutSeconds: number; /** * URI of the Service deployed. */ uri: string; /** * The Serverless VPC Access connector that this cloud function can connect to. */ vpcConnector: string; /** * Available egress settings. 
Possible values: ["VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED", "PRIVATE_RANGES_ONLY", "ALL_TRAFFIC"] */ vpcConnectorEgressSettings: string; } interface GetFunctionServiceConfigDirectVpcNetworkInterface { /** * The name of the VPC network to which the function will be connected. Specify either a VPC network or a subnet, or both. If you specify only a network, the subnet uses the same name as the network. */ network: string; /** * The name of the VPC subnetwork that the Cloud Function resource will get IPs from. Specify either a VPC network or a subnet, or both. If both network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Function resource. */ tags: string[]; } interface GetFunctionServiceConfigSecretEnvironmentVariable { /** * Name of the environment variable. */ key: string; /** * Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. */ projectId: string; /** * Name of the secret in secret manager (not the full resource name). */ secret: string; /** * Version of the secret (version number or the string 'latest'). It is recommended to use a numeric version for secret environment variables as any updates to the secret value is not reflected until new instances start. */ version: string; } interface GetFunctionServiceConfigSecretVolume { /** * The path within the container to mount the secret volume. For example, setting the mountPath as /etc/secrets would mount the secret value files under the /etc/secrets directory. This directory will also be completely shadowed and unavailable to mount any other secrets. 
Recommended mount path: /etc/secrets */ mountPath: string; /** * Project identifier (preferably project number but can also be the project ID) of the project that contains the secret. If not set, it will be populated with the function's project assuming that the secret exists in the same project as of the function. */ projectId: string; /** * Name of the secret in secret manager (not the full resource name). */ secret: string; /** * List of secret versions to mount for this secret. If empty, the latest version of the secret will be made available in a file named after the secret under the mount point. */ versions: outputs.cloudfunctionsv2.GetFunctionServiceConfigSecretVolumeVersion[]; } interface GetFunctionServiceConfigSecretVolumeVersion { /** * Relative path of the file under the mount path where the secret value for this version will be fetched and made available. For example, setting the mountPath as '/etc/secrets' and path as secretFoo would mount the secret value file at /etc/secrets/secret_foo. */ path: string; /** * Version of the secret (version number or the string 'latest'). It is preferable to use latest version with secret volumes as secret value changes are reflected immediately. */ version: string; } } export declare namespace cloudidentity { interface GetGroupLookupGroupKey { /** * (Required) The ID of the entity. * For Google-managed entities, the id is the email address of an existing group or user. * For external-identity-mapped entities, the id is a string conforming * to the Identity Source's requirements. */ id: string; /** * (Optional) The namespace in which the entity exists. * If not populated, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If populated, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. 
*/ namespace?: string; } interface GetGroupMembershipsMembership { /** * If set to true, skip group member creation if a membership with the same name already exists. Defaults to false. */ createIgnoreAlreadyExists: boolean; /** * The time when the Membership was created. */ createTime: string; /** * The parent Group resource under which to lookup the Membership names. Must be of the form groups/{group_id}. */ group: string; /** * EntityKey of the member. Structure is documented below. */ memberKeys: outputs.cloudidentity.GetGroupMembershipsMembershipMemberKey[]; /** * The name of the MembershipRole. One of OWNER, MANAGER, MEMBER. */ name: string; /** * EntityKey of the member. Structure is documented below. */ preferredMemberKeys: outputs.cloudidentity.GetGroupMembershipsMembershipPreferredMemberKey[]; /** * The MembershipRoles that apply to the Membership. Structure is documented below. */ roles: outputs.cloudidentity.GetGroupMembershipsMembershipRole[]; /** * The type of the membership. */ type: string; /** * The time when the Membership was last updated. */ updateTime: string; } interface GetGroupMembershipsMembershipMemberKey { /** * The ID of the entity. For Google-managed entities, the id is the email address of an existing * group or user. For external-identity-mapped entities, the id is a string conforming * to the Identity Source's requirements. */ id: string; /** * The namespace in which the entity exists. * If not populated, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If populated, the EntityKey represents an external-identity-mapped group. */ namespace: string; } interface GetGroupMembershipsMembershipPreferredMemberKey { /** * The ID of the entity. For Google-managed entities, the id is the email address of an existing * group or user. For external-identity-mapped entities, the id is a string conforming * to the Identity Source's requirements. 
*/ id: string; /** * The namespace in which the entity exists. * If not populated, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If populated, the EntityKey represents an external-identity-mapped group. */ namespace: string; } interface GetGroupMembershipsMembershipRole { /** * The MembershipRole expiry details, only supported for MEMBER role. * Other roles cannot be accompanied with MEMBER role having expiry. */ expiryDetails: outputs.cloudidentity.GetGroupMembershipsMembershipRoleExpiryDetail[]; /** * The name of the MembershipRole. One of OWNER, MANAGER, MEMBER. */ name: string; } interface GetGroupMembershipsMembershipRoleExpiryDetail { /** * The time at which the MembershipRole will expire. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. * * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ expireTime: string; } interface GetGroupTransitiveMembershipsMembership { /** * EntityKey of the member. This value will be either a userKey in the format `users/000000000000000000000` with a numerical id or a groupKey in the format `groups/000ab0000ab0000` with a hexadecimal id. */ member: string; /** * EntityKey of the member. Structure is documented below. */ preferredMemberKeys: outputs.cloudidentity.GetGroupTransitiveMembershipsMembershipPreferredMemberKey[]; /** * The relation between the group and the transitive member. The value can be DIRECT, INDIRECT, or DIRECT_AND_INDIRECT. */ relationType: string; /** * The TransitiveMembershipRoles that apply to the Membership. Structure is documented below. */ roles: outputs.cloudidentity.GetGroupTransitiveMembershipsMembershipRole[]; } interface GetGroupTransitiveMembershipsMembershipPreferredMemberKey { /** * The ID of the entity. For Google-managed entities, the id is the email address of an existing * group or user. 
For external-identity-mapped entities, the id is a string conforming * to the Identity Source's requirements. */ id: string; /** * The namespace in which the entity exists. * If not populated, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If populated, the EntityKey represents an external-identity-mapped group. */ namespace: string; } interface GetGroupTransitiveMembershipsMembershipRole { /** * The name of the TransitiveMembershipRole. One of OWNER, MANAGER, MEMBER. */ role: string; } interface GetGroupsGroup { /** * Additional group keys associated with the Group */ additionalGroupKeys: outputs.cloudidentity.GetGroupsGroupAdditionalGroupKey[]; /** * The time when the Group was created. */ createTime: string; /** * An extended description to help users determine the purpose of a Group. */ description: string; /** * The display name of the Group. */ displayName: string; /** * EntityKey of the Group. Structure is documented below. */ groupKeys: outputs.cloudidentity.GetGroupsGroupGroupKey[]; /** * The initial configuration options for creating a Group. * * See the * [API reference](https://cloud.google.com/identity/docs/reference/rest/v1beta1/groups/create#initialgroupconfig) * for possible values. Default value: "EMPTY" Possible values: ["INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY"] */ initialGroupConfig: string; /** * The labels that apply to the Group. * Contains 'cloudidentity.googleapis.com/groups.discussion_forum': '' if the Group is a Google Group or * 'system/groups/external': '' if the Group is an external-identity-mapped group. */ labels: { [key: string]: string; }; /** * Resource name of the Group in the format: groups/{group_id}, where `groupId` is the unique ID assigned to the Group. */ name: string; /** * The parent resource under which to list all Groups. 
Must be of the form identitysources/{identity_source_id} for external- identity-mapped groups or customers/{customer_id} for Google Groups. */ parent: string; /** * The time when the Group was last updated. */ updateTime: string; } interface GetGroupsGroupAdditionalGroupKey { /** * The ID of the entity. * For Google-managed entities, the id is the email address of an existing group or user. * For external-identity-mapped entities, the id is a string conforming * to the Identity Source's requirements. */ id: string; /** * The namespace in which the entity exists. * If not populated, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If populated, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. */ namespace: string; } interface GetGroupsGroupGroupKey { /** * The ID of the entity. * For Google-managed entities, the id is the email address of an existing group or user. * For external-identity-mapped entities, the id is a string conforming * to the Identity Source's requirements. */ id: string; /** * The namespace in which the entity exists. * If not populated, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If populated, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. */ namespace: string; } interface GetPoliciesPolicy { /** * The customer that the policy belongs to. */ customer: string; /** * The resource name of the policy. */ name: string; /** * A list containing the CEL query that defines which entities the policy applies to. Structure is documented below. 
*/ policyQueries: outputs.cloudidentity.GetPoliciesPolicyPolicyQuery[]; /** * The setting configured by this policy, represented as a JSON string. */ setting: string; /** * The type of the policy. */ type: string; } interface GetPoliciesPolicyPolicyQuery { /** * The group that the policy applies to. */ group: string; /** * The org unit that the policy applies to. */ orgUnit: string; /** * The query that defines which entities the policy applies to. */ query: string; /** * The sort order of the policy. */ sortOrder: number; } interface GetPolicyPolicyQuery { /** * The group that the policy applies to. */ group: string; /** * The org unit that the policy applies to. */ orgUnit: string; /** * The query that defines which entities the policy applies to. */ query: string; /** * The sort order of the policy. */ sortOrder: number; } interface GroupAdditionalGroupKey { /** * (Output) * The ID of the entity. * For Google-managed entities, the id must be the email address of an existing * group or user. * For external-identity-mapped entities, the id must be a string conforming * to the Identity Source's requirements. * Must be unique within a namespace. */ id: string; /** * (Output) * The namespace in which the entity exists. * If not specified, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If specified, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. */ namespace: string; } interface GroupGroupKey { /** * The ID of the entity. * For Google-managed entities, the id must be the email address of an existing * group or user. * For external-identity-mapped entities, the id must be a string conforming * to the Identity Source's requirements. * Must be unique within a namespace. */ id: string; /** * The namespace in which the entity exists. 
* If not specified, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If specified, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. */ namespace?: string; } interface GroupMembershipMemberKey { /** * The ID of the entity. * For Google-managed entities, the id must be the email address of an existing * group or user. * For external-identity-mapped entities, the id must be a string conforming * to the Identity Source's requirements. * Must be unique within a namespace. */ id: string; /** * The namespace in which the entity exists. * If not specified, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If specified, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. */ namespace?: string; } interface GroupMembershipPreferredMemberKey { /** * The ID of the entity. * For Google-managed entities, the id must be the email address of an existing * group or user. * For external-identity-mapped entities, the id must be a string conforming * to the Identity Source's requirements. * Must be unique within a namespace. */ id: string; /** * The namespace in which the entity exists. * If not specified, the EntityKey represents a Google-managed entity * such as a Google user or a Google Group. * If specified, the EntityKey represents an external-identity-mapped group. * The namespace must correspond to an identity source created in Admin Console * and must be in the form of `identitysources/{identity_source_id}`. */ namespace?: string; } interface GroupMembershipRole { /** * The MembershipRole expiry details, only supported for MEMBER role. 
* Other roles cannot be accompanied with MEMBER role having expiry. * Structure is documented below. */ expiryDetail?: outputs.cloudidentity.GroupMembershipRoleExpiryDetail; /** * The name of the MembershipRole. Must be one of OWNER, MANAGER, MEMBER. * Possible values are: `OWNER`, `MANAGER`, `MEMBER`. */ name: string; } interface GroupMembershipRoleExpiryDetail { /** * The time at which the MembershipRole will expire. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ expireTime: string; } interface PolicyPolicyQuery { /** * The group that the query applies to. */ group?: string; /** * The OrgUnit the query applies to. */ orgUnit: string; /** * The CEL query that defines which entities the Policy applies to. */ query?: string; /** * (Output) * Decimal sort order of this PolicyQuery. */ sortOrder: number; } interface PolicySetting { /** * The type of the Setting. */ type: string; /** * The value of the Setting as JSON string. */ valueJson: string; } } export declare namespace cloudquota { interface GetSQuotaInfoDimensionsInfo { /** * The applicable regions or zones of this dimensions info. The field will be set to `['global']` for quotas that are not per region or per zone. Otherwise, it will be set to the list of locations this dimension info is applicable to. */ applicableLocations: string[]; /** * The quota details for a map of dimensions. */ details: outputs.cloudquota.GetSQuotaInfoDimensionsInfoDetail[]; /** * The map of dimensions for this dimensions info. The key of a map entry is "region", "zone" or the name of a service specific dimension, and the value of a map entry is the value of the dimension. If a dimension does not appear in the map of dimensions, the dimensions info applies to all the dimension values except for those that have another DimenisonInfo instance configured for the specific value. 
Example: {"provider" : "Foo Inc"} where "provider" is a service specific dimension of a quota. */ dimensions: { [key: string]: string; }; } interface GetSQuotaInfoDimensionsInfoDetail { /** * The value currently in effect and being enforced. */ value: string; } interface GetSQuotaInfoQuotaIncreaseEligibility { /** * The enumeration of reasons when it is ineligible to request increase adjustment. */ ineligibilityReason: string; /** * Whether a higher quota value can be requested for the quota. */ isEligible: boolean; } interface GetSQuotaInfosQuotaInfo { /** * (Output) The container type of the QuotaInfo. */ containerType: string; /** * The map of dimensions for this dimensions info. The key of a map entry is "region", "zone" or the name of a service specific dimension, and the value of a map entry is the value of the dimension. If a dimension does not appear in the map of dimensions, the dimensions info applies to all the dimension values except for those that have another DimenisonInfo instance configured for the specific value. Example: {"provider" : "Foo Inc"} where "provider" is a service specific dimension of a quota. */ dimensions: string[]; /** * (Output) The collection of dimensions info ordered by their dimensions from more specific ones to less specific ones. */ dimensionsInfos: outputs.cloudquota.GetSQuotaInfosQuotaInfoDimensionsInfo[]; /** * (Output) Whether the quota is a concurrent quota. Concurrent quotas are enforced on the total number of concurrent operations in flight at any given time. */ isConcurrent: boolean; /** * (Output) Whether the quota value is fixed or adjustable. */ isFixed: boolean; /** * (Output) Whether this is a precise quota. A precise quota is tracked with absolute precision. In contrast, an imprecise quota is not tracked with precision. */ isPrecise: boolean; /** * (Output) The metric of the quota. It specifies the resources consumption the quota is defined for, for example: `compute.googleapis.com/cpus`. 
*/ metric: string; /** * (Output) The display name of the quota metric. */ metricDisplayName: string; /** * (Output) The unit in which the metric value is reported, e.g., `MByte`. */ metricUnit: string; /** * (Output) Resource name of this QuotaInfo, for example: `projects/123/locations/global/services/compute.googleapis.com/quotaInfos/CpusPerProjectPerRegion`. */ name: string; /** * (Output) The display name of the quota. */ quotaDisplayName: string; quotaId: string; /** * (Output) Whether it is eligible to request a higher quota value for this quota. */ quotaIncreaseEligibilities: outputs.cloudquota.GetSQuotaInfosQuotaInfoQuotaIncreaseEligibility[]; /** * (Output) The reset time interval for the quota. Refresh interval applies to rate quota only. Example: "minute" for per minute, "day" for per day, or "10 seconds" for every 10 seconds. */ refreshInterval: string; /** * The name of the service in which the quotas are defined. */ service: string; /** * (Output) URI to the page where users can request more quota for the cloud service, for example: `https://console.cloud.google.com/iam-admin/quotas`. */ serviceRequestQuotaUri: string; } interface GetSQuotaInfosQuotaInfoDimensionsInfo { /** * The applicable regions or zones of this dimensions info. The field will be set to `['global']` for quotas that are not per region or per zone. Otherwise, it will be set to the list of locations this dimension info is applicable to. */ applicableLocations: string[]; /** * The quota details for a map of dimensions. */ details: outputs.cloudquota.GetSQuotaInfosQuotaInfoDimensionsInfoDetail[]; /** * The map of dimensions for this dimensions info. The key of a map entry is "region", "zone" or the name of a service specific dimension, and the value of a map entry is the value of the dimension. 
If a dimension does not appear in the map of dimensions, the dimensions info applies to all the dimension values except for those that have another DimenisonInfo instance configured for the specific value. Example: {"provider" : "Foo Inc"} where "provider" is a service specific dimension of a quota. */ dimensions: { [key: string]: string; }; } interface GetSQuotaInfosQuotaInfoDimensionsInfoDetail { /** * The value currently in effect and being enforced. */ value: string; } interface GetSQuotaInfosQuotaInfoQuotaIncreaseEligibility { /** * The enumeration of reasons when it is ineligible to request increase adjustment. */ ineligibilityReason: string; /** * Whether a higher quota value can be requested for the quota. */ isEligible: boolean; } interface SQuotaPreferenceQuotaConfig { /** * The annotations map for clients to store small amounts of arbitrary data. Do not put PII or other sensitive information here. See https://google.aip.dev/128#annotations. * An object containing a list of "key: value" pairs. Example: `{ "name": "wrench", "mass": "1.3kg", "count": "3" }`. */ annotations?: { [key: string]: string; }; /** * (Output) * Granted quota value. */ grantedValue: string; /** * The preferred value. Must be greater than or equal to -1. If set to -1, it means the value is "unlimited". */ preferredValue: string; /** * (Output) * The origin of the quota preference request. */ requestOrigin: string; /** * (Output) * Optional details about the state of this quota preference. */ stateDetail: string; /** * (Output) * The trace id that the Google Cloud uses to provision the requested quota. This trace id may be used by the client to contact Cloud support to track the state of a quota preference request. The trace id is only produced for increase requests and is unique for each request. The quota decrease requests do not have a trace id. 
*/ traceId: string; } } export declare namespace cloudrun { interface DomainMappingMetadata { /** * Annotations is a key value map stored with a resource that * may be set by external tools to store and retrieve arbitrary metadata. * More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations * **Note**: The Cloud Run API may add additional annotations that were not provided in your config. * If the provider plan shows a diff where a server-side annotation is added, you can add it to your config * or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. * Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. */ annotations?: { [key: string]: string; }; /** * (Output) * All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Pulumi, other clients and services. */ effectiveAnnotations: { [key: string]: string; }; /** * (Output) * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * (Output) * A sequence number representing a specific generation of the desired state. */ generation: number; /** * Map of string keys and values that can be used to organize and categorize * (scope and select) objects. May match selectors of replication controllers * and routes. * More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field `effectiveLabels` for all of the labels present on the resource. 
*/ labels?: { [key: string]: string; }; /** * In Cloud Run the namespace must be equal to either the * project ID or project number. */ namespace: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * (Output) * An opaque value that represents the internal version of this object that * can be used by clients to determine when objects have changed. May be used * for optimistic concurrency, change detection, and the watch operation on a * resource or set of resources. They may only be valid for a * particular resource or set of resources. * More info: * https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency */ resourceVersion: string; /** * (Output) * SelfLink is a URL representing this object. */ selfLink: string; /** * (Output) * UID is a unique id generated by the server on successful creation of a resource and is not * allowed to change on PUT operations. * More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids */ uid: string; } interface DomainMappingSpec { /** * The mode of the certificate. * Default value is `AUTOMATIC`. * Possible values are: `NONE`, `AUTOMATIC`. */ certificateMode?: string; /** * If set, the mapping will override any mapping set before this spec was set. * It is recommended that the user leaves this empty to receive an error * warning about a potential conflict and only set it once the respective UI * has given such a warning. */ forceOverride?: boolean; /** * The name of the Cloud Run Service that this DomainMapping applies to. * The route must exist. */ routeName: string; } interface DomainMappingStatus { /** * (Output) * Array of observed DomainMappingConditions, indicating the current state * of the DomainMapping. * Structure is documented below. 
*/ conditions: outputs.cloudrun.DomainMappingStatusCondition[]; /** * (Output) * The name of the route that the mapping currently points to. */ mappedRouteName: string; /** * (Output) * ObservedGeneration is the 'Generation' of the DomainMapping that * was last processed by the controller. */ observedGeneration: number; /** * The resource records required to configure this domain mapping. These * records must be added to the domain's DNS configuration in order to * serve the application via this domain mapping. * Structure is documented below. */ resourceRecords?: outputs.cloudrun.DomainMappingStatusResourceRecord[]; } interface DomainMappingStatusCondition { /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * One-word CamelCase reason for the condition's current status. */ reason: string; /** * (Output) * Status of the condition, one of True, False, Unknown. */ status: string; /** * (Output) * Type of domain mapping condition, for example: Ready. */ type: string; } interface DomainMappingStatusResourceRecord { /** * Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain */ name: string; /** * (Output) * Data for this record. Values vary by record type, as defined in RFC 1035 * (section 5) and RFC 1034 (section 3.6.1). */ rrdata: string; /** * Resource record type. Example: `AAAA`. * Possible values are: `A`, `AAAA`, `CNAME`. */ type?: string; } interface GetServiceMetadata { /** * Annotations is a key value map stored with a resource that * may be set by external tools to store and retrieve arbitrary metadata. * More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations * * **Note**: The Cloud Run API may add additional annotations that were not provided in your config. 
* If the provider plan shows a diff where a server-side annotation is added, you can add it to your config * or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. * * Annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted. Use the following annotation * keys to configure features on a Service: * * - 'run.googleapis.com/binary-authorization-breakglass' sets the [Binary Authorization breakglass](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--breakglass). * - 'run.googleapis.com/binary-authorization' sets the [Binary Authorization](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--binary-authorization). * - 'run.googleapis.com/client-name' sets the client name calling the Cloud Run API. * - 'run.googleapis.com/custom-audiences' sets the [custom audiences](https://cloud.google.com/sdk/gcloud/reference/alpha/run/deploy#--add-custom-audiences) * that can be used in the audience field of ID token for authenticated requests. * - 'run.googleapis.com/description' sets a user defined description for the Service. * - 'run.googleapis.com/ingress' sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) * for the Service. For example, '"run.googleapis.com/ingress" = "all"'. * - 'run.googleapis.com/launch-stage' sets the [launch stage](https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation) * when a preview feature is used. For example, '"run.googleapis.com/launch-stage": "BETA"' * - 'run.googleapis.com/minScale' sets the [minimum number of container instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--min) of the Service. * - 'run.googleapis.com/scalingMode' sets the type of scaling mode for the service. The supported values for scaling mode are "manual" and "automatic". If not provided, it defaults to "automatic". * - 'run.googleapis.com/manualInstanceCount' sets the total instance count for the service in manual scaling mode. 
This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. * * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. * Please refer to the field 'effectiveAnnotations' for all of the annotations present on the resource. */ annotations: { [key: string]: string; }; /** * All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Pulumi, other clients and services. */ effectiveAnnotations: { [key: string]: string; }; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * A sequence number representing a specific generation of the desired state. */ generation: number; /** * Map of string keys and values that can be used to organize and categorize * (scope and select) objects. May match selectors of replication controllers * and routes. * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effectiveLabels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * In Cloud Run the namespace must be equal to either the * project ID or project number. */ namespace: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * An opaque value that represents the internal version of this object that * can be used by clients to determine when objects have changed. May be used * for optimistic concurrency, change detection, and the watch operation on a * resource or set of resources. They may only be valid for a * particular resource or set of resources. 
*/ resourceVersion: string; /** * SelfLink is a URL representing this object. */ selfLink: string; /** * UID is a unique id generated by the server on successful creation of a resource and is not * allowed to change on PUT operations. */ uid: string; } interface GetServiceStatus { /** * Array of observed Service Conditions, indicating the current ready state of the service. */ conditions: outputs.cloudrun.GetServiceStatusCondition[]; /** * From ConfigurationStatus. LatestCreatedRevisionName is the last revision that was created * from this Service's Configuration. It might not be ready yet, for that use * LatestReadyRevisionName. */ latestCreatedRevisionName: string; /** * From ConfigurationStatus. LatestReadyRevisionName holds the name of the latest Revision * stamped out from this Service's Configuration that has had its "Ready" condition become * "True". */ latestReadyRevisionName: string; /** * ObservedGeneration is the 'Generation' of the Route that was last processed by the * controller. * * Clients polling for completed reconciliation should poll until observedGeneration = * metadata.generation and the Ready condition's status is True or False. */ observedGeneration: number; /** * Traffic specifies how to distribute traffic over a collection of Knative Revisions * and Configurations */ traffics: outputs.cloudrun.GetServiceStatusTraffic[]; /** * From RouteStatus. URL holds the url that will distribute traffic over the provided traffic * targets. It generally has the form * https://{route-hash}-{project-hash}-{cluster-level-suffix}.a.run.app */ url: string; } interface GetServiceStatusCondition { /** * Human readable message indicating details about the current status. */ message: string; /** * One-word CamelCase reason for the condition's current status. */ reason: string; /** * Status of the condition, one of True, False, Unknown. */ status: string; /** * Type of domain mapping condition. 
*/ type: string; } interface GetServiceStatusTraffic { /** * LatestRevision may be optionally provided to indicate that the latest ready * Revision of the Configuration should be used for this traffic target. When * provided LatestRevision must be true if RevisionName is empty; it must be * false when RevisionName is non-empty. */ latestRevision: boolean; /** * Percent specifies percent of the traffic to this Revision or Configuration. */ percent: number; /** * RevisionName of a specific revision to which to send this portion of traffic. */ revisionName: string; /** * Tag is optionally used to expose a dedicated url for referencing this target exclusively. */ tag: string; /** * URL displays the URL for accessing tagged traffic targets. URL is displayed in status, * and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, * but may not contain anything else (e.g. basic auth, url path, etc.) */ url: string; } interface GetServiceTemplate { /** * Optional metadata for this Revision, including labels and annotations. * Name will be generated by the Configuration. To set minimum instances * for this revision, use the "autoscaling.knative.dev/minScale" annotation * key. To set maximum instances for this revision, use the * "autoscaling.knative.dev/maxScale" annotation key. To set Cloud SQL * connections for the revision, use the "run.googleapis.com/cloudsql-instances" * annotation key. */ metadatas: outputs.cloudrun.GetServiceTemplateMetadata[]; /** * RevisionSpec holds the desired state of the Revision (from the client). */ specs: outputs.cloudrun.GetServiceTemplateSpec[]; } interface GetServiceTemplateMetadata { /** * Annotations is a key value map stored with a resource that * may be set by external tools to store and retrieve arbitrary metadata. * More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations * * **Note**: The Cloud Run API may add additional annotations that were not provided in your config. 
* If the provider plan shows a diff where a server-side annotation is added, you can add it to your config * or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. * * Annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted. Use the following annotation * keys to configure features on a Revision template: * * - 'autoscaling.knative.dev/maxScale' sets the [maximum number of container * instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--max-instances) of the Revision to run. * - 'autoscaling.knative.dev/minScale' sets the [minimum number of container * instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--min-instances) of the Revision to run. * - 'run.googleapis.com/client-name' sets the client name calling the Cloud Run API. * - 'run.googleapis.com/cloudsql-instances' sets the [Cloud SQL * instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--add-cloudsql-instances) the Revision connects to. * - 'run.googleapis.com/cpu-throttling' sets whether to throttle the CPU when the container is not actively serving * requests. See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--[no-]cpu-throttling. * - 'run.googleapis.com/encryption-key-shutdown-hours' sets the number of hours to wait before an automatic shutdown * of the server after CMEK key revocation is detected. * - 'run.googleapis.com/encryption-key' sets the [CMEK key](https://cloud.google.com/run/docs/securing/using-cmek) * reference to encrypt the container with. * - 'run.googleapis.com/execution-environment' sets the [execution * environment](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--execution-environment) * where the application will run. * - 'run.googleapis.com/post-key-revocation-action-type' sets the * [action type](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--post-key-revocation-action-type) * after CMEK key revocation. 
* - 'run.googleapis.com/secrets' sets a list of key-value pairs to set as * [secrets](https://cloud.google.com/run/docs/configuring/secrets#yaml). * - 'run.googleapis.com/sessionAffinity' sets whether to enable * [session affinity](https://cloud.google.com/sdk/gcloud/reference/beta/run/deploy#--[no-]session-affinity) * for connections to the Revision. * - 'run.googleapis.com/startup-cpu-boost' sets whether to allocate extra CPU to containers on startup. * See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--[no-]cpu-boost. * - 'run.googleapis.com/network-interfaces' sets [Direct VPC egress](https://cloud.google.com/run/docs/configuring/vpc-direct-vpc#yaml) * for the Revision. * - 'run.googleapis.com/vpc-access-connector' sets a [VPC connector](https://cloud.google.com/run/docs/configuring/connecting-vpc#terraform_1) * for the Revision. * - 'run.googleapis.com/vpc-access-egress' sets the outbound traffic to send through the VPC connector for this resource. * See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--vpc-egress. * - 'run.googleapis.com/gpu-zonal-redundancy-disabled' sets * [GPU zonal redundancy](https://cloud.google.com/run/docs/configuring/services/gpu-zonal-redundancy) for the Revision. * - 'run.googleapis.com/health-check-disabled' disabled health checking containers during deployment. */ annotations: { [key: string]: string; }; /** * A sequence number representing a specific generation of the desired state. */ generation: number; /** * Map of string keys and values that can be used to organize and categorize * (scope and select) objects. */ labels: { [key: string]: string; }; /** * The name of the Cloud Run Service. */ name: string; /** * In Cloud Run the namespace must be equal to either the * project ID or project number. It will default to the resource's project. */ namespace: string; /** * An opaque value that represents the internal version of this object that * can be used by clients to determine when objects have changed. 
May be used * for optimistic concurrency, change detection, and the watch operation on a * resource or set of resources. They may only be valid for a * particular resource or set of resources. */ resourceVersion: string; /** * SelfLink is a URL representing this object. */ selfLink: string; /** * UID is a unique id generated by the server on successful creation of a resource and is not * allowed to change on PUT operations. */ uid: string; } interface GetServiceTemplateSpec { /** * ContainerConcurrency specifies the maximum allowed in-flight (concurrent) * requests per container of the Revision. If not specified or 0, defaults to 80 when * requested CPU >= 1 and defaults to 1 when requested CPU < 1. */ containerConcurrency: number; /** * Containers defines the unit of execution for this Revision. */ containers: outputs.cloudrun.GetServiceTemplateSpecContainer[]; /** * Node Selector describes the hardware requirements of the resources. * Use the following node selector keys to configure features on a Revision: * - 'run.googleapis.com/accelerator' sets the [type of GPU](https://cloud.google.com/run/docs/configuring/services/gpu) required by the Revision to run. */ nodeSelector: { [key: string]: string; }; /** * Email address of the IAM service account associated with the revision of the * service. The service account represents the identity of the running revision, * and determines what permissions the revision has. If not provided, the revision * will use the project's default service account. */ serviceAccountName: string; /** * ServingState holds a value describing the state the resources * are in for this Revision. * It is expected * that the system will manipulate this based on routability and load. */ servingState: string; /** * TimeoutSeconds holds the max duration the instance is allowed for responding to a request. */ timeoutSeconds: number; /** * Volume represents a named volume in a container. 
*/ volumes: outputs.cloudrun.GetServiceTemplateSpecVolume[]; } interface GetServiceTemplateSpecContainer { /** * Arguments to the entrypoint. * The docker image's CMD is used if this is not provided. */ args: string[]; /** * Entrypoint array. Not executed within a shell. * The docker image's ENTRYPOINT is used if this is not provided. */ commands: string[]; /** * List of sources to populate environment variables in the container. * All invalid keys will be reported as an event when the container is starting. * When a key exists in multiple sources, the value associated with the last source will * take precedence. Values defined by an Env with a duplicate key will take * precedence. */ envFroms: outputs.cloudrun.GetServiceTemplateSpecContainerEnvFrom[]; /** * List of environment variables to set in the container. */ envs: outputs.cloudrun.GetServiceTemplateSpecContainerEnv[]; /** * Docker image name. This is most often a reference to a container located * in the container registry, such as gcr.io/cloudrun/hello */ image: string; /** * Periodic probe of container liveness. Container will be restarted if the probe fails. */ livenessProbes: outputs.cloudrun.GetServiceTemplateSpecContainerLivenessProbe[]; /** * The name of the Cloud Run Service. */ name: string; /** * List of open ports in the container. */ ports: outputs.cloudrun.GetServiceTemplateSpecContainerPort[]; /** * Periodic probe of container readiness. */ readinessProbes: outputs.cloudrun.GetServiceTemplateSpecContainerReadinessProbe[]; /** * Compute Resources required by this container. Used to set values such as max memory */ resources: outputs.cloudrun.GetServiceTemplateSpecContainerResource[]; /** * Startup probe of application within the container. * All other probes are disabled if a startup probe is provided, until it * succeeds. Container will not be added to service endpoints if the probe fails. 
*/ startupProbes: outputs.cloudrun.GetServiceTemplateSpecContainerStartupProbe[]; /** * Volume to mount into the container's filesystem. * Only supports SecretVolumeSources. */ volumeMounts: outputs.cloudrun.GetServiceTemplateSpecContainerVolumeMount[]; /** * Container's working directory. * If not specified, the container runtime's default will be used, which * might be configured in the container image. */ workingDir: string; } interface GetServiceTemplateSpecContainerEnv { /** * The name of the Cloud Run Service. */ name: string; /** * Defaults to "". */ value: string; /** * Source for the environment variable's value. Only supports secret_key_ref. */ valueFroms: outputs.cloudrun.GetServiceTemplateSpecContainerEnvValueFrom[]; } interface GetServiceTemplateSpecContainerEnvFrom { /** * The ConfigMap to select from. */ configMapReves: outputs.cloudrun.GetServiceTemplateSpecContainerEnvFromConfigMapRef[]; /** * An optional identifier to prepend to each key in the ConfigMap. */ prefix: string; /** * The Secret to select from. */ secretReves: outputs.cloudrun.GetServiceTemplateSpecContainerEnvFromSecretRef[]; } interface GetServiceTemplateSpecContainerEnvFromConfigMapRef { /** * The ConfigMap to select from. */ localObjectReferences: outputs.cloudrun.GetServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReference[]; /** * Specify whether the ConfigMap must be defined */ optional: boolean; } interface GetServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReference { /** * The name of the Cloud Run Service. */ name: string; } interface GetServiceTemplateSpecContainerEnvFromSecretRef { /** * The Secret to select from. */ localObjectReferences: outputs.cloudrun.GetServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReference[]; /** * Specify whether the Secret must be defined */ optional: boolean; } interface GetServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReference { /** * The name of the Cloud Run Service. 
*/ name: string; } interface GetServiceTemplateSpecContainerEnvValueFrom { /** * Selects a key (version) of a secret in Secret Manager. */ secretKeyReves: outputs.cloudrun.GetServiceTemplateSpecContainerEnvValueFromSecretKeyRef[]; } interface GetServiceTemplateSpecContainerEnvValueFromSecretKeyRef { /** * A Cloud Secret Manager secret version. Must be 'latest' for the latest * version or an integer for a specific version. */ key: string; /** * The name of the Cloud Run Service. */ name: string; } interface GetServiceTemplateSpecContainerLivenessProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrun.GetServiceTemplateSpecContainerLivenessProbeGrpc[]; /** * HttpGet specifies the http request to perform. */ httpGets: outputs.cloudrun.GetServiceTemplateSpecContainerLivenessProbeHttpGet[]; /** * Number of seconds after the container has started before the probe is * initiated. * Defaults to 0 seconds. Minimum value is 0. Maximum value is 3600. */ initialDelaySeconds: number; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. Minimum value is 1. Maximum value is 3600. */ periodSeconds: number; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Minimum value is 1. Maximum value is 3600. * Must be smaller than period_seconds. */ timeoutSeconds: number; } interface GetServiceTemplateSpecContainerLivenessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. 
*/ service: string; } interface GetServiceTemplateSpecContainerLivenessProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. */ httpHeaders: outputs.cloudrun.GetServiceTemplateSpecContainerLivenessProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateSpecContainerLivenessProbeHttpGetHttpHeader { /** * The name of the Cloud Run Service. */ name: string; /** * The header field value. */ value: string; } interface GetServiceTemplateSpecContainerPort { /** * Port number the container listens on. This must be a valid port number (between 1 and 65535). Defaults to "8080". */ containerPort: number; /** * The name of the Cloud Run Service. */ name: string; /** * Protocol for port. Must be "TCP". Defaults to "TCP". */ protocol: string; } interface GetServiceTemplateSpecContainerReadinessProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrun.GetServiceTemplateSpecContainerReadinessProbeGrpc[]; /** * HttpGet specifies the http request to perform. */ httpGets: outputs.cloudrun.GetServiceTemplateSpecContainerReadinessProbeHttpGet[]; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. */ periodSeconds: number; /** * Minimum consecutive successes for the probe to be considered successful after having failed. * Defaults to 2. */ successThreshold: number; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Must be smaller than period_seconds. 
*/ timeoutSeconds: number; } interface GetServiceTemplateSpecContainerReadinessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service: string; } interface GetServiceTemplateSpecContainerReadinessProbeHttpGet { /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateSpecContainerResource { /** * Limits describes the maximum amount of compute resources allowed. * CPU Limit details: * - For fractional CPU values (e.g. '0.5', '0.75', min '0.08') are also supported. * - CPU allocation must comply with memory limits and concurrency rules described in: * https://cloud.google.com/run/docs/configuring/services/cpu * The values of the map is string form of the 'quantity' k8s type: * https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; /** * Requests describes the minimum amount of compute resources required. * If Requests is omitted for a container, it defaults to Limits if that is * explicitly specified, otherwise to an implementation-defined value. 
* The values of the map is string form of the 'quantity' k8s type: * https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ requests: { [key: string]: string; }; } interface GetServiceTemplateSpecContainerStartupProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrun.GetServiceTemplateSpecContainerStartupProbeGrpc[]; /** * HttpGet specifies the http request to perform. */ httpGets: outputs.cloudrun.GetServiceTemplateSpecContainerStartupProbeHttpGet[]; /** * Number of seconds after the container has started before the probe is * initiated. * Defaults to 0 seconds. Minimum value is 0. Maximum value is 240. */ initialDelaySeconds: number; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. Minimum value is 1. Maximum value is 240. */ periodSeconds: number; /** * TcpSocket specifies an action involving a TCP port. */ tcpSockets: outputs.cloudrun.GetServiceTemplateSpecContainerStartupProbeTcpSocket[]; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Minimum value is 1. Maximum value is 3600. * Must be smaller than periodSeconds. */ timeoutSeconds: number; } interface GetServiceTemplateSpecContainerStartupProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service: string; } interface GetServiceTemplateSpecContainerStartupProbeHttpGet { /** * Custom headers to set in the request. 
HTTP allows repeated headers. */ httpHeaders: outputs.cloudrun.GetServiceTemplateSpecContainerStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateSpecContainerStartupProbeHttpGetHttpHeader { /** * The name of the Cloud Run Service. */ name: string; /** * The header field value. */ value: string; } interface GetServiceTemplateSpecContainerStartupProbeTcpSocket { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateSpecContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must * not contain ':'. */ mountPath: string; /** * The name of the Cloud Run Service. */ name: string; /** * Path within the volume from which the container's volume should be mounted. */ subPath: string; } interface GetServiceTemplateSpecVolume { /** * A filesystem specified by the Container Storage Interface (CSI). */ csis: outputs.cloudrun.GetServiceTemplateSpecVolumeCsi[]; /** * Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). */ emptyDirs: outputs.cloudrun.GetServiceTemplateSpecVolumeEmptyDir[]; /** * The name of the Cloud Run Service. */ name: string; /** * A filesystem backed by a Network File System share. 
This filesystem requires the * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ nfs: outputs.cloudrun.GetServiceTemplateSpecVolumeNf[]; /** * The secret's value will be presented as the content of a file whose * name is defined in the item path. If no items are defined, the name of * the file is the secret_name. */ secrets: outputs.cloudrun.GetServiceTemplateSpecVolumeSecret[]; } interface GetServiceTemplateSpecVolumeCsi { /** * Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ driver: string; /** * If true, all mounts created from this volume will be read-only. */ readOnly: boolean; /** * Driver-specific attributes. The following options are supported for available drivers: * * gcsfuse.run.googleapis.com * * bucketName: The name of the Cloud Storage Bucket that backs this volume. The Cloud Run Service identity must have access to this bucket. */ volumeAttributes: { [key: string]: string; }; } interface GetServiceTemplateSpecVolumeEmptyDir { /** * The medium on which the data is stored. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. */ medium: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. 
*/ sizeLimit: string; } interface GetServiceTemplateSpecVolumeNf { /** * Path exported by the NFS server */ path: string; /** * If true, mount the NFS volume as read only in all mounts. Defaults to false. */ readOnly: boolean; /** * IP address or hostname of the NFS server */ server: string; } interface GetServiceTemplateSpecVolumeSecret { /** * Mode bits to use on created files by default. Must be a value between 0000 * and 0777. Defaults to 0644. Directories within the path are not affected by * this setting. This might be in conflict with other options that affect the * file mode, like fsGroup, and the result can be other mode bits set. */ defaultMode: number; /** * If unspecified, the volume will expose a file whose name is the * secret_name. * If specified, the key will be used as the version to fetch from Cloud * Secret Manager and the path will be the name of the file exposed in the * volume. When items are defined, they must specify a key and a path. */ items: outputs.cloudrun.GetServiceTemplateSpecVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. By default, the secret * is assumed to be in the same project. * If the secret is in another project, you must define an alias. * An alias definition has the form: * {alias}:projects/{project-id|project-number}/secrets/{secret-name}. * If multiple alias definitions are needed, they must be separated by * commas. * The alias definitions must be set on the run.googleapis.com/secrets * annotation. */ secretName: string; } interface GetServiceTemplateSpecVolumeSecretItem { /** * The Cloud Secret Manager secret version. * Can be 'latest' for the latest value or an integer for a specific version. */ key: string; /** * Mode bits to use on this file, must be a value between 0000 and 0777. If * not specified, the volume defaultMode will be used. This might be in * conflict with other options that affect the file mode, like fsGroup, and * the result can be other mode bits set. 
*/ mode: number; /** * The relative path of the file to map the key to. * May not be an absolute path. * May not contain the path element '..'. * May not start with the string '..'. */ path: string; } interface GetServiceTraffic { /** * LatestRevision may be optionally provided to indicate that the latest ready * Revision of the Configuration should be used for this traffic target. When * provided LatestRevision must be true if RevisionName is empty; it must be * false when RevisionName is non-empty. */ latestRevision: boolean; /** * Percent specifies percent of the traffic to this Revision or Configuration. */ percent: number; /** * RevisionName of a specific revision to which to send this portion of traffic. */ revisionName: string; /** * Tag is optionally used to expose a dedicated url for referencing this target exclusively. */ tag: string; /** * URL displays the URL for accessing tagged traffic targets. URL is displayed in status, * and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, * but may not contain anything else (e.g. basic auth, url path, etc.) */ url: string; } interface IamBindingCondition { description?: string; expression: string; title: string; } interface IamMemberCondition { description?: string; expression: string; title: string; } interface ServiceMetadata { /** * Annotations is a key value map stored with a resource that * may be set by external tools to store and retrieve arbitrary metadata. * More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations * **Note**: The Cloud Run API may add additional annotations that were not provided in your config. * If the provider plan shows a diff where a server-side annotation is added, you can add it to your config * or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. * Annotations with `run.googleapis.com/` and `autoscaling.knative.dev` are restricted. 
Use the following annotation * keys to configure features on a Service: * - `run.googleapis.com/binary-authorization-breakglass` sets the [Binary Authorization breakglass](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--breakglass). * - `run.googleapis.com/binary-authorization` sets the [Binary Authorization](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--binary-authorization). * - `run.googleapis.com/client-name` sets the client name calling the Cloud Run API. * - `run.googleapis.com/custom-audiences` sets the [custom audiences](https://cloud.google.com/sdk/gcloud/reference/alpha/run/deploy#--add-custom-audiences) * that can be used in the audience field of ID token for authenticated requests. * - `run.googleapis.com/description` sets a user defined description for the Service. * - `run.googleapis.com/ingress` sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) * for the Service. For example, `"run.googleapis.com/ingress" = "all"`. * - `run.googleapis.com/launch-stage` sets the [launch stage](https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation) * when a preview feature is used. For example, `"run.googleapis.com/launch-stage": "BETA"` * - `run.googleapis.com/minScale` sets the [minimum number of container instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--min) of the Service. * - `run.googleapis.com/scalingMode` sets the type of scaling mode for the service. The supported values for scaling mode are "manual" and "automatic". If not provided, it defaults to "automatic". * - `run.googleapis.com/manualInstanceCount` sets the total instance count for the service in manual scaling mode. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. 
* Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. */ annotations?: { [key: string]: string; }; /** * (Output) * All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Pulumi, other clients and services. */ effectiveAnnotations: { [key: string]: string; }; /** * (Output) * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * (Output) * A sequence number representing a specific generation of the desired state. */ generation: number; /** * Map of string keys and values that can be used to organize and categorize * (scope and select) objects. May match selectors of replication controllers * and routes. * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field `effectiveLabels` for all of the labels present on the resource. */ labels?: { [key: string]: string; }; /** * In Cloud Run the namespace must be equal to either the * project ID or project number. */ namespace: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * (Output) * An opaque value that represents the internal version of this object that * can be used by clients to determine when objects have changed. May be used * for optimistic concurrency, change detection, and the watch operation on a * resource or set of resources. They may only be valid for a * particular resource or set of resources. */ resourceVersion: string; /** * (Output) * SelfLink is a URL representing this object. */ selfLink: string; /** * (Output) * UID is a unique id generated by the server on successful creation of a resource and is not * allowed to change on PUT operations. 
*/ uid: string; } interface ServiceStatus { /** * (Output) * Array of observed Service Conditions, indicating the current ready state of the service. * Structure is documented below. */ conditions: outputs.cloudrun.ServiceStatusCondition[]; /** * (Output) * From ConfigurationStatus. LatestCreatedRevisionName is the last revision that was created * from this Service's Configuration. It might not be ready yet, for that use * LatestReadyRevisionName. */ latestCreatedRevisionName: string; /** * (Output) * From ConfigurationStatus. LatestReadyRevisionName holds the name of the latest Revision * stamped out from this Service's Configuration that has had its "Ready" condition become * "True". */ latestReadyRevisionName: string; /** * (Output) * ObservedGeneration is the 'Generation' of the Route that was last processed by the * controller. * Clients polling for completed reconciliation should poll until observedGeneration = * metadata.generation and the Ready condition's status is True or False. */ observedGeneration: number; /** * Traffic specifies how to distribute traffic over a collection of Knative Revisions * and Configurations * Structure is documented below. */ traffics: outputs.cloudrun.ServiceStatusTraffic[]; /** * (Output) * URL displays the URL for accessing tagged traffic targets. URL is displayed in status, * and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, * but may not contain anything else (e.g. basic auth, url path, etc.) */ url: string; } interface ServiceStatusCondition { /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * One-word CamelCase reason for the condition's current status. */ reason: string; /** * (Output) * Status of the condition, one of True, False, Unknown. */ status: string; /** * (Output) * Type of domain mapping condition. 
*/ type: string; } interface ServiceStatusTraffic { /** * LatestRevision may be optionally provided to indicate that the latest ready * Revision of the Configuration should be used for this traffic target. When * provided LatestRevision must be true if RevisionName is empty; it must be * false when RevisionName is non-empty. */ latestRevision: boolean; /** * Percent specifies percent of the traffic to this Revision or Configuration. */ percent: number; /** * RevisionName of a specific revision to which to send this portion of traffic. */ revisionName: string; /** * Tag is optionally used to expose a dedicated url for referencing this target exclusively. */ tag: string; /** * (Output) * URL displays the URL for accessing tagged traffic targets. URL is displayed in status, * and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, * but may not contain anything else (e.g. basic auth, url path, etc.) */ url: string; } interface ServiceTemplate { /** * Optional metadata for this Revision, including labels and annotations. * Name will be generated by the Configuration. To set minimum instances * for this revision, use the "autoscaling.knative.dev/minScale" annotation * key. To set maximum instances for this revision, use the * "autoscaling.knative.dev/maxScale" annotation key. To set Cloud SQL * connections for the revision, use the "run.googleapis.com/cloudsql-instances" * annotation key. * Structure is documented below. */ metadata: outputs.cloudrun.ServiceTemplateMetadata; /** * RevisionSpec holds the desired state of the Revision (from the client). * Structure is documented below. */ spec: outputs.cloudrun.ServiceTemplateSpec; } interface ServiceTemplateMetadata { /** * Annotations is a key value map stored with a resource that * may be set by external tools to store and retrieve arbitrary metadata. 
* More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations * **Note**: The Cloud Run API may add additional annotations that were not provided in your config. * If the provider plan shows a diff where a server-side annotation is added, you can add it to your config * or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. * Annotations with `run.googleapis.com/` and `autoscaling.knative.dev` are restricted. Use the following annotation * keys to configure features on a Service: * - `run.googleapis.com/binary-authorization-breakglass` sets the [Binary Authorization breakglass](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--breakglass). * - `run.googleapis.com/binary-authorization` sets the [Binary Authorization](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--binary-authorization). * - `run.googleapis.com/client-name` sets the client name calling the Cloud Run API. * - `run.googleapis.com/custom-audiences` sets the [custom audiences](https://cloud.google.com/sdk/gcloud/reference/alpha/run/deploy#--add-custom-audiences) * that can be used in the audience field of ID token for authenticated requests. * - `run.googleapis.com/description` sets a user defined description for the Service. * - `run.googleapis.com/ingress` sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) * for the Service. For example, `"run.googleapis.com/ingress" = "all"`. * - `run.googleapis.com/launch-stage` sets the [launch stage](https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation) * when a preview feature is used. For example, `"run.googleapis.com/launch-stage": "BETA"` * - `run.googleapis.com/minScale` sets the [minimum number of container instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--min) of the Service. * - `run.googleapis.com/scalingMode` sets the type of scaling mode for the service. 
The supported values for scaling mode are "manual" and "automatic". If not provided, it defaults to "automatic". * - `run.googleapis.com/manualInstanceCount` sets the total instance count for the service in manual scaling mode. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. * Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. */ annotations: { [key: string]: string; }; /** * (Output) * A sequence number representing a specific generation of the desired state. */ generation: number; /** * Map of string keys and values that can be used to organize and categorize * (scope and select) objects. May match selectors of replication controllers * and routes. * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field `effectiveLabels` for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * Name must be unique within a Google Cloud project and region. * Is required when creating resources. Name is primarily intended * for creation idempotence and configuration definition. Cannot be updated. */ name: string; /** * In Cloud Run the namespace must be equal to either the * project ID or project number. */ namespace: string; /** * (Output) * An opaque value that represents the internal version of this object that * can be used by clients to determine when objects have changed. May be used * for optimistic concurrency, change detection, and the watch operation on a * resource or set of resources. They may only be valid for a * particular resource or set of resources. */ resourceVersion: string; /** * (Output) * SelfLink is a URL representing this object. 
*/ selfLink: string; /** * (Output) * UID is a unique id generated by the server on successful creation of a resource and is not * allowed to change on PUT operations. */ uid: string; } interface ServiceTemplateSpec { /** * ContainerConcurrency specifies the maximum allowed in-flight (concurrent) * requests per container of the Revision. If not specified or 0, defaults to 80 when * requested CPU >= 1 and defaults to 1 when requested CPU < 1. */ containerConcurrency: number; /** * Containers defines the unit of execution for this Revision. * Structure is documented below. */ containers: outputs.cloudrun.ServiceTemplateSpecContainer[]; /** * Node Selector describes the hardware requirements of the resources. * Use the following node selector keys to configure features on a Revision: * - `run.googleapis.com/accelerator` sets the [type of GPU](https://cloud.google.com/run/docs/configuring/services/gpu) required by the Revision to run. */ nodeSelector?: { [key: string]: string; }; /** * Email address of the IAM service account associated with the revision of the * service. The service account represents the identity of the running revision, * and determines what permissions the revision has. If not provided, the revision * will use the project's default service account. */ serviceAccountName: string; /** * (Output, Deprecated) * ServingState holds a value describing the state the resources * are in for this Revision. * It is expected * that the system will manipulate this based on routability and load. * * > **Warning:** `servingState` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. * * @deprecated `servingState` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. */ servingState: string; /** * TimeoutSeconds holds the max duration the instance is allowed for responding to a request. 
*/ timeoutSeconds: number; /** * Volume represents a named volume in a container. * Structure is documented below. */ volumes?: outputs.cloudrun.ServiceTemplateSpecVolume[]; } interface ServiceTemplateSpecContainer { /** * Arguments to the entrypoint. * The docker image's CMD is used if this is not provided. */ args?: string[]; /** * Entrypoint array. Not executed within a shell. * The docker image's ENTRYPOINT is used if this is not provided. */ commands?: string[]; /** * (Optional, Deprecated) * List of sources to populate environment variables in the container. * All invalid keys will be reported as an event when the container is starting. * When a key exists in multiple sources, the value associated with the last source will * take precedence. Values defined by an Env with a duplicate key will take * precedence. * Structure is documented below. * * > **Warning:** `envFrom` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. * * @deprecated `envFrom` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. */ envFroms?: outputs.cloudrun.ServiceTemplateSpecContainerEnvFrom[]; /** * List of environment variables to set in the container. * Structure is documented below. */ envs?: outputs.cloudrun.ServiceTemplateSpecContainerEnv[]; /** * Docker image name. This is most often a reference to a container located * in the container registry, such as gcr.io/cloudrun/hello */ image: string; /** * Periodic probe of container liveness. Container will be restarted if the probe fails. More info: * https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes * Structure is documented below. */ livenessProbe?: outputs.cloudrun.ServiceTemplateSpecContainerLivenessProbe; /** * Name of the container */ name: string; /** * List of open ports in the container. * Structure is documented below. 
*/ ports: outputs.cloudrun.ServiceTemplateSpecContainerPort[]; /** * Periodic probe of container readiness. * Structure is documented below. */ readinessProbe?: outputs.cloudrun.ServiceTemplateSpecContainerReadinessProbe; /** * Compute Resources required by this container. Used to set values such as max memory * Structure is documented below. */ resources: outputs.cloudrun.ServiceTemplateSpecContainerResources; /** * Startup probe of application within the container. * All other probes are disabled if a startup probe is provided, until it * succeeds. Container will not be added to service endpoints if the probe fails. * Structure is documented below. */ startupProbe: outputs.cloudrun.ServiceTemplateSpecContainerStartupProbe; /** * Volume to mount into the container's filesystem. * Only supports SecretVolumeSources. * Structure is documented below. */ volumeMounts?: outputs.cloudrun.ServiceTemplateSpecContainerVolumeMount[]; /** * (Optional, Deprecated) * Container's working directory. * If not specified, the container runtime's default will be used, which * might be configured in the container image. * * > **Warning:** `workingDir` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. * * @deprecated `workingDir` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. */ workingDir?: string; } interface ServiceTemplateSpecContainerEnv { /** * Name of the environment variable. */ name?: string; /** * Defaults to "". */ value?: string; /** * Source for the environment variable's value. Only supports secret_key_ref. * Structure is documented below. */ valueFrom?: outputs.cloudrun.ServiceTemplateSpecContainerEnvValueFrom; } interface ServiceTemplateSpecContainerEnvFrom { /** * The ConfigMap to select from. * Structure is documented below. 
*/ configMapRef?: outputs.cloudrun.ServiceTemplateSpecContainerEnvFromConfigMapRef; /** * An optional identifier to prepend to each key in the ConfigMap. */ prefix?: string; /** * The Secret to select from. * Structure is documented below. */ secretRef?: outputs.cloudrun.ServiceTemplateSpecContainerEnvFromSecretRef; } interface ServiceTemplateSpecContainerEnvFromConfigMapRef { /** * The ConfigMap to select from. * Structure is documented below. */ localObjectReference?: outputs.cloudrun.ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReference; /** * Specify whether the ConfigMap must be defined */ optional?: boolean; } interface ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReference { /** * Name of the referent. */ name: string; } interface ServiceTemplateSpecContainerEnvFromSecretRef { /** * The Secret to select from. * Structure is documented below. */ localObjectReference?: outputs.cloudrun.ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReference; /** * Specify whether the Secret must be defined */ optional?: boolean; } interface ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReference { /** * Name of the referent. */ name: string; } interface ServiceTemplateSpecContainerEnvValueFrom { /** * Selects a key (version) of a secret in Secret Manager. * Structure is documented below. */ secretKeyRef: outputs.cloudrun.ServiceTemplateSpecContainerEnvValueFromSecretKeyRef; } interface ServiceTemplateSpecContainerEnvValueFromSecretKeyRef { /** * A Cloud Secret Manager secret version. Must be 'latest' for the latest * version or an integer for a specific version. */ key: string; /** * The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. * If the secret is in another project, you must define an alias. * An alias definition has the form: * {alias}:projects/{project-id|project-number}/secrets/{secret-name}. 
* If multiple alias definitions are needed, they must be separated by commas. * The alias definitions must be set on the run.googleapis.com/secrets annotation. */ name: string; } interface ServiceTemplateSpecContainerLivenessProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. */ grpc?: outputs.cloudrun.ServiceTemplateSpecContainerLivenessProbeGrpc; /** * HttpGet specifies the http request to perform. * Structure is documented below. */ httpGet?: outputs.cloudrun.ServiceTemplateSpecContainerLivenessProbeHttpGet; /** * Number of seconds after the container has started before the probe is * initiated. * Defaults to 0 seconds. Minimum value is 0. Maximum value is 3600. */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. Minimum value is 1. Maximum value is 3600. */ periodSeconds?: number; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Minimum value is 1. Maximum value is 3600. * Must be smaller than period_seconds. */ timeoutSeconds?: number; } interface ServiceTemplateSpecContainerLivenessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface ServiceTemplateSpecContainerLivenessProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. 
*/ httpHeaders?: outputs.cloudrun.ServiceTemplateSpecContainerLivenessProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path?: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateSpecContainerLivenessProbeHttpGetHttpHeader { /** * The header field name. */ name: string; /** * The header field value. */ value?: string; } interface ServiceTemplateSpecContainerPort { /** * Port number the container listens on. This must be a valid port number (between 1 and 65535). Defaults to "8080". */ containerPort?: number; /** * If specified, used to specify which protocol to use. Allowed values are "http1" (HTTP/1) and "h2c" (HTTP/2 end-to-end). Defaults to "http1". */ name: string; /** * Protocol for port. Must be "TCP". Defaults to "TCP". */ protocol?: string; } interface ServiceTemplateSpecContainerReadinessProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. */ grpc?: outputs.cloudrun.ServiceTemplateSpecContainerReadinessProbeGrpc; /** * HttpGet specifies the http request to perform. * Structure is documented below. */ httpGet?: outputs.cloudrun.ServiceTemplateSpecContainerReadinessProbeHttpGet; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. */ periodSeconds: number; /** * Minimum consecutive successes for the probe to be considered successful after having failed. * Defaults to 2. */ successThreshold: number; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Must be smaller than period_seconds. 
*/ timeoutSeconds: number; } interface ServiceTemplateSpecContainerReadinessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface ServiceTemplateSpecContainerReadinessProbeHttpGet { /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateSpecContainerResources { /** * Limits describes the maximum amount of compute resources allowed. * CPU Limit details: * - For fractional CPU values (e.g. `0.5`, `0.75`, min `0.08`) are also supported. * - CPU allocation must comply with memory limits and concurrency rules described in: * https://cloud.google.com/run/docs/configuring/services/cpu * The values of the map is string form of the 'quantity' k8s type: * https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; /** * Requests describes the minimum amount of compute resources required. * If Requests is omitted for a container, it defaults to Limits if that is * explicitly specified, otherwise to an implementation-defined value. 
* The values of the map is string form of the 'quantity' k8s type: * https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ requests?: { [key: string]: string; }; } interface ServiceTemplateSpecContainerStartupProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. */ grpc?: outputs.cloudrun.ServiceTemplateSpecContainerStartupProbeGrpc; /** * HttpGet specifies the http request to perform. * Structure is documented below. */ httpGet?: outputs.cloudrun.ServiceTemplateSpecContainerStartupProbeHttpGet; /** * Number of seconds after the container has started before the probe is * initiated. * Defaults to 0 seconds. Minimum value is 0. Maximum value is 240. */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. Minimum value is 1. Maximum value is 240. */ periodSeconds?: number; /** * TcpSocket specifies an action involving a TCP port. * Structure is documented below. */ tcpSocket?: outputs.cloudrun.ServiceTemplateSpecContainerStartupProbeTcpSocket; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Minimum value is 1. Maximum value is 3600. * Must be smaller than periodSeconds. */ timeoutSeconds?: number; } interface ServiceTemplateSpecContainerStartupProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. 
*/ service?: string; } interface ServiceTemplateSpecContainerStartupProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.cloudrun.ServiceTemplateSpecContainerStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path?: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateSpecContainerStartupProbeHttpGetHttpHeader { /** * The header field name. */ name: string; /** * The header field value. */ value?: string; } interface ServiceTemplateSpecContainerStartupProbeTcpSocket { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateSpecContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must * not contain ':'. */ mountPath: string; /** * This must match the Name of a Volume. */ name: string; /** * Path within the volume from which the container's volume should be mounted. */ subPath?: string; } interface ServiceTemplateSpecVolume { /** * A filesystem specified by the Container Storage Interface (CSI). * Structure is documented below. */ csi?: outputs.cloudrun.ServiceTemplateSpecVolumeCsi; /** * Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). * Structure is documented below. */ emptyDir?: outputs.cloudrun.ServiceTemplateSpecVolumeEmptyDir; /** * Volume's name. 
*/ name: string; /** * A filesystem backed by a Network File System share. This filesystem requires the * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. */ nfs?: outputs.cloudrun.ServiceTemplateSpecVolumeNfs; /** * The secret's value will be presented as the content of a file whose * name is defined in the item path. If no items are defined, the name of * the file is the secret_name. * Structure is documented below. */ secret?: outputs.cloudrun.ServiceTemplateSpecVolumeSecret; } interface ServiceTemplateSpecVolumeCsi { /** * Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ driver: string; /** * If true, all mounts created from this volume will be read-only. */ readOnly: boolean; /** * Driver-specific attributes. The following options are supported for available drivers: * * gcsfuse.run.googleapis.com * * bucketName: The name of the Cloud Storage Bucket that backs this volume. The Cloud Run Service identity must have access to this bucket. */ volumeAttributes?: { [key: string]: string; }; } interface ServiceTemplateSpecVolumeEmptyDir { /** * The medium on which the data is stored. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. */ medium?: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. 
The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit?: string; } interface ServiceTemplateSpecVolumeNfs { /** * Path exported by the NFS server */ path: string; /** * If true, mount the NFS volume as read only in all mounts. Defaults to false. */ readOnly?: boolean; /** * IP address or hostname of the NFS server */ server: string; } interface ServiceTemplateSpecVolumeSecret { /** * Mode bits to use on created files by default. Must be a value between 0000 * and 0777. Defaults to 0644. Directories within the path are not affected by * this setting. This might be in conflict with other options that affect the * file mode, like fsGroup, and the result can be other mode bits set. */ defaultMode?: number; /** * If unspecified, the volume will expose a file whose name is the * secret_name. * If specified, the key will be used as the version to fetch from Cloud * Secret Manager and the path will be the name of the file exposed in the * volume. When items are defined, they must specify a key and a path. * Structure is documented below. */ items?: outputs.cloudrun.ServiceTemplateSpecVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. By default, the secret * is assumed to be in the same project. * If the secret is in another project, you must define an alias. * An alias definition has the form: * {alias}:projects/{project-id|project-number}/secrets/{secret-name}. * If multiple alias definitions are needed, they must be separated by * commas. * The alias definitions must be set on the run.googleapis.com/secrets * annotation. */ secretName: string; } interface ServiceTemplateSpecVolumeSecretItem { /** * The Cloud Secret Manager secret version. * Can be 'latest' for the latest value or an integer for a specific version. */ key: string; /** * Mode bits to use on this file, must be a value between 0000 and 0777. 
If * not specified, the volume defaultMode will be used. This might be in * conflict with other options that affect the file mode, like fsGroup, and * the result can be other mode bits set. */ mode?: number; /** * The relative path of the file to map the key to. * May not be an absolute path. * May not contain the path element '..'. * May not start with the string '..'. */ path: string; } interface ServiceTraffic { /** * LatestRevision may be optionally provided to indicate that the latest ready * Revision of the Configuration should be used for this traffic target. When * provided LatestRevision must be true if RevisionName is empty; it must be * false when RevisionName is non-empty. */ latestRevision?: boolean; /** * Percent specifies percent of the traffic to this Revision or Configuration. */ percent: number; /** * RevisionName of a specific revision to which to send this portion of traffic. */ revisionName?: string; /** * Tag is optionally used to expose a dedicated url for referencing this target exclusively. */ tag?: string; /** * (Output) * URL displays the URL for accessing tagged traffic targets. URL is displayed in status, * and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, * but may not contain anything else (e.g. basic auth, url path, etc.) */ url: string; } } export declare namespace cloudrunv2 { interface GetJobBinaryAuthorization { /** * If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass */ breakglassJustification: string; /** * The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} */ policy: string; /** * If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. 
*/ useDefault: boolean; } interface GetJobCondition { /** * A reason for the execution condition. */ executionReason: string; /** * Last time the condition transitioned from one status to another. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * Human readable message indicating details about the current status. */ message: string; /** * A common (service-level) reason for this condition. */ reason: string; /** * A reason for the revision condition. */ revisionReason: string; /** * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * State of the condition. */ state: string; /** * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface GetJobLatestCreatedExecution { /** * Completion timestamp of the execution. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ completionTime: string; /** * Creation timestamp of the execution. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ createTime: string; /** * The name of the Cloud Run v2 Job. */ name: string; } interface GetJobTemplate { /** * Unstructured key value map that may be set by external tools to store arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
* * Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. * All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. * * This field follows Kubernetes annotations' namespacing, limits, and rules. */ annotations: { [key: string]: string; }; /** * Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, * or break down billing charges by team, component, environment, state, etc. For more information, visit https://docs.cloud.google.com/resource-manager/docs/creating-managing-labels or * https://cloud.google.com/run/docs/configuring/labels. * * Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. * All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. */ labels: { [key: string]: string; }; /** * Specifies the maximum desired number of tasks the execution should run at given time. Must be <= taskCount. When the job is run, if this field is 0 or unset, the maximum possible value will be used for that execution. The actual number of tasks running in steady state will be less than this number when there are fewer tasks waiting to be completed remaining, i.e. when the work left to do is less than max parallelism. */ parallelism: number; /** * Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ */ taskCount: number; /** * Describes the task(s) that will be created when executing an execution */ templates: outputs.cloudrunv2.GetJobTemplateTemplate[]; } interface GetJobTemplateTemplate { /** * Holds the single container that defines the unit of execution for this task. */ containers: outputs.cloudrunv2.GetJobTemplateTemplateContainer[]; /** * A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek */ encryptionKey: string; /** * The execution environment being used to host this Task. Possible values: ["EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2"] */ executionEnvironment: string; /** * True if GPU zonal redundancy is disabled on this execution. */ gpuZonalRedundancyDisabled: boolean; /** * Number of retries allowed per Task, before marking this Task failed. Defaults to 3. Minimum value is 0. */ maxRetries: number; /** * Node Selector describes the hardware requirements of the resources. */ nodeSelectors: outputs.cloudrunv2.GetJobTemplateTemplateNodeSelector[]; /** * Email address of the IAM service account associated with the Task of a Job. The service account represents the identity of the running task, and determines what permissions the task has. If not provided, the task will use the project's default service account. */ serviceAccount: string; /** * Max allowed time duration the Task may be active before the system will actively try to mark it failed and kill associated containers. This applies per attempt of a task, meaning each retry can run for the full timeout. * * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ timeout: string; /** * A list of Volumes to make available to containers. 
*/ volumes: outputs.cloudrunv2.GetJobTemplateTemplateVolume[]; /** * VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. */ vpcAccesses: outputs.cloudrunv2.GetJobTemplateTemplateVpcAccess[]; } interface GetJobTemplateTemplateContainer { /** * Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. */ args: string[]; /** * Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell */ commands: string[]; /** * Names of the containers that must start before this container. */ dependsOns: string[]; /** * List of environment variables to set in the container. */ envs: outputs.cloudrunv2.GetJobTemplateTemplateContainerEnv[]; /** * URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images */ image: string; /** * The name of the Cloud Run v2 Job. */ name: string; /** * List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. * * If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on */ ports: outputs.cloudrunv2.GetJobTemplateTemplateContainerPort[]; /** * Compute Resource requirements by this container. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources */ resources: outputs.cloudrunv2.GetJobTemplateTemplateContainerResource[]; /** * Startup probe of application within the container. * All other probes are disabled if a startup probe is provided, until it * succeeds. Container will not be added to service endpoints if the probe fails. */ startupProbes: outputs.cloudrunv2.GetJobTemplateTemplateContainerStartupProbe[]; /** * Volume to mount into the container's filesystem. */ volumeMounts: outputs.cloudrunv2.GetJobTemplateTemplateContainerVolumeMount[]; /** * Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. */ workingDir: string; } interface GetJobTemplateTemplateContainerEnv { /** * The name of the Cloud Run v2 Job. */ name: string; /** * Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. */ value: string; /** * Source for the environment variable's value. */ valueSources: outputs.cloudrunv2.GetJobTemplateTemplateContainerEnvValueSource[]; } interface GetJobTemplateTemplateContainerEnvValueSource { /** * Selects a secret and a specific version from Cloud Secret Manager. */ secretKeyReves: outputs.cloudrunv2.GetJobTemplateTemplateContainerEnvValueSourceSecretKeyRef[]; } interface GetJobTemplateTemplateContainerEnvValueSourceSecretKeyRef { /** * The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. */ secret: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. */ version: string; } interface GetJobTemplateTemplateContainerPort { /** * Port number the container listens on. 
This must be a valid TCP port number, 0 < containerPort < 65536. */ containerPort: number; /** * The name of the Cloud Run v2 Job. */ name: string; } interface GetJobTemplateTemplateContainerResource { /** * Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', '6', and '8'. Setting 4 CPU requires at least 2Gi of memory, setting 6 or more CPU requires at least 4Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; } interface GetJobTemplateTemplateContainerStartupProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrunv2.GetJobTemplateTemplateContainerStartupProbeGrpc[]; /** * HttpGet specifies the http request to perform. */ httpGets: outputs.cloudrunv2.GetJobTemplateTemplateContainerStartupProbeHttpGet[]; /** * Number of seconds after the container has started before the probe is * initiated. * Defaults to 0 seconds. Minimum value is 0. Maximum value is 240. */ initialDelaySeconds: number; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. Minimum value is 1. Maximum value is 240. */ periodSeconds: number; /** * TcpSocket specifies an action involving a TCP port. */ tcpSockets: outputs.cloudrunv2.GetJobTemplateTemplateContainerStartupProbeTcpSocket[]; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Minimum value is 1. Maximum value is 3600. * Must be smaller than periodSeconds. 
*/ timeoutSeconds: number; } interface GetJobTemplateTemplateContainerStartupProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service: string; } interface GetJobTemplateTemplateContainerStartupProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. */ httpHeaders: outputs.cloudrunv2.GetJobTemplateTemplateContainerStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetJobTemplateTemplateContainerStartupProbeHttpGetHttpHeader { /** * The name of the Cloud Run v2 Job. */ name: string; /** * The header field value. */ value: string; } interface GetJobTemplateTemplateContainerStartupProbeTcpSocket { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetJobTemplateTemplateContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run */ mountPath: string; /** * The name of the Cloud Run v2 Job. 
*/ name: string; /** * Path within the volume from which the container's volume should be mounted. */ subPath: string; } interface GetJobTemplateTemplateNodeSelector { /** * The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/jobs/gpu for configuring GPU. */ accelerator: string; } interface GetJobTemplateTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. */ cloudSqlInstances: outputs.cloudrunv2.GetJobTemplateTemplateVolumeCloudSqlInstance[]; /** * Ephemeral storage used as a shared volume. */ emptyDirs: outputs.cloudrunv2.GetJobTemplateTemplateVolumeEmptyDir[]; /** * Cloud Storage bucket mounted as a volume using GCSFuse. */ gcs: outputs.cloudrunv2.GetJobTemplateTemplateVolumeGc[]; /** * The name of the Cloud Run v2 Job. */ name: string; /** * NFS share mounted as a volume. */ nfs: outputs.cloudrunv2.GetJobTemplateTemplateVolumeNf[]; /** * Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret */ secrets: outputs.cloudrunv2.GetJobTemplateTemplateVolumeSecret[]; } interface GetJobTemplateTemplateVolumeCloudSqlInstance { /** * The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} */ instances: string[]; } interface GetJobTemplateTemplateVolumeEmptyDir { /** * The different types of medium supported for EmptyDir. Default value: "MEMORY" Possible values: ["MEMORY"] */ medium: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit: string; } interface GetJobTemplateTemplateVolumeGc { /** * Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. */ bucket: string; /** * A list of flags to pass to the gcsfuse command for configuring this volume. * Flags should be passed without leading dashes. */ mountOptions: string[]; /** * If true, mount this volume as read-only in all mounts. If false, mount this volume as read-write. */ readOnly: boolean; } interface GetJobTemplateTemplateVolumeNf { /** * Path that is exported by the NFS server. */ path: string; /** * If true, mount this volume as read-only in all mounts. */ readOnly: boolean; /** * Hostname or IP address of the NFS server. */ server: string; } interface GetJobTemplateTemplateVolumeSecret { /** * Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. */ defaultMode: number; /** * If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. */ items: outputs.cloudrunv2.GetJobTemplateTemplateVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. 
projects/{project}/secrets/{secret} if the secret is in a different project. */ secret: string; } interface GetJobTemplateTemplateVolumeSecretItem { /** * Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. */ mode: number; /** * The relative path of the secret in the container. */ path: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version */ version: string; } interface GetJobTemplateTemplateVpcAccess { /** * VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. */ connector: string; /** * Traffic VPC egress settings. Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"] */ egress: string; /** * Direct VPC egress settings. Currently only single network interface is supported. */ networkInterfaces: outputs.cloudrunv2.GetJobTemplateTemplateVpcAccessNetworkInterface[]; } interface GetJobTemplateTemplateVpcAccessNetworkInterface { /** * The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be * looked up from the subnetwork. */ network: string; /** * The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the * subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Run job. */ tags: string[]; } interface GetJobTerminalCondition { /** * A reason for the execution condition. 
*/ executionReason: string; /** * Last time the condition transitioned from one status to another. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * Human readable message indicating details about the current status. */ message: string; /** * A common (service-level) reason for this condition. */ reason: string; /** * A reason for the revision condition. */ revisionReason: string; /** * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * State of the condition. */ state: string; /** * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface GetServiceBinaryAuthorization { /** * If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass */ breakglassJustification: string; /** * The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} */ policy: string; /** * If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. */ useDefault: boolean; } interface GetServiceBuildConfig { /** * The base image used to build the function. */ baseImage: string; /** * Sets whether the function will receive automatic base image updates. */ enableAutomaticUpdates: boolean; /** * User-provided build-time environment variables for the function. */ environmentVariables: { [key: string]: string; }; /** * The name of the function (as defined in source code) that will be executed. 
Defaults to the resource name suffix, if not specified. For backward compatibility, if function with given name is not found, then the system will try to use function named "function". */ functionTarget: string; /** * Artifact Registry URI to store the built image. */ imageUri: string; /** * The name of the Cloud Run v2 Service. */ name: string; /** * Service account to be used for building the container. The format of this field is 'projects/{projectId}/serviceAccounts/{serviceAccountEmail}'. */ serviceAccount: string; /** * The Cloud Storage bucket URI where the function source code is located. */ sourceLocation: string; /** * Name of the Cloud Build Custom Worker Pool that should be used to build the Cloud Run function. The format of this field is 'projects/{project}/locations/{region}/workerPools/{workerPool}' where {project} and {region} are the project id and region respectively where the worker pool is defined and {workerPool} is the short name of the worker pool. */ workerPool: string; } interface GetServiceCondition { /** * A reason for the execution condition. */ executionReason: string; /** * Last time the condition transitioned from one status to another. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * Human readable message indicating details about the current status. */ message: string; /** * A common (service-level) reason for this condition. */ reason: string; /** * A reason for the revision condition. */ revisionReason: string; /** * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * State of the condition. */ state: string; /** * type is used to communicate the status of the reconciliation process. 
See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface GetServiceMultiRegionSetting { /** * System-generated unique id for the multi-region Service. */ multiRegionId: string; /** * The list of regions to deploy the multi-region Service. */ regions: string[]; } interface GetServiceScaling { /** * Total instance count for the service in manual scaling mode. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. */ manualInstanceCount: number; /** * Combined maximum number of instances for all revisions receiving traffic. */ maxInstanceCount: number; /** * Minimum number of instances for the service, to be divided among all revisions receiving traffic. */ minInstanceCount: number; /** * The [scaling mode](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services#scalingmode) for the service. Possible values: ["AUTOMATIC", "MANUAL"] */ scalingMode: string; } interface GetServiceTemplate { /** * Unstructured key value map that may be set by external tools to store arbitrary metadata. They are not queryable and should be preserved when modifying objects. * * Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. * All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. * * This field follows Kubernetes annotations' namespacing, limits, and rules. */ annotations: { [key: string]: string; }; /** * Holds the containers that define the unit of execution for this Service. */ containers: outputs.cloudrunv2.GetServiceTemplateContainer[]; /** * A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. 
For more information, go to https://cloud.google.com/run/docs/securing/using-cmek */ encryptionKey: string; /** * The sandbox environment to host this Revision. Possible values: ["EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2"] */ executionEnvironment: string; /** * True if GPU zonal redundancy is disabled on this revision. */ gpuZonalRedundancyDisabled: boolean; /** * Disables health checking containers during deployment. */ healthCheckDisabled: boolean; /** * Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. * For more information, visit https://docs.cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. * * Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. * All system labels in v1 now have a corresponding field in v2 RevisionTemplate. */ labels: { [key: string]: string; }; /** * Sets the maximum number of requests that each serving instance can receive. * If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1. */ maxInstanceRequestConcurrency: number; /** * Node Selector describes the hardware requirements of the resources. */ nodeSelectors: outputs.cloudrunv2.GetServiceTemplateNodeSelector[]; /** * The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. */ revision: string; /** * Scaling settings for this Revision. */ scalings: outputs.cloudrunv2.GetServiceTemplateScaling[]; /** * Email address of the IAM service account associated with the revision of the service. 
The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. */ serviceAccount: string; /** * Enables Cloud Service Mesh for this Revision. */ serviceMeshes: outputs.cloudrunv2.GetServiceTemplateServiceMesh[]; /** * Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity */ sessionAffinity: boolean; /** * Max allowed time for an instance to respond to a request. * * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ timeout: string; /** * A list of Volumes to make available to containers. */ volumes: outputs.cloudrunv2.GetServiceTemplateVolume[]; /** * VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. */ vpcAccesses: outputs.cloudrunv2.GetServiceTemplateVpcAccess[]; } interface GetServiceTemplateContainer { /** * Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. */ args: string[]; /** * Base image for this container. If set, it indicates that the service is enrolled into automatic base image update. */ baseImageUri: string; /** * The build info of the container image. */ buildInfos: outputs.cloudrunv2.GetServiceTemplateContainerBuildInfo[]; /** * Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell */ commands: string[]; /** * Containers which should be started before this container. If specified the container will wait to start until all containers with the listed names are healthy. */ dependsOns: string[]; /** * List of environment variables to set in the container. */ envs: outputs.cloudrunv2.GetServiceTemplateContainerEnv[]; /** * URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images */ image: string; /** * Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ livenessProbes: outputs.cloudrunv2.GetServiceTemplateContainerLivenessProbe[]; /** * The name of the Cloud Run v2 Service. */ name: string; /** * List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. * * If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on */ ports: outputs.cloudrunv2.GetServiceTemplateContainerPort[]; /** * Periodic probe of container readiness. */ readinessProbes: outputs.cloudrunv2.GetServiceTemplateContainerReadinessProbe[]; /** * Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources */ resources: outputs.cloudrunv2.GetServiceTemplateContainerResource[]; /** * Location of the source. */ sourceCodes: outputs.cloudrunv2.GetServiceTemplateContainerSourceCode[]; /** * Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. 
Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ startupProbes: outputs.cloudrunv2.GetServiceTemplateContainerStartupProbe[]; /** * Volume to mount into the container's filesystem. */ volumeMounts: outputs.cloudrunv2.GetServiceTemplateContainerVolumeMount[]; /** * Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. */ workingDir: string; } interface GetServiceTemplateContainerBuildInfo { /** * Entry point of the function when the image is a Cloud Run function. */ functionTarget: string; /** * Source code location of the image. */ sourceLocation: string; } interface GetServiceTemplateContainerEnv { /** * The name of the Cloud Run v2 Service. */ name: string; /** * Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. */ value: string; /** * Source for the environment variable's value. */ valueSources: outputs.cloudrunv2.GetServiceTemplateContainerEnvValueSource[]; } interface GetServiceTemplateContainerEnvValueSource { /** * Selects a secret and a specific version from Cloud Secret Manager. */ secretKeyReves: outputs.cloudrunv2.GetServiceTemplateContainerEnvValueSourceSecretKeyRef[]; } interface GetServiceTemplateContainerEnvValueSourceSecretKeyRef { /** * The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. */ secret: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. */ version: string; } interface GetServiceTemplateContainerLivenessProbe { /** * Minimum consecutive failures for the probe to be considered failed after having succeeded. 
Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrunv2.GetServiceTemplateContainerLivenessProbeGrpc[]; /** * HTTPGet specifies the http request to perform. */ httpGets: outputs.cloudrunv2.GetServiceTemplateContainerLivenessProbeHttpGet[]; /** * Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ initialDelaySeconds: number; /** * How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds */ periodSeconds: number; /** * TCPSocketAction describes an action based on opening a socket */ tcpSockets: outputs.cloudrunv2.GetServiceTemplateContainerLivenessProbeTcpSocket[]; /** * Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ timeoutSeconds: number; } interface GetServiceTemplateContainerLivenessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service: string; } interface GetServiceTemplateContainerLivenessProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. 
*/ httpHeaders: outputs.cloudrunv2.GetServiceTemplateContainerLivenessProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. Defaults to '/'. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateContainerLivenessProbeHttpGetHttpHeader { /** * The name of the Cloud Run v2 Service. */ name: string; /** * The header field value */ value: string; } interface GetServiceTemplateContainerLivenessProbeTcpSocket { /** * Port number to access on the container. Must be in the range 1 to 65535. * If not specified, defaults to the exposed port of the container, which * is the value of container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateContainerPort { /** * Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536. */ containerPort: number; /** * The name of the Cloud Run v2 Service. */ name: string; } interface GetServiceTemplateContainerReadinessProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrunv2.GetServiceTemplateContainerReadinessProbeGrpc[]; /** * HttpGet specifies the http request to perform. */ httpGets: outputs.cloudrunv2.GetServiceTemplateContainerReadinessProbeHttpGet[]; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. */ periodSeconds: number; /** * Minimum consecutive successes for the probe to be considered successful after having failed. * Defaults to 2. */ successThreshold: number; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Must be smaller than period_seconds. 
*/ timeoutSeconds: number; } interface GetServiceTemplateContainerReadinessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service: string; } interface GetServiceTemplateContainerReadinessProbeHttpGet { /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateContainerResource { /** * Determines whether CPU is only allocated during requests. True by default if the parent 'resources' field is not set. However, if * 'resources' is set, this field must be explicitly set to true to preserve the default behavior. */ cpuIdle: boolean; /** * Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', '6' and '8'. Setting 4 CPU requires at least 2Gi of memory, setting 6 or more CPU requires at least 4Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; /** * Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency. */ startupCpuBoost: boolean; } interface GetServiceTemplateContainerSourceCode { /** * Cloud Storage source. 
*/ cloudStorageSources: outputs.cloudrunv2.GetServiceTemplateContainerSourceCodeCloudStorageSource[]; } interface GetServiceTemplateContainerSourceCodeCloudStorageSource { /** * The Cloud Storage bucket name. */ bucket: string; /** * The Cloud Storage object generation. The is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer. */ generation: string; /** * The Cloud Storage object name. */ object: string; } interface GetServiceTemplateContainerStartupProbe { /** * Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. */ grpcs: outputs.cloudrunv2.GetServiceTemplateContainerStartupProbeGrpc[]; /** * HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified. */ httpGets: outputs.cloudrunv2.GetServiceTemplateContainerStartupProbeHttpGet[]; /** * Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ initialDelaySeconds: number; /** * How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds */ periodSeconds: number; /** * TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified. */ tcpSockets: outputs.cloudrunv2.GetServiceTemplateContainerStartupProbeTcpSocket[]; /** * Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ timeoutSeconds: number; } interface GetServiceTemplateContainerStartupProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service: string; } interface GetServiceTemplateContainerStartupProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. */ httpHeaders: outputs.cloudrunv2.GetServiceTemplateContainerStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. Defaults to '/'. */ path: string; /** * Port number to access on the container. Must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateContainerStartupProbeHttpGetHttpHeader { /** * The name of the Cloud Run v2 Service. */ name: string; /** * The header field value */ value: string; } interface GetServiceTemplateContainerStartupProbeTcpSocket { /** * Port number to access on the container. Must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface GetServiceTemplateContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run */ mountPath: string; /** * The name of the Cloud Run v2 Service. 
*/ name: string; /** * Path within the volume from which the container's volume should be mounted. */ subPath: string; } interface GetServiceTemplateNodeSelector { /** * The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU. */ accelerator: string; } interface GetServiceTemplateScaling { /** * Maximum number of serving instances that this resource should have. Must not be less than minimum instance count. If absent, Cloud Run will calculate * a default value based on the project's available container instances quota in the region and specified instance size. */ maxInstanceCount: number; /** * Minimum number of serving instances that this resource should have. Defaults to 0. Must not be greater than maximum instance count. */ minInstanceCount: number; } interface GetServiceTemplateServiceMesh { /** * The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. */ mesh: string; } interface GetServiceTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. */ cloudSqlInstances: outputs.cloudrunv2.GetServiceTemplateVolumeCloudSqlInstance[]; /** * Ephemeral storage used as a shared volume. */ emptyDirs: outputs.cloudrunv2.GetServiceTemplateVolumeEmptyDir[]; /** * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. */ gcs: outputs.cloudrunv2.GetServiceTemplateVolumeGc[]; /** * The name of the Cloud Run v2 Service. */ name: string; /** * Represents an NFS mount. */ nfs: outputs.cloudrunv2.GetServiceTemplateVolumeNf[]; /** * Secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret */ secrets: outputs.cloudrunv2.GetServiceTemplateVolumeSecret[]; } interface GetServiceTemplateVolumeCloudSqlInstance { /** * The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} */ instances: string[]; } interface GetServiceTemplateVolumeEmptyDir { /** * The different types of medium supported for EmptyDir. Default value: "MEMORY" Possible values: ["MEMORY"] */ medium: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit: string; } interface GetServiceTemplateVolumeGc { /** * GCS Bucket name */ bucket: string; /** * A list of flags to pass to the gcsfuse command for configuring this volume. * Flags should be passed without leading dashes. */ mountOptions: string[]; /** * If true, mount the GCS bucket as read-only */ readOnly: boolean; } interface GetServiceTemplateVolumeNf { /** * Path that is exported by the NFS server. */ path: string; /** * If true, mount the NFS volume as read only */ readOnly: boolean; /** * Hostname or IP address of the NFS server */ server: string; } interface GetServiceTemplateVolumeSecret { /** * Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. 
Directories within the path are not affected by this setting. */ defaultMode: number; /** * If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. */ items: outputs.cloudrunv2.GetServiceTemplateVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. */ secret: string; } interface GetServiceTemplateVolumeSecretItem { /** * Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. */ mode: number; /** * The relative path of the secret in the container. */ path: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version */ version: string; } interface GetServiceTemplateVpcAccess { /** * VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. */ connector: string; /** * Traffic VPC egress settings. Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"] */ egress: string; /** * Direct VPC egress settings. Currently only single network interface is supported. */ networkInterfaces: outputs.cloudrunv2.GetServiceTemplateVpcAccessNetworkInterface[]; } interface GetServiceTemplateVpcAccessNetworkInterface { /** * The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. 
If network is not specified, it will be * looked up from the subnetwork. */ network: string; /** * The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the * subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Run service. */ tags: string[]; } interface GetServiceTerminalCondition { /** * A reason for the execution condition. */ executionReason: string; /** * Last time the condition transitioned from one status to another. */ lastTransitionTime: string; /** * Human readable message indicating details about the current status. */ message: string; /** * A common (service-level) reason for this condition. */ reason: string; /** * A reason for the revision condition. */ revisionReason: string; /** * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * State of the condition. */ state: string; /** * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface GetServiceTraffic { /** * Specifies percent of the traffic to this Revision. This defaults to zero if unspecified. */ percent: number; /** * Revision to which to send this portion of traffic, if traffic allocation is by revision. */ revision: string; /** * Indicates a string to be part of the URI to exclusively reference this target. */ tag: string; /** * The allocation type for this traffic target. 
Possible values: ["TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST", "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"] */ type: string; } interface GetServiceTrafficStatus { /** * Specifies percent of the traffic to this Revision. */ percent: number; /** * Revision to which this traffic is sent. */ revision: string; /** * Indicates the string used in the URI to exclusively reference this target. */ tag: string; /** * The allocation type for this traffic target. */ type: string; /** * Displays the target URI. */ uri: string; } interface GetWorkerPoolBinaryAuthorization { /** * If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass */ breakglassJustification: string; /** * The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} */ policy: string; /** * If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. */ useDefault: boolean; } interface GetWorkerPoolCondition { /** * A reason for the execution condition. */ executionReason: string; /** * Last time the condition transitioned from one status to another. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * Human readable message indicating details about the current status. */ message: string; /** * A common (workerPool-level) reason for this condition. */ reason: string; /** * A reason for the revision condition. */ revisionReason: string; /** * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * State of the condition. */ state: string; /** * type is used to communicate the status of the reconciliation process. 
See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface GetWorkerPoolInstanceSplit { /** * Specifies percent of the instance split to this Revision. This defaults to zero if unspecified. */ percent: number; /** * Revision to which to assign this portion of instances, if split allocation is by revision. */ revision: string; /** * The allocation type for this instance split. Possible values: ["INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST", "INSTANCE_SPLIT_ALLOCATION_TYPE_REVISION"] */ type: string; } interface GetWorkerPoolInstanceSplitStatus { /** * Specifies percent of the instance split to this Revision. */ percent: number; /** * Revision to which this instance split is assigned. */ revision: string; /** * The allocation type for this instance split. */ type: string; } interface GetWorkerPoolScaling { /** * The total number of instances in manual scaling mode. */ manualInstanceCount: number; /** * The maximum count of instances distributed among revisions based on the specified instance split percentages. */ maxInstanceCount: number; /** * The minimum count of instances distributed among revisions based on the specified instance split percentages. */ minInstanceCount: number; /** * The scaling mode for the worker pool. It defaults to MANUAL. Possible values: ["AUTOMATIC", "MANUAL"] */ scalingMode: string; } interface GetWorkerPoolTemplate { /** * Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. * * Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. * All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. 
* * This field follows Kubernetes annotations' namespacing, limits, and rules. */ annotations: { [key: string]: string; }; /** * Holds the containers that define the unit of execution for this WorkerPool. */ containers: outputs.cloudrunv2.GetWorkerPoolTemplateContainer[]; /** * A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek */ encryptionKey: string; /** * The action to take if the encryption key is revoked. Possible values: ["PREVENT_NEW", "SHUTDOWN"] */ encryptionKeyRevocationAction: string; /** * If encryptionKeyRevocationAction is SHUTDOWN, the duration before shutting down all instances. The minimum increment is 1 hour. * * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ encryptionKeyShutdownDuration: string; /** * True if GPU zonal redundancy is disabled on this revision. */ gpuZonalRedundancyDisabled: boolean; /** * Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. * For more information, visit https://docs.cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. * * Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. * All system labels in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. */ labels: { [key: string]: string; }; /** * Node Selector describes the hardware requirements of the resources. */ nodeSelectors: outputs.cloudrunv2.GetWorkerPoolTemplateNodeSelector[]; /** * The unique name for the revision. 
If this field is omitted, it will be automatically generated based on the WorkerPool name. */ revision: string; /** * Email address of the IAM service account associated with the revision of the WorkerPool. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. */ serviceAccount: string; /** * A list of Volumes to make available to containers. */ volumes: outputs.cloudrunv2.GetWorkerPoolTemplateVolume[]; /** * VPC Access configuration to use for this Revision. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. */ vpcAccesses: outputs.cloudrunv2.GetWorkerPoolTemplateVpcAccess[]; } interface GetWorkerPoolTemplateContainer { /** * Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. */ args: string[]; /** * Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell */ commands: string[]; /** * Names of the containers that must start before this container. */ dependsOns: string[]; /** * List of environment variables to set in the container. */ envs: outputs.cloudrunv2.GetWorkerPoolTemplateContainerEnv[]; /** * URL of the Container image in Google Container Registry or Google Artifact Registry. 
More info: https://kubernetes.io/docs/concepts/containers/images */ image: string; /** * Periodic probe of container liveness. Container will be restarted if the probe fails. */ livenessProbes: outputs.cloudrunv2.GetWorkerPoolTemplateContainerLivenessProbe[]; /** * The name of the Cloud Run v2 Worker Pool. */ name: string; /** * Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources */ resources: outputs.cloudrunv2.GetWorkerPoolTemplateContainerResource[]; /** * Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. */ startupProbes: outputs.cloudrunv2.GetWorkerPoolTemplateContainerStartupProbe[]; /** * Volume to mount into the container's filesystem. */ volumeMounts: outputs.cloudrunv2.GetWorkerPoolTemplateContainerVolumeMount[]; /** * Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. */ workingDir: string; } interface GetWorkerPoolTemplateContainerEnv { /** * The name of the Cloud Run v2 Worker Pool. */ name: string; /** * Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. */ value: string; /** * Source for the environment variable's value. */ valueSources: outputs.cloudrunv2.GetWorkerPoolTemplateContainerEnvValueSource[]; } interface GetWorkerPoolTemplateContainerEnvValueSource { /** * Selects a secret and a specific version from Cloud Secret Manager. */ secretKeyReves: outputs.cloudrunv2.GetWorkerPoolTemplateContainerEnvValueSourceSecretKeyRef[]; } interface GetWorkerPoolTemplateContainerEnvValueSourceSecretKeyRef { /** * The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. 
projects/{project}/secrets/{secretName} if the secret is in a different project. */ secret: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. */ version: string; } interface GetWorkerPoolTemplateContainerLivenessProbe { /** * Optional. Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * Optional. GRPC specifies an action involving a gRPC port. Exactly one of httpGet, tcpSocket, or grpc must be specified. */ grpcs: outputs.cloudrunv2.GetWorkerPoolTemplateContainerLivenessProbeGrpc[]; /** * Optional. HTTPGet specifies the http request to perform. Exactly one of httpGet, tcpSocket, or grpc must be specified. */ httpGets: outputs.cloudrunv2.GetWorkerPoolTemplateContainerLivenessProbeHttpGet[]; /** * Optional. Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. */ initialDelaySeconds: number; /** * Optional. How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeout_seconds. */ periodSeconds: number; /** * Optional. TCPSocket specifies an action involving a TCP port. Exactly one of httpGet, tcpSocket, or grpc must be specified. */ tcpSockets: outputs.cloudrunv2.GetWorkerPoolTemplateContainerLivenessProbeTcpSocket[]; /** * Optional. Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. */ timeoutSeconds: number; } interface GetWorkerPoolTemplateContainerLivenessProbeGrpc { /** * Optional. Port number of the gRPC service. Number must be in the range 1 to 65535. 
If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */
    port: number;
    /**
     * Optional. Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md ). If this is not specified, the default behavior is defined by gRPC
     */
    service: string;
}
interface GetWorkerPoolTemplateContainerLivenessProbeHttpGet {
    /**
     * Optional. Custom headers to set in the request. HTTP allows repeated headers.
     */
    httpHeaders: outputs.cloudrunv2.GetWorkerPoolTemplateContainerLivenessProbeHttpGetHttpHeader[];
    /**
     * Optional. Path to access on the HTTP server. Defaults to '/'.
     */
    path: string;
    /**
     * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort.
     */
    port: number;
}
interface GetWorkerPoolTemplateContainerLivenessProbeHttpGetHttpHeader {
    /**
     * Required. The header field name
     * NOTE(review): this property is declared `port: number` but documented as the HTTP header field *name*; the analogous JobTemplateTemplateContainerStartupProbeHttpGetHttpHeader in this file declares `name: string`, and the Cloud Run v2 API models a header name as a string. Looks like an upstream schema/codegen defect — confirm against the provider schema before relying on this field.
     */
    port: number;
    /**
     * Optional. The header field value
     */
    value: string;
}
interface GetWorkerPoolTemplateContainerLivenessProbeTcpSocket {
    /**
     * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort.
     */
    port: number;
}
interface GetWorkerPoolTemplateContainerResource {
    /**
     * Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', '6', and '8'. Setting 4 CPU requires at least 2Gi of memory, setting 6 or more CPU requires at least 4Gi of memory.
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; } interface GetWorkerPoolTemplateContainerStartupProbe { /** * Optional. Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold: number; /** * Optional. GRPC specifies an action involving a gRPC port. Exactly one of httpGet, tcpSocket, or grpc must be specified. */ grpcs: outputs.cloudrunv2.GetWorkerPoolTemplateContainerStartupProbeGrpc[]; /** * Optional. HTTPGet specifies the http request to perform. Exactly one of httpGet, tcpSocket, or grpc must be specified. */ httpGets: outputs.cloudrunv2.GetWorkerPoolTemplateContainerStartupProbeHttpGet[]; /** * Optional. Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. */ initialDelaySeconds: number; /** * Optional. How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeout_seconds. */ periodSeconds: number; /** * Optional. TCPSocket specifies an action involving a TCP port. Exactly one of httpGet, tcpSocket, or grpc must be specified. */ tcpSockets: outputs.cloudrunv2.GetWorkerPoolTemplateContainerStartupProbeTcpSocket[]; /** * Optional. Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. */ timeoutSeconds: number; } interface GetWorkerPoolTemplateContainerStartupProbeGrpc { /** * Optional. Port number of the gRPC service. Number must be in the range 1 to 65535. 
If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */
    port: number;
    /**
     * Optional. Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md ). If this is not specified, the default behavior is defined by gRPC
     */
    service: string;
}
interface GetWorkerPoolTemplateContainerStartupProbeHttpGet {
    /**
     * Optional. Custom headers to set in the request. HTTP allows repeated headers.
     */
    httpHeaders: outputs.cloudrunv2.GetWorkerPoolTemplateContainerStartupProbeHttpGetHttpHeader[];
    /**
     * Optional. Path to access on the HTTP server. Defaults to '/'.
     */
    path: string;
    /**
     * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort.
     */
    port: number;
}
interface GetWorkerPoolTemplateContainerStartupProbeHttpGetHttpHeader {
    /**
     * Required. The header field name
     * NOTE(review): this property is declared `port: number` but documented as the HTTP header field *name*; the analogous JobTemplateTemplateContainerStartupProbeHttpGetHttpHeader in this file declares `name: string`, and the Cloud Run v2 API models a header name as a string. Looks like an upstream schema/codegen defect — confirm against the provider schema before relying on this field.
     */
    port: number;
    /**
     * Optional. The header field value
     */
    value: string;
}
interface GetWorkerPoolTemplateContainerStartupProbeTcpSocket {
    /**
     * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort.
     */
    port: number;
}
interface GetWorkerPoolTemplateContainerVolumeMount {
    /**
     * Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run
     */
    mountPath: string;
    /**
     * The name of the Cloud Run v2 Worker Pool.
     */
    name: string;
    /**
     * Path within the volume from which the container's volume should be mounted.
*/ subPath: string; } interface GetWorkerPoolTemplateNodeSelector { /** * The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU. */ accelerator: string; } interface GetWorkerPoolTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. */ cloudSqlInstances: outputs.cloudrunv2.GetWorkerPoolTemplateVolumeCloudSqlInstance[]; /** * Ephemeral storage used as a shared volume. */ emptyDirs: outputs.cloudrunv2.GetWorkerPoolTemplateVolumeEmptyDir[]; /** * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. */ gcs: outputs.cloudrunv2.GetWorkerPoolTemplateVolumeGc[]; /** * The name of the Cloud Run v2 Worker Pool. */ name: string; /** * Represents an NFS mount. */ nfs: outputs.cloudrunv2.GetWorkerPoolTemplateVolumeNf[]; /** * Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret */ secrets: outputs.cloudrunv2.GetWorkerPoolTemplateVolumeSecret[]; } interface GetWorkerPoolTemplateVolumeCloudSqlInstance { /** * The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} */ instances: string[]; } interface GetWorkerPoolTemplateVolumeEmptyDir { /** * The different types of medium supported for EmptyDir. Default value: "MEMORY" Possible values: ["MEMORY"] */ medium: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit: string; } interface GetWorkerPoolTemplateVolumeGc { /** * GCS Bucket name */ bucket: string; /** * A list of flags to pass to the gcsfuse command for configuring this volume. * Flags should be passed without leading dashes. */ mountOptions: string[]; /** * If true, mount the GCS bucket as read-only */ readOnly: boolean; } interface GetWorkerPoolTemplateVolumeNf { /** * Path that is exported by the NFS server. */ path: string; /** * If true, mount the NFS volume as read only */ readOnly: boolean; /** * Hostname or IP address of the NFS server */ server: string; } interface GetWorkerPoolTemplateVolumeSecret { /** * Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. */ defaultMode: number; /** * If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. */ items: outputs.cloudrunv2.GetWorkerPoolTemplateVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. 
*/ secret: string; } interface GetWorkerPoolTemplateVolumeSecretItem { /** * Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. */ mode: number; /** * The relative path of the secret in the container. */ path: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version */ version: string; } interface GetWorkerPoolTemplateVpcAccess { /** * VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. */ connector: string; /** * Traffic VPC egress settings. Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"] */ egress: string; /** * Direct VPC egress settings. Currently only single network interface is supported. */ networkInterfaces: outputs.cloudrunv2.GetWorkerPoolTemplateVpcAccessNetworkInterface[]; } interface GetWorkerPoolTemplateVpcAccessNetworkInterface { /** * The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be * looked up from the subnetwork. */ network: string; /** * The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the * subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Run WorkerPool. */ tags: string[]; } interface GetWorkerPoolTerminalCondition { /** * A reason for the execution condition. */ executionReason: string; /** * Last time the condition transitioned from one status to another. 
*/ lastTransitionTime: string; /** * Human readable message indicating details about the current status. */ message: string; /** * A common (workerPool-level) reason for this condition. */ reason: string; /** * A reason for the revision condition. */ revisionReason: string; /** * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * State of the condition. */ state: string; /** * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface JobBinaryAuthorization { /** * If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass */ breakglassJustification?: string; /** * The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} */ policy?: string; /** * If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. */ useDefault?: boolean; } interface JobCondition { /** * (Output) * A reason for the execution condition. */ executionReason: string; /** * (Output) * Last time the condition transitioned from one status to another. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * A common (service-level) reason for this condition. */ reason: string; /** * (Output) * A reason for the revision condition. 
*/
    revisionReason: string;
    /**
     * (Output)
     * How to interpret failures of this condition, one of Error, Warning, Info
     */
    severity: string;
    /**
     * (Output)
     * State of the condition.
     */
    state: string;
    /**
     * (Output)
     * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include:
     * "Ready": True when the Resource is ready.
     */
    type: string;
}
interface JobIamBindingCondition {
    description?: string;
    expression: string;
    title: string;
}
interface JobIamMemberCondition {
    description?: string;
    expression: string;
    title: string;
}
interface JobLatestCreatedExecution {
    /**
     * (Output)
     * Completion timestamp of the execution.
     * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
     */
    completionTime: string;
    /**
     * (Output)
     * Creation timestamp of the execution.
     * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
     */
    createTime: string;
    /**
     * Name of the Job.
     */
    name: string;
}
interface JobTemplate {
    /**
     * Unstructured key value map that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects.
     * Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected.
     * All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate.
     * This field follows Kubernetes annotations' namespacing, limits, and rules.
     */
    annotations?: { [key: string]: string; };
    /**
     * Unstructured key value map that can be used to organize and categorize objects.
User-provided labels are shared with Google's billing system, so they can be used to filter, * or break down billing charges by team, component, environment, state, etc. For more information, visit https://docs.cloud.google.com/resource-manager/docs/creating-managing-labels or * https://cloud.google.com/run/docs/configuring/labels. * Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. * All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. */ labels?: { [key: string]: string; }; /** * Specifies the maximum desired number of tasks the execution should run at given time. Must be <= taskCount. When the job is run, if this field is 0 or unset, the maximum possible value will be used for that execution. The actual number of tasks running in steady state will be less than this number when there are fewer tasks waiting to be completed remaining, i.e. when the work left to do is less than max parallelism. */ parallelism: number; /** * Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ */ taskCount: number; /** * Describes the task(s) that will be created when executing an execution * Structure is documented below. */ template: outputs.cloudrunv2.JobTemplateTemplate; } interface JobTemplateTemplate { /** * Holds the single container that defines the unit of execution for this task. * Structure is documented below. */ containers: outputs.cloudrunv2.JobTemplateTemplateContainer[]; /** * A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. 
For more information, go to https://cloud.google.com/run/docs/securing/using-cmek */ encryptionKey?: string; /** * The execution environment being used to host this Task. * Possible values are: `EXECUTION_ENVIRONMENT_GEN1`, `EXECUTION_ENVIRONMENT_GEN2`. */ executionEnvironment: string; /** * True if GPU zonal redundancy is disabled on this execution. */ gpuZonalRedundancyDisabled?: boolean; /** * Number of retries allowed per Task, before marking this Task failed. Defaults to 3. Minimum value is 0. */ maxRetries?: number; /** * Node Selector describes the hardware requirements of the resources. * Structure is documented below. */ nodeSelector?: outputs.cloudrunv2.JobTemplateTemplateNodeSelector; /** * Email address of the IAM service account associated with the Task of a Job. The service account represents the identity of the running task, and determines what permissions the task has. If not provided, the task will use the project's default service account. */ serviceAccount: string; /** * Max allowed time duration the Task may be active before the system will actively try to mark it failed and kill associated containers. This applies per attempt of a task, meaning each retry can run for the full timeout. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ timeout: string; /** * A list of Volumes to make available to containers. * Structure is documented below. */ volumes?: outputs.cloudrunv2.JobTemplateTemplateVolume[]; /** * VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. * Structure is documented below. */ vpcAccess?: outputs.cloudrunv2.JobTemplateTemplateVpcAccess; } interface JobTemplateTemplateContainer { /** * Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. */ args?: string[]; /** * Entrypoint array. Not executed within a shell. 
The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell */ commands?: string[]; /** * Names of the containers that must start before this container. */ dependsOns?: string[]; /** * List of environment variables to set in the container. * Structure is documented below. */ envs?: outputs.cloudrunv2.JobTemplateTemplateContainerEnv[]; /** * URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images */ image: string; /** * Name of the container specified as a DNS_LABEL. */ name?: string; /** * List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. * If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on * Structure is documented below. */ ports?: outputs.cloudrunv2.JobTemplateTemplateContainerPort[]; /** * Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources * Structure is documented below. */ resources: outputs.cloudrunv2.JobTemplateTemplateContainerResources; /** * Startup probe of application within the container. * All other probes are disabled if a startup probe is provided, until it * succeeds. Container will not be added to service endpoints if the probe fails. * Structure is documented below. 
*/
    startupProbe: outputs.cloudrunv2.JobTemplateTemplateContainerStartupProbe;
    /**
     * Volume to mount into the container's filesystem.
     * Structure is documented below.
     */
    volumeMounts?: outputs.cloudrunv2.JobTemplateTemplateContainerVolumeMount[];
    /**
     * Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image.
     */
    workingDir?: string;
}
interface JobTemplateTemplateContainerEnv {
    /**
     * Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters.
     */
    name: string;
    /**
     * Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run.
     */
    value?: string;
    /**
     * Source for the environment variable's value.
     * Structure is documented below.
     */
    valueSource?: outputs.cloudrunv2.JobTemplateTemplateContainerEnvValueSource;
}
interface JobTemplateTemplateContainerEnvValueSource {
    /**
     * Selects a secret and a specific version from Cloud Secret Manager.
     * Structure is documented below.
     */
    secretKeyRef?: outputs.cloudrunv2.JobTemplateTemplateContainerEnvValueSourceSecretKeyRef;
}
interface JobTemplateTemplateContainerEnvValueSourceSecretKeyRef {
    /**
     * The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.
     */
    secret: string;
    /**
     * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version.
     */
    version: string;
}
interface JobTemplateTemplateContainerPort {
    /**
     * Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536.
     */
    containerPort?: number;
    /**
     * If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c".
*/ name?: string; } interface JobTemplateTemplateContainerResources { /** * Only memory, CPU, and nvidia.com/gpu are supported. Use key `cpu` for CPU limit, `memory` for memory limit, `nvidia.com/gpu` for gpu limit. Note: The only supported values for CPU are '1', '2', '4', '6', and '8'. Setting 4 CPU requires at least 2Gi of memory, setting 6 or more CPU requires at least 4Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; } interface JobTemplateTemplateContainerStartupProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. */ grpc?: outputs.cloudrunv2.JobTemplateTemplateContainerStartupProbeGrpc; /** * HttpGet specifies the http request to perform. * Structure is documented below. */ httpGet?: outputs.cloudrunv2.JobTemplateTemplateContainerStartupProbeHttpGet; /** * Number of seconds after the container has started before the probe is * initiated. * Defaults to 0 seconds. Minimum value is 0. Maximum value is 240. */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. Minimum value is 1. Maximum value is 240. */ periodSeconds?: number; /** * TcpSocket specifies an action involving a TCP port. * Structure is documented below. */ tcpSocket?: outputs.cloudrunv2.JobTemplateTemplateContainerStartupProbeTcpSocket; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Minimum value is 1. Maximum value is 3600. * Must be smaller than periodSeconds. */ timeoutSeconds?: number; } interface JobTemplateTemplateContainerStartupProbeGrpc { /** * Port number to access on the container. 
Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface JobTemplateTemplateContainerStartupProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.cloudrunv2.JobTemplateTemplateContainerStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path?: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface JobTemplateTemplateContainerStartupProbeHttpGetHttpHeader { /** * The header field name. */ name: string; /** * The header field value. */ value?: string; } interface JobTemplateTemplateContainerStartupProbeTcpSocket { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface JobTemplateTemplateContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run */ mountPath: string; /** * This must match the Name of a Volume. */ name: string; /** * Path within the volume from which the container's volume should be mounted. 
*/ subPath?: string; } interface JobTemplateTemplateNodeSelector { /** * The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/jobs/gpu for configuring GPU. */ accelerator: string; } interface JobTemplateTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. * Structure is documented below. */ cloudSqlInstance?: outputs.cloudrunv2.JobTemplateTemplateVolumeCloudSqlInstance; /** * Ephemeral storage used as a shared volume. * Structure is documented below. */ emptyDir?: outputs.cloudrunv2.JobTemplateTemplateVolumeEmptyDir; /** * Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. */ gcs?: outputs.cloudrunv2.JobTemplateTemplateVolumeGcs; /** * Volume's name. */ name: string; /** * NFS share mounted as a volume. * Structure is documented below. */ nfs?: outputs.cloudrunv2.JobTemplateTemplateVolumeNfs; /** * Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret * Structure is documented below. */ secret?: outputs.cloudrunv2.JobTemplateTemplateVolumeSecret; } interface JobTemplateTemplateVolumeCloudSqlInstance { /** * The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} */ instances?: string[]; } interface JobTemplateTemplateVolumeEmptyDir { /** * The different types of medium supported for EmptyDir. * Default value is `MEMORY`. * Possible values are: `MEMORY`. */ medium?: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit?: string; } interface JobTemplateTemplateVolumeGcs { /** * Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. */ bucket: string; /** * A list of flags to pass to the gcsfuse command for configuring this volume. * Flags should be passed without leading dashes. */ mountOptions?: string[]; /** * If true, mount this volume as read-only in all mounts. If false, mount this volume as read-write. */ readOnly?: boolean; } interface JobTemplateTemplateVolumeNfs { /** * Path that is exported by the NFS server. */ path?: string; /** * If true, mount this volume as read-only in all mounts. */ readOnly?: boolean; /** * Hostname or IP address of the NFS server. */ server: string; } interface JobTemplateTemplateVolumeSecret { /** * Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. */ defaultMode?: number; /** * If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. * Structure is documented below. */ items?: outputs.cloudrunv2.JobTemplateTemplateVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. 
Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. */ secret: string; } interface JobTemplateTemplateVolumeSecretItem { /** * Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. */ mode?: number; /** * The relative path of the secret in the container. */ path: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version */ version: string; } interface JobTemplateTemplateVpcAccess { /** * VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. */ connector?: string; /** * Traffic VPC egress settings. * Possible values are: `ALL_TRAFFIC`, `PRIVATE_RANGES_ONLY`. */ egress: string; /** * Direct VPC egress settings. Currently only single network interface is supported. * Structure is documented below. */ networkInterfaces?: outputs.cloudrunv2.JobTemplateTemplateVpcAccessNetworkInterface[]; } interface JobTemplateTemplateVpcAccessNetworkInterface { /** * The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be * looked up from the subnetwork. */ network: string; /** * The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the * subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Run job. 
*/ tags?: string[]; } interface JobTerminalCondition { /** * (Output) * A reason for the execution condition. */ executionReason: string; /** * (Output) * Last time the condition transitioned from one status to another. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * A common (service-level) reason for this condition. */ reason: string; /** * (Output) * A reason for the revision condition. */ revisionReason: string; /** * (Output) * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * (Output) * State of the condition. */ state: string; /** * (Output) * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface ServiceBinaryAuthorization { /** * If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass */ breakglassJustification?: string; /** * The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} */ policy?: string; /** * If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. */ useDefault?: boolean; } interface ServiceBuildConfig { /** * The base image used to build the function. */ baseImage?: string; /** * Sets whether the function will receive automatic base image updates. 
*/ enableAutomaticUpdates?: boolean; /** * User-provided build-time environment variables for the function. */ environmentVariables?: { [key: string]: string; }; /** * The name of the function (as defined in source code) that will be executed. Defaults to the resource name suffix, if not specified. For backward compatibility, if function with given name is not found, then the system will try to use function named "function". */ functionTarget?: string; /** * Artifact Registry URI to store the built image. */ imageUri?: string; /** * (Output) * The Cloud Build name of the latest successful deployment of the function. */ name: string; /** * Service account to be used for building the container. The format of this field is `projects/{projectId}/serviceAccounts/{serviceAccountEmail}`. */ serviceAccount?: string; /** * The Cloud Storage bucket URI where the function source code is located. */ sourceLocation?: string; /** * Name of the Cloud Build Custom Worker Pool that should be used to build the Cloud Run function. The format of this field is `projects/{project}/locations/{region}/workerPools/{workerPool}` where {project} and {region} are the project id and region respectively where the worker pool is defined and {workerPool} is the short name of the worker pool. */ workerPool?: string; } interface ServiceCondition { /** * (Output) * A reason for the execution condition. */ executionReason: string; /** * (Output) * Last time the condition transitioned from one status to another. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * A common (service-level) reason for this condition. */ reason: string; /** * (Output) * A reason for the revision condition. 
*/ revisionReason: string; /** * (Output) * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * (Output) * State of the condition. */ state: string; /** * (Output) * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface ServiceIamBindingCondition { description?: string; expression: string; title: string; } interface ServiceIamMemberCondition { description?: string; expression: string; title: string; } interface ServiceMultiRegionSettings { /** * (Output) * System-generated unique id for the multi-region Service. */ multiRegionId: string; /** * The list of regions to deploy the multi-region Service. */ regions?: string[]; } interface ServiceScaling { /** * Total instance count for the service in manual scaling mode. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. */ manualInstanceCount: number; /** * Combined maximum number of instances for all revisions receiving traffic. */ maxInstanceCount: number; /** * Minimum number of instances for the service, to be divided among all revisions receiving traffic. */ minInstanceCount: number; /** * The [scaling mode](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services#scalingmode) for the service. * Possible values are: `AUTOMATIC`, `MANUAL`. */ scalingMode?: string; } interface ServiceTemplate { /** * Unstructured key value map that may be set by external tools to store any arbitrary metadata. They are not queryable and should be preserved when modifying objects. * Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. * All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. * This field follows Kubernetes annotations' namespacing, limits, and rules. 
*/ annotations?: { [key: string]: string; }; /** * Holds the containers that define the unit of execution for this Service. * Structure is documented below. */ containers?: outputs.cloudrunv2.ServiceTemplateContainer[]; /** * A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek */ encryptionKey?: string; /** * The sandbox environment to host this Revision. * Possible values are: `EXECUTION_ENVIRONMENT_GEN1`, `EXECUTION_ENVIRONMENT_GEN2`. */ executionEnvironment?: string; /** * True if GPU zonal redundancy is disabled on this revision. */ gpuZonalRedundancyDisabled?: boolean; /** * Disables health checking containers during deployment. */ healthCheckDisabled?: boolean; /** * Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. * For more information, visit https://docs.cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. * Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. * All system labels in v1 now have a corresponding field in v2 RevisionTemplate. */ labels?: { [key: string]: string; }; /** * Sets the maximum number of requests that each serving instance can receive. * If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1. */ maxInstanceRequestConcurrency: number; /** * Node Selector describes the hardware requirements of the resources. * Structure is documented below. */ nodeSelector?: outputs.cloudrunv2.ServiceTemplateNodeSelector; /** * The unique name for the revision. 
If this field is omitted, it will be automatically generated based on the Service name. */ revision?: string; /** * Scaling settings for this Revision. * Structure is documented below. */ scaling: outputs.cloudrunv2.ServiceTemplateScaling; /** * Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. */ serviceAccount: string; /** * (Optional, Beta) * Enables Cloud Service Mesh for this Revision. * Structure is documented below. */ serviceMesh?: outputs.cloudrunv2.ServiceTemplateServiceMesh; /** * Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity */ sessionAffinity?: boolean; /** * Max allowed time for an instance to respond to a request. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ timeout: string; /** * A list of Volumes to make available to containers. * Structure is documented below. */ volumes?: outputs.cloudrunv2.ServiceTemplateVolume[]; /** * VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. * Structure is documented below. */ vpcAccess?: outputs.cloudrunv2.ServiceTemplateVpcAccess; } interface ServiceTemplateContainer { /** * Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. */ args?: string[]; /** * Base image for this container. If set, it indicates that the service is enrolled into automatic base image update. */ baseImageUri?: string; /** * (Output) * The build info of the container image. * Structure is documented below. */ buildInfos: outputs.cloudrunv2.ServiceTemplateContainerBuildInfo[]; /** * Entrypoint array. Not executed within a shell. 
The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell */ commands?: string[]; /** * Containers which should be started before this container. If specified the container will wait to start until all containers with the listed names are healthy. */ dependsOns?: string[]; /** * List of environment variables to set in the container. * Structure is documented below. */ envs?: outputs.cloudrunv2.ServiceTemplateContainerEnv[]; /** * URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images */ image: string; /** * Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes * Structure is documented below. */ livenessProbe?: outputs.cloudrunv2.ServiceTemplateContainerLivenessProbe; /** * Name of the container specified as a DNS_LABEL. */ name?: string; /** * List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. * If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on * Structure is documented below. */ ports: outputs.cloudrunv2.ServiceTemplateContainerPorts; /** * Periodic probe of container readiness. * Structure is documented below. 
*/ readinessProbe?: outputs.cloudrunv2.ServiceTemplateContainerReadinessProbe; /** * Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources * Structure is documented below. */ resources: outputs.cloudrunv2.ServiceTemplateContainerResources; /** * (Optional, Beta) * Location of the source. * Structure is documented below. */ sourceCode?: outputs.cloudrunv2.ServiceTemplateContainerSourceCode; /** * Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes * Structure is documented below. */ startupProbe: outputs.cloudrunv2.ServiceTemplateContainerStartupProbe; /** * Volume to mount into the container's filesystem. * Structure is documented below. */ volumeMounts?: outputs.cloudrunv2.ServiceTemplateContainerVolumeMount[]; /** * Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. */ workingDir?: string; } interface ServiceTemplateContainerBuildInfo { /** * Entry point of the function when the image is a Cloud Run function. */ functionTarget: string; /** * Source code location of the image. */ sourceLocation: string; } interface ServiceTemplateContainerEnv { /** * Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters. */ name: string; /** * Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. */ value?: string; /** * Source for the environment variable's value. * Structure is documented below. 
*/ valueSource?: outputs.cloudrunv2.ServiceTemplateContainerEnvValueSource; } interface ServiceTemplateContainerEnvValueSource { /** * Selects a secret and a specific version from Cloud Secret Manager. * Structure is documented below. */ secretKeyRef?: outputs.cloudrunv2.ServiceTemplateContainerEnvValueSourceSecretKeyRef; } interface ServiceTemplateContainerEnvValueSourceSecretKeyRef { /** * The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. */ secret: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. */ version?: string; } interface ServiceTemplateContainerLivenessProbe { /** * Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. */ grpc?: outputs.cloudrunv2.ServiceTemplateContainerLivenessProbeGrpc; /** * HTTPGet specifies the http request to perform. * Structure is documented below. */ httpGet?: outputs.cloudrunv2.ServiceTemplateContainerLivenessProbeHttpGet; /** * Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds */ periodSeconds?: number; /** * TCPSocketAction describes an action based on opening a socket * Structure is documented below. 
*/ tcpSocket?: outputs.cloudrunv2.ServiceTemplateContainerLivenessProbeTcpSocket; /** * Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ timeoutSeconds?: number; } interface ServiceTemplateContainerLivenessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface ServiceTemplateContainerLivenessProbeHttpGet { /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.cloudrunv2.ServiceTemplateContainerLivenessProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path?: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateContainerLivenessProbeHttpGetHttpHeader { /** * The header field name */ name: string; /** * The header field value */ value?: string; } interface ServiceTemplateContainerLivenessProbeTcpSocket { /** * Port number to access on the container. Must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateContainerPorts { /** * Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536. 
*/ containerPort?: number; /** * If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". */ name: string; } interface ServiceTemplateContainerReadinessProbe { /** * Minimum consecutive failures for the probe to be considered failed after * having succeeded. Defaults to 3. */ failureThreshold: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. */ grpc?: outputs.cloudrunv2.ServiceTemplateContainerReadinessProbeGrpc; /** * HttpGet specifies the http request to perform. * Structure is documented below. */ httpGet?: outputs.cloudrunv2.ServiceTemplateContainerReadinessProbeHttpGet; /** * How often (in seconds) to perform the probe. * Default to 10 seconds. */ periodSeconds: number; /** * Minimum consecutive successes for the probe to be considered successful after having failed. * Defaults to 2. */ successThreshold: number; /** * Number of seconds after which the probe times out. * Defaults to 1 second. Must be smaller than period_seconds. */ timeoutSeconds: number; } interface ServiceTemplateContainerReadinessProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface ServiceTemplateContainerReadinessProbeHttpGet { /** * Path to access on the HTTP server. If set, it should not be empty string. */ path: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. 
*/ port: number; } interface ServiceTemplateContainerResources { /** * Determines whether CPU is only allocated during requests. True by default if the parent `resources` field is not set. However, if * `resources` is set, this field must be explicitly set to true to preserve the default behavior. */ cpuIdle?: boolean; /** * Only memory, CPU, and nvidia.com/gpu are supported. Use key `cpu` for CPU limit, `memory` for memory limit, `nvidia.com/gpu` for gpu limit. Note: The only supported values for CPU are '1', '2', '4', '6' and '8'. Setting 4 CPU requires at least 2Gi of memory, setting 6 or more CPU requires at least 4Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; /** * Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency. */ startupCpuBoost?: boolean; } interface ServiceTemplateContainerSourceCode { /** * Cloud Storage source. * Structure is documented below. */ cloudStorageSource?: outputs.cloudrunv2.ServiceTemplateContainerSourceCodeCloudStorageSource; } interface ServiceTemplateContainerSourceCodeCloudStorageSource { /** * The Cloud Storage bucket name. */ bucket: string; /** * The Cloud Storage object generation. The is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer. */ generation?: string; /** * The Cloud Storage object name. */ object: string; } interface ServiceTemplateContainerStartupProbe { /** * Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * GRPC specifies an action involving a GRPC port. * Structure is documented below. 
*/ grpc?: outputs.cloudrunv2.ServiceTemplateContainerStartupProbeGrpc; /** * HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified. * Structure is documented below. */ httpGet?: outputs.cloudrunv2.ServiceTemplateContainerStartupProbeHttpGet; /** * Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds */ periodSeconds?: number; /** * TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified. * Structure is documented below. */ tcpSocket?: outputs.cloudrunv2.ServiceTemplateContainerStartupProbeTcpSocket; /** * Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes */ timeoutSeconds?: number; } interface ServiceTemplateContainerStartupProbeGrpc { /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; /** * The name of the service to place in the gRPC HealthCheckRequest * (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface ServiceTemplateContainerStartupProbeHttpGet { /** * Custom headers to set in the request. 
HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.cloudrunv2.ServiceTemplateContainerStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. If set, it should not be empty string. */ path?: string; /** * Port number to access on the container. Number must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateContainerStartupProbeHttpGetHttpHeader { /** * The header field name */ name: string; /** * The header field value */ value?: string; } interface ServiceTemplateContainerStartupProbeTcpSocket { /** * Port number to access on the container. Must be in the range 1 to 65535. * If not specified, defaults to the same value as container.ports[0].containerPort. */ port: number; } interface ServiceTemplateContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run */ mountPath: string; /** * This must match the Name of a Volume. */ name: string; /** * Path within the volume from which the container's volume should be mounted. */ subPath?: string; } interface ServiceTemplateNodeSelector { /** * The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU. */ accelerator: string; } interface ServiceTemplateScaling { /** * Combined maximum number of instances for all revisions receiving traffic. */ maxInstanceCount?: number; /** * Minimum number of instances for the service, to be divided among all revisions receiving traffic. */ minInstanceCount?: number; } interface ServiceTemplateServiceMesh { /** * The Mesh resource name. 
For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. */ mesh?: string; } interface ServiceTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. * Structure is documented below. */ cloudSqlInstance?: outputs.cloudrunv2.ServiceTemplateVolumeCloudSqlInstance; /** * Ephemeral storage used as a shared volume. * Structure is documented below. */ emptyDir?: outputs.cloudrunv2.ServiceTemplateVolumeEmptyDir; /** * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. */ gcs?: outputs.cloudrunv2.ServiceTemplateVolumeGcs; /** * Volume's name. */ name: string; /** * Represents an NFS mount. * Structure is documented below. */ nfs?: outputs.cloudrunv2.ServiceTemplateVolumeNfs; /** * Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret * Structure is documented below. */ secret?: outputs.cloudrunv2.ServiceTemplateVolumeSecret; } interface ServiceTemplateVolumeCloudSqlInstance { /** * The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} */ instances?: string[]; } interface ServiceTemplateVolumeEmptyDir { /** * The different types of medium supported for EmptyDir. * Default value is `MEMORY`. * Possible values are: `MEMORY`. */ medium?: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit?: string; } interface ServiceTemplateVolumeGcs { /** * GCS Bucket name */ bucket: string; /** * A list of flags to pass to the gcsfuse command for configuring this volume. * Flags should be passed without leading dashes. */ mountOptions?: string[]; /** * If true, mount the GCS bucket as read-only */ readOnly?: boolean; } interface ServiceTemplateVolumeNfs { /** * Path that is exported by the NFS server. */ path: string; /** * If true, mount the NFS volume as read only */ readOnly?: boolean; /** * Hostname or IP address of the NFS server */ server: string; } interface ServiceTemplateVolumeSecret { /** * Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. */ defaultMode?: number; /** * If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. * Structure is documented below. */ items?: outputs.cloudrunv2.ServiceTemplateVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. 
*/ secret: string; } interface ServiceTemplateVolumeSecretItem { /** * Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. */ mode?: number; /** * The relative path of the secret in the container. */ path: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version */ version?: string; } interface ServiceTemplateVpcAccess { /** * VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. */ connector?: string; /** * Traffic VPC egress settings. * Possible values are: `ALL_TRAFFIC`, `PRIVATE_RANGES_ONLY`. */ egress: string; /** * Direct VPC egress settings. Currently only single network interface is supported. * Structure is documented below. */ networkInterfaces?: outputs.cloudrunv2.ServiceTemplateVpcAccessNetworkInterface[]; } interface ServiceTemplateVpcAccessNetworkInterface { /** * The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be * looked up from the subnetwork. */ network: string; /** * The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the * subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Run service. */ tags?: string[]; } interface ServiceTerminalCondition { /** * (Output) * A reason for the execution condition. 
*/ executionReason: string; /** * (Output) * Last time the condition transitioned from one status to another. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * A common (service-level) reason for this condition. */ reason: string; /** * (Output) * A reason for the revision condition. */ revisionReason: string; /** * (Output) * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * (Output) * State of the condition. */ state: string; /** * (Output) * type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. */ type: string; } interface ServiceTraffic { /** * Specifies percent of the traffic to this Revision. This defaults to zero if unspecified. */ percent: number; /** * Revision to which to send this portion of traffic, if traffic allocation is by revision. */ revision?: string; /** * Indicates a string to be part of the URI to exclusively reference this target. */ tag?: string; /** * The allocation type for this traffic target. * Possible values are: `TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST`, `TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION`. */ type?: string; } interface ServiceTrafficStatus { /** * (Output) * Specifies percent of the traffic to this Revision. */ percent: number; /** * (Output) * Revision to which this traffic is sent. */ revision: string; /** * (Output) * Indicates the string used in the URI to exclusively reference this target. */ tag: string; /** * (Output) * The allocation type for this traffic target. */ type: string; /** * (Output) * Displays the target URI. */ uri: string; } interface WorkerPoolBinaryAuthorization { /** * If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. 
For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass */ breakglassJustification?: string; /** * The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} */ policy?: string; /** * If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. */ useDefault?: boolean; } interface WorkerPoolCondition { /** * (Output) * A reason for the execution condition. */ executionReason: string; /** * (Output) * Last time the condition transitioned from one status to another. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * A common (workerPool-level) reason for this condition. */ reason: string; /** * (Output) * A reason for the revision condition. */ revisionReason: string; /** * (Output) * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * (Output) * State of the condition. */ state: string; /** * (Output) * The allocation type for this instance split. */ type: string; } interface WorkerPoolIamBindingCondition { description?: string; expression: string; title: string; } interface WorkerPoolIamMemberCondition { description?: string; expression: string; title: string; } interface WorkerPoolInstanceSplit { /** * Specifies percent of the instance split to this Revision. This defaults to zero if unspecified. */ percent: number; /** * Revision to which to assign this portion of instances, if split allocation is by revision. */ revision?: string; /** * The allocation type for this instance split. 
* Possible values are: `INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST`, `INSTANCE_SPLIT_ALLOCATION_TYPE_REVISION`. */ type?: string; } interface WorkerPoolInstanceSplitStatus { /** * (Output) * Specifies percent of the instance split to this Revision. */ percent: number; /** * (Output) * Revision to which this instance split is assigned. */ revision: string; /** * (Output) * The allocation type for this instance split. */ type: string; } interface WorkerPoolScaling { /** * The total number of instances in manual scaling mode. */ manualInstanceCount?: number; /** * The maximum count of instances distributed among revisions based on the specified instance split percentages. */ maxInstanceCount?: number; /** * The minimum count of instances distributed among revisions based on the specified instance split percentages. */ minInstanceCount?: number; /** * The scaling mode for the worker pool. It defaults to MANUAL. * Possible values are: `AUTOMATIC`, `MANUAL`. */ scalingMode?: string; } interface WorkerPoolTemplate { /** * Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. * Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. * All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. * This field follows Kubernetes annotations' namespacing, limits, and rules. */ annotations?: { [key: string]: string; }; /** * Holds the containers that define the unit of execution for this WorkerPool. * Structure is documented below. */ containers?: outputs.cloudrunv2.WorkerPoolTemplateContainer[]; /** * A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. 
For more information, go to https://cloud.google.com/run/docs/securing/using-cmek */ encryptionKey?: string; /** * The action to take if the encryption key is revoked. * Possible values are: `PREVENT_NEW`, `SHUTDOWN`. */ encryptionKeyRevocationAction?: string; /** * If encryptionKeyRevocationAction is SHUTDOWN, the duration before shutting down all instances. The minimum increment is 1 hour. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ encryptionKeyShutdownDuration?: string; /** * True if GPU zonal redundancy is disabled on this revision. */ gpuZonalRedundancyDisabled?: boolean; /** * Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. * For more information, visit https://docs.cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. * Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. * All system labels in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. */ labels?: { [key: string]: string; }; /** * Node Selector describes the hardware requirements of the resources. * Structure is documented below. */ nodeSelector?: outputs.cloudrunv2.WorkerPoolTemplateNodeSelector; /** * The unique name for the revision. If this field is omitted, it will be automatically generated based on the WorkerPool name. */ revision?: string; /** * Email address of the IAM service account associated with the revision of the WorkerPool. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. 
*/ serviceAccount: string; /** * A list of Volumes to make available to containers. * Structure is documented below. */ volumes?: outputs.cloudrunv2.WorkerPoolTemplateVolume[]; /** * VPC Access configuration to use for this Revision. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. * Structure is documented below. */ vpcAccess?: outputs.cloudrunv2.WorkerPoolTemplateVpcAccess; } interface WorkerPoolTemplateContainer { /** * Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. */ args?: string[]; /** * Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell */ commands?: string[]; /** * Names of the containers that must start before this container. */ dependsOns?: string[]; /** * List of environment variables to set in the container. * Structure is documented below. */ envs?: outputs.cloudrunv2.WorkerPoolTemplateContainerEnv[]; /** * URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images */ image: string; /** * Periodic probe of container liveness. Container will be restarted if the probe fails. * Structure is documented below. */ livenessProbe?: outputs.cloudrunv2.WorkerPoolTemplateContainerLivenessProbe; /** * Name of the container specified as a DNS_LABEL. */ name?: string; /** * Compute Resource requirements by this container. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources * Structure is documented below. */ resources: outputs.cloudrunv2.WorkerPoolTemplateContainerResources; /** * Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. * Structure is documented below. */ startupProbe?: outputs.cloudrunv2.WorkerPoolTemplateContainerStartupProbe; /** * Volume to mount into the container's filesystem. * Structure is documented below. */ volumeMounts?: outputs.cloudrunv2.WorkerPoolTemplateContainerVolumeMount[]; /** * Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. */ workingDir?: string; } interface WorkerPoolTemplateContainerEnv { /** * Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters. */ name: string; /** * Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. */ value?: string; /** * Source for the environment variable's value. * Structure is documented below. */ valueSource?: outputs.cloudrunv2.WorkerPoolTemplateContainerEnvValueSource; } interface WorkerPoolTemplateContainerEnvValueSource { /** * Selects a secret and a specific version from Cloud Secret Manager. * Structure is documented below. */ secretKeyRef?: outputs.cloudrunv2.WorkerPoolTemplateContainerEnvValueSourceSecretKeyRef; } interface WorkerPoolTemplateContainerEnvValueSourceSecretKeyRef { /** * The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. */ secret: string; /** * The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. */ version?: string; } interface WorkerPoolTemplateContainerLivenessProbe { /** * Optional. Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * Optional. GRPC specifies an action involving a gRPC port. Exactly one of httpGet, tcpSocket, or grpc must be specified. * Structure is documented below. */ grpc?: outputs.cloudrunv2.WorkerPoolTemplateContainerLivenessProbeGrpc; /** * Optional. HTTPGet specifies the http request to perform. Exactly one of httpGet, tcpSocket, or grpc must be specified. * Structure is documented below. */ httpGet?: outputs.cloudrunv2.WorkerPoolTemplateContainerLivenessProbeHttpGet; /** * Optional. Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. */ initialDelaySeconds?: number; /** * Optional. How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeout_seconds. */ periodSeconds?: number; /** * Optional. TCPSocket specifies an action involving a TCP port. Exactly one of httpGet, tcpSocket, or grpc must be specified. * Structure is documented below. */ tcpSocket?: outputs.cloudrunv2.WorkerPoolTemplateContainerLivenessProbeTcpSocket; /** * Optional. Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. */ timeoutSeconds?: number; } interface WorkerPoolTemplateContainerLivenessProbeGrpc { /** * Optional. Port number of the gRPC service. Number must be in the range 1 to 65535. 
If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */ port?: number; /** * Optional. Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md ). If this is not specified, the default behavior is defined by gRPC */ service?: string; } interface WorkerPoolTemplateContainerLivenessProbeHttpGet { /** * Optional. Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.cloudrunv2.WorkerPoolTemplateContainerLivenessProbeHttpGetHttpHeaders[]; /** * Optional. Path to access on the HTTP server. Defaults to '/'. */ path?: string; /** * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */ port?: number; } interface WorkerPoolTemplateContainerLivenessProbeHttpGetHttpHeaders { /** * Required. The header field name */ name: string; /** * Optional. The header field value */ value?: string; } interface WorkerPoolTemplateContainerLivenessProbeTcpSocket { /** * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */ port?: number; } interface WorkerPoolTemplateContainerResources { /** * Only memory, CPU, and nvidia.com/gpu are supported. Use key `cpu` for CPU limit, `memory` for memory limit, `nvidia.com/gpu` for gpu limit. Note: The only supported values for CPU are '1', '2', '4', '6', and '8'. Setting 4 CPU requires at least 2Gi of memory, setting 6 or more CPU requires at least 4Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go */ limits: { [key: string]: string; }; } interface WorkerPoolTemplateContainerStartupProbe { /** * Optional. Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. */ failureThreshold?: number; /** * Optional. GRPC specifies an action involving a gRPC port. Exactly one of httpGet, tcpSocket, or grpc must be specified. * Structure is documented below. */ grpc?: outputs.cloudrunv2.WorkerPoolTemplateContainerStartupProbeGrpc; /** * Optional. HTTPGet specifies the http request to perform. Exactly one of httpGet, tcpSocket, or grpc must be specified. * Structure is documented below. */ httpGet?: outputs.cloudrunv2.WorkerPoolTemplateContainerStartupProbeHttpGet; /** * Optional. Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. */ initialDelaySeconds?: number; /** * Optional. How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeout_seconds. */ periodSeconds?: number; /** * Optional. TCPSocket specifies an action involving a TCP port. Exactly one of httpGet, tcpSocket, or grpc must be specified. * Structure is documented below. */ tcpSocket?: outputs.cloudrunv2.WorkerPoolTemplateContainerStartupProbeTcpSocket; /** * Optional. Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. */ timeoutSeconds?: number; } interface WorkerPoolTemplateContainerStartupProbeGrpc { /** * Optional. Port number of the gRPC service. 
Number must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */ port?: number; /** * Optional. Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md ). If this is not specified, the default behavior is defined by gRPC */ service?: string; } interface WorkerPoolTemplateContainerStartupProbeHttpGet { /** * Optional. Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.cloudrunv2.WorkerPoolTemplateContainerStartupProbeHttpGetHttpHeaders[]; /** * Optional. Path to access on the HTTP server. Defaults to '/'. */ path?: string; /** * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */ port?: number; } interface WorkerPoolTemplateContainerStartupProbeHttpGetHttpHeaders { /** * Required. The header field name */ name: string; /** * Optional. The header field value */ value?: string; } interface WorkerPoolTemplateContainerStartupProbeTcpSocket { /** * Optional. Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to the exposed port of the container, which is the value of container.ports[0].containerPort. */ port?: number; } interface WorkerPoolTemplateContainerVolumeMount { /** * Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run */ mountPath: string; /** * This must match the Name of a Volume. 
*/ name: string; /** * Path within the volume from which the container's volume should be mounted. */ subPath?: string; } interface WorkerPoolTemplateNodeSelector { /** * The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU. */ accelerator: string; } interface WorkerPoolTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. * Structure is documented below. */ cloudSqlInstance?: outputs.cloudrunv2.WorkerPoolTemplateVolumeCloudSqlInstance; /** * Ephemeral storage used as a shared volume. * Structure is documented below. */ emptyDir?: outputs.cloudrunv2.WorkerPoolTemplateVolumeEmptyDir; /** * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. */ gcs?: outputs.cloudrunv2.WorkerPoolTemplateVolumeGcs; /** * Volume's name. */ name: string; /** * Represents an NFS mount. * Structure is documented below. */ nfs?: outputs.cloudrunv2.WorkerPoolTemplateVolumeNfs; /** * Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret * Structure is documented below. */ secret?: outputs.cloudrunv2.WorkerPoolTemplateVolumeSecret; } interface WorkerPoolTemplateVolumeCloudSqlInstance { /** * The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} */ instances?: string[]; } interface WorkerPoolTemplateVolumeEmptyDir { /** * The different types of medium supported for EmptyDir. * Default value is `MEMORY`. * Possible values are: `MEMORY`. 
*/ medium?: string; /** * Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. */ sizeLimit?: string; } interface WorkerPoolTemplateVolumeGcs { /** * GCS Bucket name */ bucket: string; /** * A list of flags to pass to the gcsfuse command for configuring this volume. * Flags should be passed without leading dashes. */ mountOptions?: string[]; /** * If true, mount the GCS bucket as read-only */ readOnly?: boolean; } interface WorkerPoolTemplateVolumeNfs { /** * Path that is exported by the NFS server. */ path: string; /** * If true, mount the NFS volume as read only */ readOnly?: boolean; /** * Hostname or IP address of the NFS server */ server: string; } interface WorkerPoolTemplateVolumeSecret { /** * Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. */ defaultMode?: number; /** * If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. * Structure is documented below. */ items?: outputs.cloudrunv2.WorkerPoolTemplateVolumeSecretItem[]; /** * The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. 
projects/{project}/secrets/{secret} if the secret is in a different project. */ secret: string; } interface WorkerPoolTemplateVolumeSecretItem { /** * Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. */ mode?: number; /** * The relative path of the secret in the container. */ path: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version */ version?: string; } interface WorkerPoolTemplateVpcAccess { /** * VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. */ connector?: string; /** * Traffic VPC egress settings. * Possible values are: `ALL_TRAFFIC`, `PRIVATE_RANGES_ONLY`. */ egress: string; /** * Direct VPC egress settings. Currently only single network interface is supported. * Structure is documented below. */ networkInterfaces?: outputs.cloudrunv2.WorkerPoolTemplateVpcAccessNetworkInterface[]; } interface WorkerPoolTemplateVpcAccessNetworkInterface { /** * The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be * looked up from the subnetwork. */ network: string; /** * The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both * network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the * subnetwork with the same name with the network will be used. */ subnetwork: string; /** * Network tags applied to this Cloud Run WorkerPool. 
*/ tags?: string[]; } interface WorkerPoolTerminalCondition { /** * (Output) * A reason for the execution condition. */ executionReason: string; /** * (Output) * Last time the condition transitioned from one status to another. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the current status. */ message: string; /** * (Output) * A common (workerPool-level) reason for this condition. */ reason: string; /** * (Output) * A reason for the revision condition. */ revisionReason: string; /** * (Output) * How to interpret failures of this condition, one of Error, Warning, Info */ severity: string; /** * (Output) * State of the condition. */ state: string; /** * (Output) * The allocation type for this instance split. */ type: string; } } export declare namespace cloudscheduler { interface JobAppEngineHttpTarget { /** * App Engine Routing setting for the job. * Structure is documented below. */ appEngineRouting?: outputs.cloudscheduler.JobAppEngineHttpTargetAppEngineRouting; /** * HTTP request body. * A request body is allowed only if the HTTP method is POST or PUT. * It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. * A base64-encoded string. */ body?: string; /** * HTTP request headers. * This map contains the header field names and values. * Headers can be set when the job is created. */ headers?: { [key: string]: string; }; /** * Which HTTP method to use for the request. */ httpMethod?: string; /** * The relative URI. * The relative URL must begin with "/" and must be a valid HTTP relative URL. * It can contain a path, query string arguments, and \# fragments. * If the relative URL is empty, then the root path "/" will be used. 
* No spaces are allowed, and the maximum length allowed is 2083 characters */ relativeUri: string; } interface JobAppEngineHttpTargetAppEngineRouting { /** * App instance. * By default, the job is sent to an instance which is available when the job is attempted. */ instance?: string; /** * App service. * By default, the job is sent to the service which is the default service when the job is attempted. */ service?: string; /** * App version. * By default, the job is sent to the version which is the default version when the job is attempted. */ version?: string; } interface JobHttpTarget { /** * HTTP request body. * A request body is allowed only if the HTTP method is POST, PUT, or PATCH. * It is an error to set body on a job with an incompatible HttpMethod. * A base64-encoded string. */ body?: string; /** * This map contains the header field names and values. * Repeated headers are not supported, but a header value can contain commas. */ headers?: { [key: string]: string; }; /** * Which HTTP method to use for the request. */ httpMethod?: string; /** * Contains information needed for generating an OAuth token. * This type of authorization should be used when sending requests to a GCP endpoint. * Structure is documented below. */ oauthToken?: outputs.cloudscheduler.JobHttpTargetOauthToken; /** * Contains information needed for generating an OpenID Connect token. * This type of authorization should be used when sending requests to third party endpoints or Cloud Run. * Structure is documented below. */ oidcToken?: outputs.cloudscheduler.JobHttpTargetOidcToken; /** * The full URI path that the request will be sent to. */ uri: string; } interface JobHttpTargetOauthToken { /** * OAuth scope to be used for generating OAuth access token. If not specified, * "https://www.googleapis.com/auth/cloud-platform" will be used. */ scope?: string; /** * Service account email to be used for generating OAuth token. * The service account must be within the same project as the job. 
*/ serviceAccountEmail: string; } interface JobHttpTargetOidcToken { /** * Audience to be used when generating OIDC token. If not specified, * the URI specified in target will be used. */ audience?: string; /** * Service account email to be used for generating OIDC token. * The service account must be within the same project as the job. */ serviceAccountEmail: string; } interface JobPubsubTarget { /** * Attributes for PubsubMessage. * Pubsub message must contain either non-empty data, or at least one attribute. */ attributes?: { [key: string]: string; }; /** * The message payload for PubsubMessage. * Pubsub message must contain either non-empty data, or at least one attribute. * A base64-encoded string. */ data?: string; /** * The full resource name for the Cloud Pub/Sub topic to which * messages will be published when a job is delivered. ~>**NOTE:** * The topic name must be in the same format as required by PubSub's * PublishRequest.name, e.g. `projects/my-project/topics/my-topic`. */ topicName: string; } interface JobRetryConfig { /** * The maximum amount of time to wait before retrying a job after it fails. * A duration in seconds with up to nine fractional digits, terminated by 's'. */ maxBackoffDuration: string; /** * The time between retries will double maxDoublings times. * A job's retry interval starts at minBackoffDuration, * then doubles maxDoublings times, then increases linearly, * and finally retries retries at intervals of maxBackoffDuration up to retryCount times. */ maxDoublings: number; /** * The time limit for retrying a failed job, measured from time when an execution was first attempted. * If specified with retryCount, the job will be retried until both limits are reached. * A duration in seconds with up to nine fractional digits, terminated by 's'. */ maxRetryDuration: string; /** * The minimum amount of time to wait before retrying a job after it fails. * A duration in seconds with up to nine fractional digits, terminated by 's'. 
*/ minBackoffDuration: string; /** * The number of attempts that the system will make to run a * job using the exponential backoff procedure described by maxDoublings. * Values greater than 5 and negative values are not allowed. */ retryCount: number; } } export declare namespace cloudsecuritycompliance { interface CloudControlParameterSpec { /** * Possible parameter value types. * Structure is documented below. */ defaultValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecDefaultValue; /** * The description of the parameter. The maximum length is 2000 characters. */ description?: string; /** * The display name of the parameter. The maximum length is 200 characters. */ displayName?: string; /** * if the parameter is required */ isRequired: boolean; /** * The name of the parameter. */ name: string; /** * The parameter spec of the cloud control. * Structure is documented below. */ subParameters?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameter[]; /** * List of parameter substitutions. * Structure is documented below. */ substitutionRules?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubstitutionRule[]; /** * Validation of the parameter. * Structure is documented below. */ validation?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidation; /** * Parameter value type. * Possible values: * STRING * BOOLEAN * STRINGLIST * NUMBER * ONEOF */ valueType: string; } interface CloudControlParameterSpecDefaultValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * Sub-parameter values. * Structure is documented below. */ oneofValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecDefaultValueOneofValue; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecDefaultValueStringListValue; /** * Represents a string value. 
*/ stringValue?: string; } interface CloudControlParameterSpecDefaultValueOneofValue { /** * The name of the parameter. */ name?: string; /** * The value of the parameter. * Structure is documented below. */ parameterValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecDefaultValueOneofValueParameterValue; } interface CloudControlParameterSpecDefaultValueOneofValueParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecDefaultValueOneofValueParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecDefaultValueOneofValueParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecDefaultValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecSubParameter { /** * Possible parameter value types. * Structure is documented below. */ defaultValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterDefaultValue; /** * The description of the parameter. The maximum length is 2000 characters. */ description?: string; /** * The display name of the parameter. The maximum length is 200 characters. */ displayName?: string; /** * if the parameter is required */ isRequired: boolean; /** * The name of the parameter. */ name: string; /** * List of parameter substitutions. * Structure is documented below. */ substitutionRules?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterSubstitutionRule[]; /** * Validation of the parameter. * Structure is documented below. */ validation?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidation; /** * Parameter value type. 
* Possible values: * STRING * BOOLEAN * STRINGLIST * NUMBER * ONEOF */ valueType: string; } interface CloudControlParameterSpecSubParameterDefaultValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * Sub-parameter values. * Structure is documented below. */ oneofValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterDefaultValueOneofValue; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterDefaultValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecSubParameterDefaultValueOneofValue { /** * The name of the parameter. */ name?: string; /** * The value of the parameter. * Structure is documented below. */ parameterValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterDefaultValueOneofValueParameterValue; } interface CloudControlParameterSpecSubParameterDefaultValueOneofValueParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterDefaultValueOneofValueParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecSubParameterDefaultValueOneofValueParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecSubParameterDefaultValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecSubParameterSubstitutionRule { /** * Attribute at the given path is substituted entirely. * Structure is documented below. 
*/ attributeSubstitutionRule?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterSubstitutionRuleAttributeSubstitutionRule; /** * Placeholder is substituted in the rendered string. * Structure is documented below. */ placeholderSubstitutionRule?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterSubstitutionRulePlaceholderSubstitutionRule; } interface CloudControlParameterSpecSubParameterSubstitutionRuleAttributeSubstitutionRule { /** * Fully qualified proto attribute path (in dot notation). * Example: rules[0].cel_expression.resource_types_values */ attribute?: string; } interface CloudControlParameterSpecSubParameterSubstitutionRulePlaceholderSubstitutionRule { /** * Fully qualified proto attribute path (e.g., dot notation) */ attribute?: string; } interface CloudControlParameterSpecSubParameterValidation { /** * Allowed set of values for the parameter. * Structure is documented below. */ allowedValues?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationAllowedValues; /** * Number range for number parameters. * Structure is documented below. */ intRange?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationIntRange; /** * Regular Expression Validator for parameter values. * Structure is documented below. */ regexpPattern?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationRegexpPattern; } interface CloudControlParameterSpecSubParameterValidationAllowedValues { /** * List of allowed values for the parameter. * Structure is documented below. */ values: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationAllowedValuesValue[]; } interface CloudControlParameterSpecSubParameterValidationAllowedValuesValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * Sub-parameter values. * Structure is documented below. 
*/ oneofValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationAllowedValuesValueOneofValue; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationAllowedValuesValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecSubParameterValidationAllowedValuesValueOneofValue { /** * The name of the parameter. */ name?: string; /** * The value of the parameter. * Structure is documented below. */ parameterValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationAllowedValuesValueOneofValueParameterValue; } interface CloudControlParameterSpecSubParameterValidationAllowedValuesValueOneofValueParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubParameterValidationAllowedValuesValueOneofValueParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecSubParameterValidationAllowedValuesValueOneofValueParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecSubParameterValidationAllowedValuesValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecSubParameterValidationIntRange { /** * Maximum allowed value for the numeric parameter (inclusive). */ max: string; /** * Minimum allowed value for the numeric parameter (inclusive). */ min: string; } interface CloudControlParameterSpecSubParameterValidationRegexpPattern { /** * Regex Pattern to match the value(s) of parameter. 
*/ pattern: string; } interface CloudControlParameterSpecSubstitutionRule { /** * Attribute at the given path is substituted entirely. * Structure is documented below. */ attributeSubstitutionRule?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubstitutionRuleAttributeSubstitutionRule; /** * Placeholder is substituted in the rendered string. * Structure is documented below. */ placeholderSubstitutionRule?: outputs.cloudsecuritycompliance.CloudControlParameterSpecSubstitutionRulePlaceholderSubstitutionRule; } interface CloudControlParameterSpecSubstitutionRuleAttributeSubstitutionRule { /** * Fully qualified proto attribute path (in dot notation). * Example: rules[0].cel_expression.resource_types_values */ attribute?: string; } interface CloudControlParameterSpecSubstitutionRulePlaceholderSubstitutionRule { /** * Fully qualified proto attribute path (e.g., dot notation) */ attribute?: string; } interface CloudControlParameterSpecValidation { /** * Allowed set of values for the parameter. * Structure is documented below. */ allowedValues?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationAllowedValues; /** * Number range for number parameters. * Structure is documented below. */ intRange?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationIntRange; /** * Regular Expression Validator for parameter values. * Structure is documented below. */ regexpPattern?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationRegexpPattern; } interface CloudControlParameterSpecValidationAllowedValues { /** * List of allowed values for the parameter. * Structure is documented below. */ values: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationAllowedValuesValue[]; } interface CloudControlParameterSpecValidationAllowedValuesValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * Sub-parameter values. * Structure is documented below. 
*/ oneofValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationAllowedValuesValueOneofValue; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationAllowedValuesValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecValidationAllowedValuesValueOneofValue { /** * The name of the parameter. */ name?: string; /** * The value of the parameter. * Structure is documented below. */ parameterValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationAllowedValuesValueOneofValueParameterValue; } interface CloudControlParameterSpecValidationAllowedValuesValueOneofValueParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.CloudControlParameterSpecValidationAllowedValuesValueOneofValueParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface CloudControlParameterSpecValidationAllowedValuesValueOneofValueParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecValidationAllowedValuesValueStringListValue { /** * The strings in the list. */ values: string[]; } interface CloudControlParameterSpecValidationIntRange { /** * Maximum allowed value for the numeric parameter (inclusive). */ max: string; /** * Minimum allowed value for the numeric parameter (inclusive). */ min: string; } interface CloudControlParameterSpecValidationRegexpPattern { /** * Regex Pattern to match the value(s) of parameter. */ pattern: string; } interface CloudControlRule { /** * A [CEL * expression](https://cloud.google.com/certificate-authority-service/docs/using-cel). * Structure is documented below. 
*/ celExpression?: outputs.cloudsecuritycompliance.CloudControlRuleCelExpression; /** * Description of the Rule. The maximum length is 2000 characters. */ description?: string; /** * The functionality enabled by the Rule. */ ruleActionTypes: string[]; } interface CloudControlRuleCelExpression { /** * Logic expression in CEL language. * The max length of the condition is 1000 characters. */ expression: string; /** * A list of strings. * Structure is documented below. */ resourceTypesValues?: outputs.cloudsecuritycompliance.CloudControlRuleCelExpressionResourceTypesValues; } interface CloudControlRuleCelExpressionResourceTypesValues { /** * The strings in the list. */ values: string[]; } interface FrameworkCloudControlDetail { /** * Major revision of cloudcontrol */ majorRevisionId: string; /** * The name of the CloudControl in the format: * "organizations/{organization}/locations/{location}/cloudControls/{cloud-control}" */ name: string; /** * Parameters is a key-value pair that is required by the CloudControl. The * specification of these parameters will be present in cloudcontrol.Eg: { * "name": "location","value": "us-west-1"}. * Structure is documented below. */ parameters?: outputs.cloudsecuritycompliance.FrameworkCloudControlDetailParameter[]; } interface FrameworkCloudControlDetailParameter { /** * The name of the parameter. */ name: string; /** * Possible parameter value types. * Structure is documented below. */ parameterValue: outputs.cloudsecuritycompliance.FrameworkCloudControlDetailParameterParameterValue; } interface FrameworkCloudControlDetailParameterParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * Sub-parameter values. * Structure is documented below. */ oneofValue?: outputs.cloudsecuritycompliance.FrameworkCloudControlDetailParameterParameterValueOneofValue; /** * A list of strings. * Structure is documented below. 
*/ stringListValue?: outputs.cloudsecuritycompliance.FrameworkCloudControlDetailParameterParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface FrameworkCloudControlDetailParameterParameterValueOneofValue { /** * The name of the parameter. */ name?: string; /** * The value of the parameter. * Structure is documented below. */ parameterValue?: outputs.cloudsecuritycompliance.FrameworkCloudControlDetailParameterParameterValueOneofValueParameterValue; } interface FrameworkCloudControlDetailParameterParameterValueOneofValueParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.FrameworkCloudControlDetailParameterParameterValueOneofValueParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface FrameworkCloudControlDetailParameterParameterValueOneofValueParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface FrameworkCloudControlDetailParameterParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface FrameworkDeploymentCloudControlDeploymentReference { /** * (Output) * The name of the CloudControlDeployment. The format is: * organizations/{org}/locations/{location}/cloudControlDeployments/{cloud_control_deployment_id} */ cloudControlDeployment: string; } interface FrameworkDeploymentCloudControlMetadata { /** * CloudControlDetails contains the details of a CloudControl. * Structure is documented below. */ cloudControlDetails: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetails; /** * Enforcement mode for the framework deployment. 
* Possible values: * PREVENTIVE * DETECTIVE * AUDIT */ enforcementMode: string; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetails { /** * Major revision of cloudcontrol */ majorRevisionId: string; /** * The name of the CloudControl in the format: * "organizations/{organization}/locations/{location}/ * cloudControls/{cloud-control}" */ name: string; /** * Parameters is a key-value pair that is required by the CloudControl. The * specification of these parameters will be present in cloudcontrol.Eg: { * "name": "location","value": "us-west-1"}. * Structure is documented below. */ parameters?: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameter[]; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameter { /** * The name of the parameter. */ name: string; /** * Possible parameter value types. * Structure is documented below. */ parameterValue: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValue; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * Sub-parameter values. * Structure is documented below. */ oneofValue?: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueOneofValue; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueOneofValue { /** * The name of the parameter. */ name?: string; /** * The value of the parameter. * Structure is documented below. 
*/ parameterValue?: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueOneofValueParameterValue; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueOneofValueParameterValue { /** * Represents a boolean value. */ boolValue?: boolean; /** * Represents a double value. */ numberValue?: number; /** * A list of strings. * Structure is documented below. */ stringListValue?: outputs.cloudsecuritycompliance.FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueOneofValueParameterValueStringListValue; /** * Represents a string value. */ stringValue?: string; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueOneofValueParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface FrameworkDeploymentCloudControlMetadataCloudControlDetailsParameterParameterValueStringListValue { /** * The strings in the list. */ values: string[]; } interface FrameworkDeploymentFramework { /** * In the format: * organizations/{org}/locations/{location}/frameworks/{framework} */ framework: string; /** * Major revision id of the framework. */ majorRevisionId: string; } interface FrameworkDeploymentTargetResourceConfig { /** * CRM node in format organizations/{organization}, folders/{folder}, * or projects/{project} */ existingTargetResource?: string; /** * TargetResourceCreationConfig contains the config to create a new resource to * be used as the targetResource of a deployment. * Structure is documented below. */ targetResourceCreationConfig?: outputs.cloudsecuritycompliance.FrameworkDeploymentTargetResourceConfigTargetResourceCreationConfig; } interface FrameworkDeploymentTargetResourceConfigTargetResourceCreationConfig { /** * FolderCreationConfig contains the config to create a new folder to be used * as the targetResource of a deployment. * Structure is documented below. 
*/ folderCreationConfig?: outputs.cloudsecuritycompliance.FrameworkDeploymentTargetResourceConfigTargetResourceCreationConfigFolderCreationConfig; /** * ProjectCreationConfig contains the config to create a new project to be used * as the targetResource of a deployment. * Structure is documented below. */ projectCreationConfig?: outputs.cloudsecuritycompliance.FrameworkDeploymentTargetResourceConfigTargetResourceCreationConfigProjectCreationConfig; } interface FrameworkDeploymentTargetResourceConfigTargetResourceCreationConfigFolderCreationConfig { /** * Display name of the folder to be created */ folderDisplayName: string; /** * The parent of the folder to be created. It can be an organizations/{org} or * folders/{folder} */ parent: string; } interface FrameworkDeploymentTargetResourceConfigTargetResourceCreationConfigProjectCreationConfig { /** * Billing account id to be used for the project. */ billingAccountId: string; /** * organizations/{org} or folders/{folder} */ parent: string; /** * Display name of the project to be created. */ projectDisplayName: string; } } export declare namespace cloudtasks { interface QueueAppEngineRoutingOverride { /** * (Output) * The host that the task is sent to. */ host: string; /** * App instance. * By default, the task is sent to an instance which is available when the task is attempted. */ instance?: string; /** * App service. * By default, the task is sent to the service which is the default service when the task is attempted. */ service?: string; /** * App version. * By default, the task is sent to the version which is the default version when the task is attempted. */ version?: string; } interface QueueHttpTarget { /** * HTTP target headers. * This map contains the header field names and values. * Headers will be set when running the CreateTask and/or BufferTask. * These headers represent a subset of the headers that will be configured for the task's HTTP request. * Some HTTP request headers will be ignored or replaced. 
* Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. * Structure is documented below. */ headerOverrides?: outputs.cloudtasks.QueueHttpTargetHeaderOverride[]; /** * The HTTP method to use for the request. * When specified, it overrides HttpRequest for the task. * Note that if the value is set to GET the body of the task will be ignored at execution time. * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. */ httpMethod: string; /** * If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. * Structure is documented below. */ oauthToken?: outputs.cloudtasks.QueueHttpTargetOauthToken; /** * If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. * Structure is documented below. */ oidcToken?: outputs.cloudtasks.QueueHttpTargetOidcToken; /** * URI override. * When specified, overrides the execution URI for all the tasks in the queue. * Structure is documented below. */ uriOverride?: outputs.cloudtasks.QueueHttpTargetUriOverride; } interface QueueHttpTargetHeaderOverride { /** * Header embodying a key and a value. * Structure is documented below. 
*/ header: outputs.cloudtasks.QueueHttpTargetHeaderOverrideHeader; } interface QueueHttpTargetHeaderOverrideHeader { /** * The Key of the header. */ key: string; /** * The Value of the header. */ value: string; } interface QueueHttpTargetOauthToken { /** * OAuth scope to be used for generating OAuth access token. * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. */ scope: string; /** * Service account email to be used for generating OAuth token. * The service account must be within the same project as the queue. * The caller must have iam.serviceAccounts.actAs permission for the service account. */ serviceAccountEmail: string; } interface QueueHttpTargetOidcToken { /** * Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. */ audience: string; /** * Service account email to be used for generating OIDC token. * The service account must be within the same project as the queue. * The caller must have iam.serviceAccounts.actAs permission for the service account. */ serviceAccountEmail: string; } interface QueueHttpTargetUriOverride { /** * Host override. * When specified, replaces the host part of the task URL. * For example, if the task URL is "https://www.google.com", and host value * is set to "example.net", the overridden URI will be changed to "https://example.net". * Host value cannot be an empty string (INVALID_ARGUMENT). */ host?: string; /** * URI path. * When specified, replaces the existing path of the task URL. * Setting the path value to an empty string clears the URI path segment. * Structure is documented below. */ pathOverride?: outputs.cloudtasks.QueueHttpTargetUriOverridePathOverride; /** * Port override. * When specified, replaces the port part of the task URI. * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. * Note that the port value must be a positive integer. 
* Setting the port to 0 (Zero) clears the URI port. */ port?: string; /** * URI query. * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. * Structure is documented below. */ queryOverride?: outputs.cloudtasks.QueueHttpTargetUriOverrideQueryOverride; /** * Scheme override. * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). * Possible values are: `HTTP`, `HTTPS`. */ scheme: string; /** * URI Override Enforce Mode * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. */ uriOverrideEnforceMode: string; } interface QueueHttpTargetUriOverridePathOverride { /** * The URI path (e.g., /users/1234). Default is an empty string. */ path: string; } interface QueueHttpTargetUriOverrideQueryOverride { /** * The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. */ queryParams: string; } interface QueueIamBindingCondition { description?: string; expression: string; title: string; } interface QueueIamMemberCondition { description?: string; expression: string; title: string; } interface QueueRateLimits { /** * (Output) * The max burst size. * Max burst size limits how fast tasks in queue are processed when many tasks are * in the queue and the rate is high. This field allows the queue to have a high * rate so processing starts shortly after a task is enqueued, but still limits * resource usage when many tasks are enqueued in a short period of time. */ maxBurstSize: number; /** * The maximum number of concurrent tasks that Cloud Tasks allows to * be dispatched for this queue. After this threshold has been * reached, Cloud Tasks stops dispatching tasks until the number of * concurrent requests decreases. */ maxConcurrentDispatches: number; /** * The maximum rate at which tasks are dispatched from this queue. 
* If unspecified when the queue is created, Cloud Tasks will pick the default. */ maxDispatchesPerSecond: number; } interface QueueRetryConfig { /** * Number of attempts per task. * Cloud Tasks will attempt the task maxAttempts times (that is, if * the first attempt fails, then there will be maxAttempts - 1 * retries). Must be >= -1. * If unspecified when the queue is created, Cloud Tasks will pick * the default. * -1 indicates unlimited attempts. */ maxAttempts: number; /** * A task will be scheduled for retry between minBackoff and * maxBackoff duration after it fails, if the queue's RetryConfig * specifies that the task should be retried. */ maxBackoff: string; /** * The time between retries will double maxDoublings times. * A task's retry interval starts at minBackoff, then doubles maxDoublings times, * then increases linearly, and finally retries retries at intervals of maxBackoff * up to maxAttempts times. */ maxDoublings: number; /** * If positive, maxRetryDuration specifies the time limit for * retrying a failed task, measured from when the task was first * attempted. Once maxRetryDuration time has passed and the task has * been attempted maxAttempts times, no further attempts will be * made and the task will be deleted. * If zero, then the task age is unlimited. */ maxRetryDuration: string; /** * A task will be scheduled for retry between minBackoff and * maxBackoff duration after it fails, if the queue's RetryConfig * specifies that the task should be retried. */ minBackoff: string; } interface QueueStackdriverLoggingConfig { /** * Specifies the fraction of operations to write to Stackdriver Logging. * This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the * default and means that no operations are logged. */ samplingRatio: number; } } export declare namespace colab { interface NotebookExecutionCustomEnvironmentSpec { /** * 'The machine configuration of the runtime.' * Structure is documented below. 
*/ machineSpec?: outputs.colab.NotebookExecutionCustomEnvironmentSpecMachineSpec; /** * The network configuration for the runtime. * Structure is documented below. */ networkSpec?: outputs.colab.NotebookExecutionCustomEnvironmentSpecNetworkSpec; /** * The configuration for the data disk of the runtime. * Structure is documented below. */ persistentDiskSpec?: outputs.colab.NotebookExecutionCustomEnvironmentSpecPersistentDiskSpec; } interface NotebookExecutionCustomEnvironmentSpecMachineSpec { /** * The number of accelerators used by the runtime. */ acceleratorCount?: number; /** * The type of hardware accelerator used by the runtime. If specified, acceleratorCount must also be specified. */ acceleratorType?: string; /** * The Compute Engine machine type selected for the runtime. */ machineType?: string; } interface NotebookExecutionCustomEnvironmentSpecNetworkSpec { /** * Enable public internet access for the runtime. */ enableInternetAccess?: boolean; /** * The name of the VPC that this runtime is in. */ network?: string; /** * The name of the subnetwork that this runtime is in. */ subnetwork?: string; } interface NotebookExecutionCustomEnvironmentSpecPersistentDiskSpec { /** * The disk size of the runtime in GB. If specified, the diskType must also be specified. The minimum size is 10GB and the maximum is 65536GB. */ diskSizeGb?: string; /** * The type of the persistent disk. */ diskType?: string; } interface NotebookExecutionDataformRepositorySource { /** * The commit SHA to read repository with. If unset, the file will be read at HEAD. */ commitSha?: string; /** * The resource name of the Dataform Repository. */ dataformRepositoryResourceName: string; } interface NotebookExecutionDirectNotebookSource { /** * The base64-encoded contents of the input notebook file. */ content: string; } interface NotebookExecutionGcsNotebookSource { /** * The version of the Cloud Storage object to read. If unset, the current version of the object is read. 
See https://cloud.google.com/storage/docs/metadata#generation-number. */ generation?: string; /** * The Cloud Storage uri pointing to the ipynb file. */ uri: string; } interface RuntimeNotebookRuntimeTemplateRef { /** * The resource name of the NotebookRuntimeTemplate based on which a NotebookRuntime will be created. */ notebookRuntimeTemplate: string; } interface RuntimeTemplateDataPersistentDiskSpec { /** * The disk size of the runtime in GB. If specified, the diskType must also be specified. The minimum size is 10GB and the maximum is 65536GB. */ diskSizeGb: string; /** * The type of the persistent disk. */ diskType: string; } interface RuntimeTemplateEncryptionSpec { /** * The Cloud KMS encryption key (customer-managed encryption key) used to protect the runtime. */ kmsKeyName?: string; } interface RuntimeTemplateEucConfig { /** * Disable end user credential access for the runtime. */ eucDisabled: boolean; } interface RuntimeTemplateIamBindingCondition { description?: string; expression: string; title: string; } interface RuntimeTemplateIamMemberCondition { description?: string; expression: string; title: string; } interface RuntimeTemplateIdleShutdownConfig { /** * The duration after which the runtime is automatically shut down. An input of 0s disables the idle shutdown feature, and a valid range is [10m, 24h]. */ idleTimeout: string; } interface RuntimeTemplateMachineSpec { /** * The number of accelerators used by the runtime. */ acceleratorCount: number; /** * The type of hardware accelerator used by the runtime. If specified, acceleratorCount must also be specified. */ acceleratorType?: string; /** * The Compute Engine machine type selected for the runtime. */ machineType: string; } interface RuntimeTemplateNetworkSpec { /** * Enable public internet access for the runtime. */ enableInternetAccess?: boolean; /** * The name of the VPC that this runtime is in. */ network: string; /** * The name of the subnetwork that this runtime is in. 
*/ subnetwork?: string; } interface RuntimeTemplateShieldedVmConfig { /** * Enables secure boot for the runtime. */ enableSecureBoot: boolean; } interface RuntimeTemplateSoftwareConfig { /** * Environment variables to be passed to the container. * Structure is documented below. */ envs?: outputs.colab.RuntimeTemplateSoftwareConfigEnv[]; /** * Post startup script config. * Structure is documented below. */ postStartupScriptConfig?: outputs.colab.RuntimeTemplateSoftwareConfigPostStartupScriptConfig; } interface RuntimeTemplateSoftwareConfigEnv { /** * Name of the environment variable. Must be a valid C identifier. */ name?: string; /** * Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. */ value?: string; } interface RuntimeTemplateSoftwareConfigPostStartupScriptConfig { /** * Post startup script to run after runtime is started. */ postStartupScript?: string; /** * Post startup script behavior that defines download and execution behavior. * Possible values are: `RUN_ONCE`, `RUN_EVERY_START`, `DOWNLOAD_AND_RUN_EVERY_START`. */ postStartupScriptBehavior?: string; /** * Post startup script url to download. Example: https://bucket/script.sh. */ postStartupScriptUrl?: string; } interface ScheduleCreateNotebookExecutionJobRequest { /** * The NotebookExecutionJob to create. * Structure is documented below. */ notebookExecutionJob: outputs.colab.ScheduleCreateNotebookExecutionJobRequestNotebookExecutionJob; } interface ScheduleCreateNotebookExecutionJobRequestNotebookExecutionJob { /** * The Dataform Repository containing the input notebook. * Structure is documented below. 
*/ dataformRepositorySource?: outputs.colab.ScheduleCreateNotebookExecutionJobRequestNotebookExecutionJobDataformRepositorySource; /** * Required. The display name of the Notebook Execution. */ displayName: string; /** * Max running time of the execution job in seconds (default 86400s / 24 hrs). A duration in seconds with up to nine fractional digits, ending with "s". Example: "3.5s". */ executionTimeout?: string; /** * The user email to run the execution as. */ executionUser?: string; /** * The Cloud Storage uri for the input notebook. * Structure is documented below. */ gcsNotebookSource?: outputs.colab.ScheduleCreateNotebookExecutionJobRequestNotebookExecutionJobGcsNotebookSource; /** * The Cloud Storage location to upload the result to. Format:`gs://bucket-name` */ gcsOutputUri: string; /** * The NotebookRuntimeTemplate to source compute configuration from. */ notebookRuntimeTemplateResourceName: string; /** * The service account to run the execution as. */ serviceAccount?: string; } interface ScheduleCreateNotebookExecutionJobRequestNotebookExecutionJobDataformRepositorySource { /** * The commit SHA to read repository with. If unset, the file will be read at HEAD. */ commitSha?: string; /** * The resource name of the Dataform Repository. */ dataformRepositoryResourceName: string; } interface ScheduleCreateNotebookExecutionJobRequestNotebookExecutionJobGcsNotebookSource { /** * The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. */ generation?: string; /** * The Cloud Storage uri pointing to the ipynb file. Format: gs://bucket/notebook_file.ipynb */ uri: string; } } export declare namespace composer { interface EnvironmentConfig { /** * The URI of the Apache Airflow Web UI hosted within this * environment. */ airflowUri: string; /** * The Cloud Storage prefix of the DAGs for this environment. 
* Although Cloud Storage objects reside in a flat namespace, a * hierarchical file tree can be simulated using '/'-delimited * object name prefixes. DAG objects for this environment * reside in a simulated directory with this prefix. */ dagGcsPrefix: string; /** * The configuration setting for Airflow data retention mechanism. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4. or newer */ dataRetentionConfig: outputs.composer.EnvironmentConfigDataRetentionConfig; /** * The configuration of Cloud SQL instance that is used by the Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ databaseConfig: outputs.composer.EnvironmentConfigDatabaseConfig; /** * Optional. If true, builds performed during operations that install Python packages have only private connectivity to Google services. If false, the builds also have access to the internet. */ enablePrivateBuildsOnly: boolean; /** * Optional. If true, a private Composer environment will be created. */ enablePrivateEnvironment: boolean; /** * The encryption options for the Composer environment and its dependencies. */ encryptionConfig: outputs.composer.EnvironmentConfigEncryptionConfig; /** * The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ environmentSize: string; /** * The Kubernetes Engine cluster used to run this environment. */ gkeCluster: string; /** * The configuration for Cloud Composer maintenance window. */ maintenanceWindow: outputs.composer.EnvironmentConfigMaintenanceWindow; /** * Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. 
*/ masterAuthorizedNetworksConfig?: outputs.composer.EnvironmentConfigMasterAuthorizedNetworksConfig; /** * The configuration used for the Kubernetes Engine cluster. */ nodeConfig: outputs.composer.EnvironmentConfigNodeConfig; /** * The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ nodeCount: number; /** * The configuration used for the Private IP Cloud Composer environment. */ privateEnvironmentConfig: outputs.composer.EnvironmentConfigPrivateEnvironmentConfig; /** * The recovery configuration settings for the Cloud Composer environment */ recoveryConfig?: outputs.composer.EnvironmentConfigRecoveryConfig; /** * Whether high resilience is enabled or not. This field is supported for Cloud Composer environments in versions composer-2.1.15-airflow-*.*.* and newer. */ resilienceMode: string; /** * The configuration settings for software inside the environment. */ softwareConfig: outputs.composer.EnvironmentConfigSoftwareConfig; /** * The configuration settings for the Airflow web server App Engine instance. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ webServerConfig: outputs.composer.EnvironmentConfigWebServerConfig; /** * Network-level access control policy for the Airflow web server. */ webServerNetworkAccessControl: outputs.composer.EnvironmentConfigWebServerNetworkAccessControl; /** * The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ workloadsConfig: outputs.composer.EnvironmentConfigWorkloadsConfig; } interface EnvironmentConfigDataRetentionConfig { /** * Optional. The policy for airflow metadata database retention. 
*/ airflowMetadataRetentionConfigs: outputs.composer.EnvironmentConfigDataRetentionConfigAirflowMetadataRetentionConfig[]; /** * Optional. The configuration setting for Task Logs. */ taskLogsRetentionConfigs?: outputs.composer.EnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig[]; } interface EnvironmentConfigDataRetentionConfigAirflowMetadataRetentionConfig { /** * How many days data should be retained for. This field is supported for Cloud Composer environments in composer 3 and newer. */ retentionDays: number; /** * Whether database retention is enabled or not. This field is supported for Cloud Composer environments in composer 3 and newer. */ retentionMode: string; } interface EnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig { /** * Whether logs in cloud logging only is enabled or not. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4 and newer but not in composer-3* */ storageMode?: string; } interface EnvironmentConfigDatabaseConfig { /** * Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. */ machineType?: string; /** * Optional. Cloud SQL database preferred zone. */ zone?: string; } interface EnvironmentConfigEncryptionConfig { /** * Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. */ kmsKeyName: string; } interface EnvironmentConfigMaintenanceWindow { /** * Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to 'start_time'. */ endTime: string; /** * Maintenance window recurrence. Format is a subset of RFC-5545 (https://tools.ietf.org/html/rfc5545) 'RRULE'. The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. 
Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'. */ recurrence: string; /** * Start time of the first recurrence of the maintenance window. */ startTime: string; } interface EnvironmentConfigMasterAuthorizedNetworksConfig { /** * cidr_blocks define up to 50 external networks that could access Kubernetes master through HTTPS. */ cidrBlocks?: outputs.composer.EnvironmentConfigMasterAuthorizedNetworksConfigCidrBlock[]; /** * Whether or not master authorized networks is enabled. */ enabled: boolean; } interface EnvironmentConfigMasterAuthorizedNetworksConfigCidrBlock { /** * cidr_block must be specified in CIDR notation. */ cidrBlock: string; /** * display_name is a field for users to identify CIDR blocks. */ displayName?: string; } interface EnvironmentConfigNodeConfig { /** * IPv4 cidr range that will be used by Composer internal components. */ composerInternalIpv4CidrBlock: string; /** * PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment and point Cloud Composer environment to use. It is possible to share network attachment among many environments, provided enough IP addresses are available. */ composerNetworkAttachment: string; /** * The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ diskSizeGb: number; /** * Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent */ enableIpMasqAgent: boolean; /** * Configuration for controlling how IPs are allocated in the GKE cluster. Cannot be updated. 
*/ ipAllocationPolicy: outputs.composer.EnvironmentConfigNodeConfigIpAllocationPolicy; /** * The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ machineType: string; /** * The maximum pods per node in the GKE cluster allocated during environment creation. Lowering this value reduces IP address consumption by the Cloud Composer Kubernetes cluster. This value can only be set during environment creation, and only if the environment is VPC-Native. The range of possible values is 8-110, and the default is 32. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ maxPodsPerNode: number; /** * The Compute Engine network to be used for machine communications, specified as a self-link, relative resource name (for example: "projects/{project}/global/networks/{network}"), or by name. Must belong to the enclosing environment's project. The network must belong to the environment's project. If unspecified, the "default" network ID in the environment's project is used. If a Custom Subnet Network is provided, subnetwork must also be provided. */ network: string; /** * The set of Google API scopes to be made available on all node VMs. Cannot be updated. If empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ oauthScopes: string[]; /** * The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. 
If given, note that the service account must have roles/composer.worker for any GCP resources created under the Cloud Composer Environment. */ serviceAccount: string; /** * The Compute Engine subnetwork to be used for machine communications, specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region. */ subnetwork: string; /** * The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with RFC1035. Cannot be updated. */ tags?: string[]; /** * The Compute Engine zone in which to deploy the VMs running the Apache Airflow software, specified as the zone name or relative resource name (e.g. "projects/{project}/zones/{zone}"). Must belong to the enclosing environment's project and region. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ zone: string; } interface EnvironmentConfigNodeConfigIpAllocationPolicy { /** * The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either clusterSecondaryRangeName or clusterIpv4CidrBlock but not both. */ clusterIpv4CidrBlock?: string; /** * The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either clusterSecondaryRangeName or clusterIpv4CidrBlock but not both. 
For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. */ clusterSecondaryRangeName?: string; /** * The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either servicesSecondaryRangeName or servicesIpv4CidrBlock but not both. */ servicesIpv4CidrBlock?: string; /** * The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either servicesSecondaryRangeName or servicesIpv4CidrBlock but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. */ servicesSecondaryRangeName?: string; /** * Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. Defaults to true if the ipAllocationPolicy block is present in config. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. */ useIpAliases?: boolean; } interface EnvironmentConfigPrivateEnvironmentConfig { /** * When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. 
*/ cloudComposerConnectionSubnetwork: string; /** * The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ cloudComposerNetworkIpv4CidrBlock: string; /** * The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block. */ cloudSqlIpv4CidrBlock: string; /** * Mode of internal communication within the Composer environment. Must be one of "VPC_PEERING" or "PRIVATE_SERVICE_CONNECT". */ connectionType: string; /** * If true, access to the public endpoint of the GKE cluster is denied. If this field is set to true, ip_allocation_policy.use_ip_aliases must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ enablePrivateEndpoint?: boolean; /** * When enabled, IPs from public (non-RFC1918) ranges can be used for ip_allocation_policy.cluster_ipv4_cidr_block and ip_allocation_policy.service_ipv4_cidr_block. */ enablePrivatelyUsedPublicIps: boolean; /** * The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. If left blank, the default value of '172.16.0.0/28' is used. */ masterIpv4CidrBlock: string; /** * The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from masterIpv4CidrBlock and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. 
*/ webServerIpv4CidrBlock: string; } interface EnvironmentConfigRecoveryConfig { /** * The configuration settings for scheduled snapshots. */ scheduledSnapshotsConfig?: outputs.composer.EnvironmentConfigRecoveryConfigScheduledSnapshotsConfig; } interface EnvironmentConfigRecoveryConfigScheduledSnapshotsConfig { /** * When enabled, Cloud Composer periodically saves snapshots of your environment to a Cloud Storage bucket. */ enabled: boolean; /** * Snapshot schedule, in the unix-cron format. */ snapshotCreationSchedule?: string; /** * the URI of a bucket folder where to save the snapshot. */ snapshotLocation?: string; /** * A time zone for the schedule. This value is a time offset and does not take into account daylight saving time changes. Valid values are from UTC-12 to UTC+12. Examples: UTC, UTC-01, UTC+03. */ timeZone?: string; } interface EnvironmentConfigSoftwareConfig { /** * Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and cannot contain "=" or ";". Section and property names cannot contain characters: "." Apache Airflow configuration property names must be written in snake_case. Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are blacklisted, and cannot be overridden. */ airflowConfigOverrides?: { [key: string]: string; }; /** * The configuration for Cloud Data Lineage integration. 
Supported for Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and newer */ cloudDataLineageIntegration: outputs.composer.EnvironmentConfigSoftwareConfigCloudDataLineageIntegration; /** * Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+), and they cannot match any of the following reserved names: AIRFLOW_HOME C_FORCE_ROOT CONTAINER_NAME DAGS_FOLDER GCP_PROJECT GCS_BUCKET GKE_CLUSTER_NAME SQL_DATABASE SQL_INSTANCE SQL_PASSWORD SQL_PROJECT SQL_REGION SQL_USER. */ envVariables?: { [key: string]: string; }; /** * The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?). The Cloud Composer portion of the image version is a full semantic version, or an alias in the form of major version number or 'latest'. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. See documentation for more details and version list. */ imageVersion: string; /** * Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name (e.g. "numpy"). Values are the lowercase extras and version specifier (e.g. "==1.12.0", "[devel,gcp_api]", "[devel]>=1.8.2, <1.9.2"). To specify a package without pinning it to a version specifier, use the empty string as the value. 
*/ pypiPackages?: { [key: string]: string; }; /** * The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '2'. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. */ pythonVersion: string; /** * The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. */ schedulerCount: number; /** * Should be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. Used in Composer 3. */ webServerPluginsMode: string; } interface EnvironmentConfigSoftwareConfigCloudDataLineageIntegration { /** * Whether or not Cloud Data Lineage integration is enabled. */ enabled: boolean; } interface EnvironmentConfigWebServerConfig { /** * Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. */ machineType: string; } interface EnvironmentConfigWebServerNetworkAccessControl { /** * A collection of allowed IP ranges with descriptions. */ allowedIpRanges: outputs.composer.EnvironmentConfigWebServerNetworkAccessControlAllowedIpRange[]; } interface EnvironmentConfigWebServerNetworkAccessControlAllowedIpRange { /** * A description of this ip range. */ description?: string; /** * IP address or range, defined using CIDR notation, of requests that this rule applies to. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 or 2001:0db8:0000:0042:0000:8a2e:0370:7334. IP range prefixes should be properly truncated. For example, 1.2.3.4/24 should be truncated to 1.2.3.0/24. 
Similarly, for IPv6, 2001:db8::1/32 should be truncated to 2001:db8::/32. */ value: string; } interface EnvironmentConfigWorkloadsConfig { /** * Configuration for resources used by DAG processor. */ dagProcessor: outputs.composer.EnvironmentConfigWorkloadsConfigDagProcessor; /** * Configuration for resources used by Airflow schedulers. */ scheduler: outputs.composer.EnvironmentConfigWorkloadsConfigScheduler; /** * Configuration for resources used by Airflow triggerers. */ triggerer: outputs.composer.EnvironmentConfigWorkloadsConfigTriggerer; /** * Configuration for resources used by Airflow web server. */ webServer: outputs.composer.EnvironmentConfigWorkloadsConfigWebServer; /** * Configuration for resources used by Airflow workers. */ worker: outputs.composer.EnvironmentConfigWorkloadsConfigWorker; } interface EnvironmentConfigWorkloadsConfigDagProcessor { /** * Number of DAG processors. */ count: number; /** * CPU request and limit for DAG processor. */ cpu: number; /** * Memory (GB) request and limit for DAG processor. */ memoryGb: number; /** * Storage (GB) request and limit for DAG processor. */ storageGb: number; } interface EnvironmentConfigWorkloadsConfigScheduler { /** * The number of schedulers. */ count: number; /** * CPU request and limit for a single Airflow scheduler replica */ cpu: number; /** * Memory (GB) request and limit for a single Airflow scheduler replica. */ memoryGb: number; /** * Storage (GB) request and limit for a single Airflow scheduler replica. */ storageGb: number; } interface EnvironmentConfigWorkloadsConfigTriggerer { /** * The number of triggerers. */ count: number; /** * CPU request and limit for a single Airflow triggerer replica. */ cpu: number; /** * Memory (GB) request and limit for a single Airflow triggerer replica. */ memoryGb: number; } interface EnvironmentConfigWorkloadsConfigWebServer { /** * CPU request and limit for Airflow web server. */ cpu: number; /** * Memory (GB) request and limit for Airflow web server. 
*/ memoryGb: number; /** * Storage (GB) request and limit for Airflow web server. */ storageGb: number; } interface EnvironmentConfigWorkloadsConfigWorker { /** * CPU request and limit for a single Airflow worker replica. */ cpu: number; /** * Maximum number of workers for autoscaling. */ maxCount: number; /** * Memory (GB) request and limit for a single Airflow worker replica. */ memoryGb: number; /** * Minimum number of workers for autoscaling. */ minCount: number; /** * Storage (GB) request and limit for a single Airflow worker replica. */ storageGb: number; } interface EnvironmentStorageConfig { /** * Optional. Name of an existing Cloud Storage bucket to be used by the environment. */ bucket: string; } interface GetEnvironmentConfig { /** * The URI of the Apache Airflow Web UI hosted within the * environment. */ airflowUri: string; /** * The Cloud Storage prefix of the DAGs for the environment. */ dagGcsPrefix: string; /** * The configuration setting for Airflow data retention mechanism. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4. or newer */ dataRetentionConfigs: outputs.composer.GetEnvironmentConfigDataRetentionConfig[]; /** * The configuration of Cloud SQL instance that is used by the Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ databaseConfigs: outputs.composer.GetEnvironmentConfigDatabaseConfig[]; /** * Optional. If true, builds performed during operations that install Python packages have only private connectivity to Google services. If false, the builds also have access to the internet. */ enablePrivateBuildsOnly: boolean; /** * Optional. If true, a private Composer environment will be created. */ enablePrivateEnvironment: boolean; /** * The encryption options for the Composer environment and its dependencies. 
*/ encryptionConfigs: outputs.composer.GetEnvironmentConfigEncryptionConfig[]; /** * The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ environmentSize: string; /** * The Kubernetes Engine cluster used to run the environment. */ gkeCluster: string; /** * The configuration for Cloud Composer maintenance window. */ maintenanceWindows: outputs.composer.GetEnvironmentConfigMaintenanceWindow[]; /** * Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. */ masterAuthorizedNetworksConfigs: outputs.composer.GetEnvironmentConfigMasterAuthorizedNetworksConfig[]; /** * The configuration used for the Kubernetes Engine cluster. */ nodeConfigs: outputs.composer.GetEnvironmentConfigNodeConfig[]; /** * The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ nodeCount: number; /** * The configuration used for the Private IP Cloud Composer environment. */ privateEnvironmentConfigs: outputs.composer.GetEnvironmentConfigPrivateEnvironmentConfig[]; /** * The recovery configuration settings for the Cloud Composer environment */ recoveryConfigs: outputs.composer.GetEnvironmentConfigRecoveryConfig[]; /** * Whether high resilience is enabled or not. This field is supported for Cloud Composer environments in versions composer-2.1.15-airflow-*.*.* and newer. */ resilienceMode: string; /** * The configuration settings for software inside the environment. */ softwareConfigs: outputs.composer.GetEnvironmentConfigSoftwareConfig[]; /** * The configuration settings for the Airflow web server App Engine instance. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ webServerConfigs: outputs.composer.GetEnvironmentConfigWebServerConfig[]; /** * Network-level access control policy for the Airflow web server. */ webServerNetworkAccessControls: outputs.composer.GetEnvironmentConfigWebServerNetworkAccessControl[]; /** * The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ workloadsConfigs: outputs.composer.GetEnvironmentConfigWorkloadsConfig[]; } interface GetEnvironmentConfigDataRetentionConfig { /** * Optional. The policy for airflow metadata database retention. */ airflowMetadataRetentionConfigs: outputs.composer.GetEnvironmentConfigDataRetentionConfigAirflowMetadataRetentionConfig[]; /** * Optional. The configuration setting for Task Logs. */ taskLogsRetentionConfigs: outputs.composer.GetEnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig[]; } interface GetEnvironmentConfigDataRetentionConfigAirflowMetadataRetentionConfig { /** * How many days data should be retained for. This field is supported for Cloud Composer environments in composer 3 and newer. */ retentionDays: number; /** * Whether database retention is enabled or not. This field is supported for Cloud Composer environments in composer 3 and newer. */ retentionMode: string; } interface GetEnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig { /** * Whether logs in cloud logging only is enabled or not. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4 and newer but not in composer-3* */ storageMode: string; } interface GetEnvironmentConfigDatabaseConfig { /** * Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. 
*/ machineType: string; /** * Optional. Cloud SQL database preferred zone. */ zone: string; } interface GetEnvironmentConfigEncryptionConfig { /** * Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. */ kmsKeyName: string; } interface GetEnvironmentConfigMaintenanceWindow { /** * Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to 'start_time'. */ endTime: string; /** * Maintenance window recurrence. Format is a subset of RFC-5545 (https://tools.ietf.org/html/rfc5545) 'RRULE'. The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'. */ recurrence: string; /** * Start time of the first recurrence of the maintenance window. */ startTime: string; } interface GetEnvironmentConfigMasterAuthorizedNetworksConfig { /** * cidr_blocks define up to 50 external networks that could access Kubernetes master through HTTPS. */ cidrBlocks: outputs.composer.GetEnvironmentConfigMasterAuthorizedNetworksConfigCidrBlock[]; /** * Whether or not master authorized networks is enabled. */ enabled: boolean; } interface GetEnvironmentConfigMasterAuthorizedNetworksConfigCidrBlock { /** * cidr_block must be specified in CIDR notation. */ cidrBlock: string; /** * display_name is a field for users to identify CIDR blocks. */ displayName: string; } interface GetEnvironmentConfigNodeConfig { /** * IPv4 cidr range that will be used by Composer internal components. */ composerInternalIpv4CidrBlock: string; /** * PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment and point Cloud Composer environment to use. It is possible to share network attachment among many environments, provided enough IP addresses are available. */ composerNetworkAttachment: string; /** * The disk size in GB used for node VMs. 
Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ diskSizeGb: number; /** * Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent */ enableIpMasqAgent: boolean; /** * Configuration for controlling how IPs are allocated in the GKE cluster. Cannot be updated. */ ipAllocationPolicies: outputs.composer.GetEnvironmentConfigNodeConfigIpAllocationPolicy[]; /** * The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ machineType: string; /** * The maximum pods per node in the GKE cluster allocated during environment creation. Lowering this value reduces IP address consumption by the Cloud Composer Kubernetes cluster. This value can only be set during environment creation, and only if the environment is VPC-Native. The range of possible values is 8-110, and the default is 32. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ maxPodsPerNode: number; /** * The Compute Engine network to be used for machine communications, specified as a self-link, relative resource name (e.g. "projects/{project}/global/networks/{network}"), or by name. The network must belong to the environment's project. If unspecified, the "default" network ID in the environment's project is used. 
If a Custom Subnet Network is provided, subnetwork must also be provided. */ network: string; /** * The set of Google API scopes to be made available on all node VMs. Cannot be updated. If empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ oauthScopes: string[]; /** * The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. If given, note that the service account must have roles/composer.worker for any GCP resources created under the Cloud Composer Environment. */ serviceAccount: string; /** * The Compute Engine subnetwork to be used for machine communications, specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region. */ subnetwork: string; /** * The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with RFC1035. Cannot be updated. */ tags: string[]; /** * The Compute Engine zone in which to deploy the VMs running the Apache Airflow software, specified as the zone name or relative resource name (e.g. "projects/{project}/zones/{zone}"). Must belong to the enclosing environment's project and region. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ zone: string; } interface GetEnvironmentConfigNodeConfigIpAllocationPolicy { /** * The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. 
Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either clusterSecondaryRangeName or clusterIpv4CidrBlock but not both. */ clusterIpv4CidrBlock: string; /** * The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either clusterSecondaryRangeName or clusterIpv4CidrBlock but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. */ clusterSecondaryRangeName: string; /** * The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either servicesSecondaryRangeName or servicesIpv4CidrBlock but not both. */ servicesIpv4CidrBlock: string; /** * The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either servicesSecondaryRangeName or servicesIpv4CidrBlock but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when useIpAliases is true. */ servicesSecondaryRangeName: string; /** * Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. Defaults to true if the ipAllocationPolicy block is present in config. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. 
Environments in newer versions always use VPC-native GKE clusters. */ useIpAliases: boolean; } interface GetEnvironmentConfigPrivateEnvironmentConfig { /** * When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ cloudComposerConnectionSubnetwork: string; /** * The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. */ cloudComposerNetworkIpv4CidrBlock: string; /** * The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block. */ cloudSqlIpv4CidrBlock: string; /** * Mode of internal communication within the Composer environment. Must be one of "VPC_PEERING" or "PRIVATE_SERVICE_CONNECT". */ connectionType: string; /** * If true, access to the public endpoint of the GKE cluster is denied. If this field is set to true, ip_allocation_policy.use_ip_aliases must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ enablePrivateEndpoint: boolean; /** * When enabled, IPs from public (non-RFC1918) ranges can be used for ip_allocation_policy.cluster_ipv4_cidr_block and ip_allocation_policy.service_ipv4_cidr_block. */ enablePrivatelyUsedPublicIps: boolean; /** * The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the cluster master or set of masters and to the internal load balancer virtual IP. 
This range must not overlap with any other ranges in use within the cluster's network. If left blank, the default value of '172.16.0.0/28' is used. */ masterIpv4CidrBlock: string; /** * The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from masterIpv4CidrBlock and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. */ webServerIpv4CidrBlock: string; } interface GetEnvironmentConfigRecoveryConfig { /** * The configuration settings for scheduled snapshots. */ scheduledSnapshotsConfigs: outputs.composer.GetEnvironmentConfigRecoveryConfigScheduledSnapshotsConfig[]; } interface GetEnvironmentConfigRecoveryConfigScheduledSnapshotsConfig { /** * When enabled, Cloud Composer periodically saves snapshots of your environment to a Cloud Storage bucket. */ enabled: boolean; /** * Snapshot schedule, in the unix-cron format. */ snapshotCreationSchedule: string; /** * the URI of a bucket folder where to save the snapshot. */ snapshotLocation: string; /** * A time zone for the schedule. This value is a time offset and does not take into account daylight saving time changes. Valid values are from UTC-12 to UTC+12. Examples: UTC, UTC-01, UTC+03. */ timeZone: string; } interface GetEnvironmentConfigSoftwareConfig { /** * Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and cannot contain "=" or ";". Section and property names cannot contain characters: "." Apache Airflow configuration property names must be written in snake_case. Property values can contain any character, and can be written in any lower/upper case format. 
Certain Apache Airflow configuration property values are blacklisted, and cannot be overridden. */ airflowConfigOverrides: { [key: string]: string; }; /** * The configuration for Cloud Data Lineage integration. Supported for Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and newer */ cloudDataLineageIntegrations: outputs.composer.GetEnvironmentConfigSoftwareConfigCloudDataLineageIntegration[]; /** * Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+), and they cannot match any of the following reserved names: AIRFLOW_HOME C_FORCE_ROOT CONTAINER_NAME DAGS_FOLDER GCP_PROJECT GCS_BUCKET GKE_CLUSTER_NAME SQL_DATABASE SQL_INSTANCE SQL_PASSWORD SQL_PROJECT SQL_REGION SQL_USER. */ envVariables: { [key: string]: string; }; /** * The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?). The Cloud Composer portion of the image version is a full semantic version, or an alias in the form of major version number or 'latest'. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. See documentation for more details and version list. */ imageVersion: string; /** * Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name (e.g. "numpy"). Values are the lowercase extras and version specifier (e.g. 
"==1.12.0", "[devel,gcp_api]", "[devel]>=1.8.2, <1.9.2"). To specify a package without pinning it to a version specifier, use the empty string as the value. */ pypiPackages: { [key: string]: string; }; /** * The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '2'. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. */ pythonVersion: string; /** * The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. */ schedulerCount: number; /** * Should be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. Used in Composer 3. */ webServerPluginsMode: string; } interface GetEnvironmentConfigSoftwareConfigCloudDataLineageIntegration { /** * Whether or not Cloud Data Lineage integration is enabled. */ enabled: boolean; } interface GetEnvironmentConfigWebServerConfig { /** * Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. */ machineType: string; } interface GetEnvironmentConfigWebServerNetworkAccessControl { /** * A collection of allowed IP ranges with descriptions. */ allowedIpRanges: outputs.composer.GetEnvironmentConfigWebServerNetworkAccessControlAllowedIpRange[]; } interface GetEnvironmentConfigWebServerNetworkAccessControlAllowedIpRange { /** * A description of this ip range. */ description: string; /** * IP address or range, defined using CIDR notation, of requests that this rule applies to. 
Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 or 2001:0db8:0000:0042:0000:8a2e:0370:7334. IP range prefixes should be properly truncated. For example, 1.2.3.4/24 should be truncated to 1.2.3.0/24. Similarly, for IPv6, 2001:db8::1/32 should be truncated to 2001:db8::/32. */ value: string; } interface GetEnvironmentConfigWorkloadsConfig { /** * Configuration for resources used by DAG processor. */ dagProcessors: outputs.composer.GetEnvironmentConfigWorkloadsConfigDagProcessor[]; /** * Configuration for resources used by Airflow schedulers. */ schedulers: outputs.composer.GetEnvironmentConfigWorkloadsConfigScheduler[]; /** * Configuration for resources used by Airflow triggerers. */ triggerers: outputs.composer.GetEnvironmentConfigWorkloadsConfigTriggerer[]; /** * Configuration for resources used by Airflow web server. */ webServers: outputs.composer.GetEnvironmentConfigWorkloadsConfigWebServer[]; /** * Configuration for resources used by Airflow workers. */ workers: outputs.composer.GetEnvironmentConfigWorkloadsConfigWorker[]; } interface GetEnvironmentConfigWorkloadsConfigDagProcessor { /** * Number of DAG processors. */ count: number; /** * CPU request and limit for DAG processor. */ cpu: number; /** * Memory (GB) request and limit for DAG processor. */ memoryGb: number; /** * Storage (GB) request and limit for DAG processor. */ storageGb: number; } interface GetEnvironmentConfigWorkloadsConfigScheduler { /** * The number of schedulers. */ count: number; /** * CPU request and limit for a single Airflow scheduler replica */ cpu: number; /** * Memory (GB) request and limit for a single Airflow scheduler replica. */ memoryGb: number; /** * Storage (GB) request and limit for a single Airflow scheduler replica. */ storageGb: number; } interface GetEnvironmentConfigWorkloadsConfigTriggerer { /** * The number of triggerers. */ count: number; /** * CPU request and limit for a single Airflow triggerer replica. 
*/ cpu: number; /** * Memory (GB) request and limit for a single Airflow triggerer replica. */ memoryGb: number; } interface GetEnvironmentConfigWorkloadsConfigWebServer { /** * CPU request and limit for Airflow web server. */ cpu: number; /** * Memory (GB) request and limit for Airflow web server. */ memoryGb: number; /** * Storage (GB) request and limit for Airflow web server. */ storageGb: number; } interface GetEnvironmentConfigWorkloadsConfigWorker { /** * CPU request and limit for a single Airflow worker replica. */ cpu: number; /** * Maximum number of workers for autoscaling. */ maxCount: number; /** * Memory (GB) request and limit for a single Airflow worker replica. */ memoryGb: number; /** * Minimum number of workers for autoscaling. */ minCount: number; /** * Storage (GB) request and limit for a single Airflow worker replica. */ storageGb: number; } interface GetEnvironmentStorageConfig { /** * Optional. Name of an existing Cloud Storage bucket to be used by the environment. */ bucket: string; } interface GetImageVersionsImageVersion { /** * The string identifier of the image version, in the form: "composer-x.y.z-airflow-a.b.c" */ imageVersionId: string; /** * Supported python versions for this image version */ supportedPythonVersions: string[]; } } export declare namespace compute { interface AutoscalerAutoscalingPolicy { /** * The number of seconds that the autoscaler should wait before it * starts collecting information from a new instance. This prevents * the autoscaler from collecting information when the instance is * initializing, during which the collected usage would not be * reliable. The default time autoscaler waits is 60 seconds. * Virtual machine initialization times might vary because of * numerous factors. We recommend that you test how long an * instance may take to initialize. To do this, create an instance * and time the startup process. 
*/ cooldownPeriod?: number; /** * Defines the CPU utilization policy that allows the autoscaler to * scale based on the average CPU utilization of a managed instance * group. * Structure is documented below. */ cpuUtilization: outputs.compute.AutoscalerAutoscalingPolicyCpuUtilization; /** * Configuration parameters of autoscaling based on a load balancer. * Structure is documented below. */ loadBalancingUtilization?: outputs.compute.AutoscalerAutoscalingPolicyLoadBalancingUtilization; /** * The maximum number of instances that the autoscaler can scale up * to. This is required when creating or updating an autoscaler. The * maximum number of replicas should not be lower than minimal number * of replicas. */ maxReplicas: number; /** * Configuration parameters of autoscaling based on a custom metric. * Structure is documented below. */ metrics?: outputs.compute.AutoscalerAutoscalingPolicyMetric[]; /** * The minimum number of replicas that the autoscaler can scale down * to. This cannot be less than 0. If not provided, autoscaler will * choose a default value depending on maximum number of instances * allowed. */ minReplicas: number; /** * Defines operating mode for this policy. */ mode?: string; /** * (Optional, Beta) * Defines scale down controls to reduce the risk of response latency * and outages due to abrupt scale-in events * Structure is documented below. */ scaleDownControl: outputs.compute.AutoscalerAutoscalingPolicyScaleDownControl; /** * Defines scale in controls to reduce the risk of response latency * and outages due to abrupt scale-in events * Structure is documented below. */ scaleInControl?: outputs.compute.AutoscalerAutoscalingPolicyScaleInControl; /** * Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. * Structure is documented below. 
*/ scalingSchedules?: outputs.compute.AutoscalerAutoscalingPolicyScalingSchedule[]; } interface AutoscalerAutoscalingPolicyCpuUtilization { /** * Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * - NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * - OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. */ predictiveMethod?: string; /** * The target CPU utilization that the autoscaler should maintain. * Must be a float value in the range (0, 1]. If not specified, the * default is 0.6. * If the CPU level is below the target utilization, the autoscaler * scales down the number of instances until it reaches the minimum * number of instances you specified or until the average CPU of * your instances reaches the target utilization. * If the average CPU is above the target utilization, the autoscaler * scales up until it reaches the maximum number of instances you * specified or until the average utilization reaches the target * utilization. */ target: number; } interface AutoscalerAutoscalingPolicyLoadBalancingUtilization { /** * Fraction of backend capacity utilization (set in HTTP(s) load * balancing configuration) that autoscaler should maintain. Must * be a positive float value. If not defined, the default is 0.8. */ target: number; } interface AutoscalerAutoscalingPolicyMetric { /** * A filter string to be used as the filter string for * a Stackdriver Monitoring TimeSeries.list API call. * This filter is used to select a specific TimeSeries for * the purpose of autoscaling and to determine whether the metric * is exporting per-instance or per-group data. * You can only use the AND operator for joining selectors. * You can only use direct equality comparison operator (=) without * any functions for each selector. 
* You can specify the metric in both the filter string and in the * metric field. However, if specified in both places, the metric must * be identical. * The monitored resource type determines what kind of values are * expected for the metric. If it is a gce_instance, the autoscaler * expects the metric to include a separate TimeSeries for each * instance in a group. In such a case, you cannot filter on resource * labels. * If the resource type is any other value, the autoscaler expects * this metric to contain values that apply to the entire autoscaled * instance group and resource label filtering can be performed to * point autoscaler at the correct TimeSeries to scale upon. * This is called a per-group metric for the purpose of autoscaling. * If not specified, the type defaults to gce_instance. * You should provide a filter that is selective enough to pick just * one TimeSeries for the autoscaled group or for each of the instances * (if you are using gceInstance resource type). If multiple * TimeSeries are returned upon the query execution, the autoscaler * will sum their respective values to obtain its scaling value. */ filter?: string; /** * The identifier (type) of the Stackdriver Monitoring metric. * The metric cannot have negative values. * The metric must have a value type of INT64 or DOUBLE. */ name: string; /** * If scaling is based on a per-group metric value that represents the * total amount of work to be done or resource usage, set this value to * an amount assigned for a single instance of the scaled group. * The autoscaler will keep the number of instances proportional to the * value of this metric, the metric itself should not change value due * to group resizing. * For example, a good metric to use with the target is * `pubsub.googleapis.com/subscription/num_undelivered_messages` * or a custom metric exporting the total number of requests coming to * your instances. 
* A bad example would be a metric exporting an average or median * latency, since this value can't include a chunk assignable to a * single instance, it could be better used with utilizationTarget * instead. */ singleInstanceAssignment?: number; /** * The target value of the metric that autoscaler should * maintain. This must be a positive value. A utilization * metric scales number of virtual machines handling requests * to increase or decrease proportionally to the metric. * For example, a good metric to use as a utilizationTarget is * www.googleapis.com/compute/instance/network/received_bytes_count. * The autoscaler will work to keep this value constant for each * of the instances. */ target?: number; /** * Defines how target utilization value is expressed for a * Stackdriver Monitoring metric. * Possible values are: `GAUGE`, `DELTA_PER_SECOND`, `DELTA_PER_MINUTE`. */ type?: string; } interface AutoscalerAutoscalingPolicyScaleDownControl { /** * A nested object resource. * Structure is documented below. */ maxScaledDownReplicas?: outputs.compute.AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas; /** * How long back autoscaling should look when computing recommendations * to include directives regarding slower scale down, as described above. */ timeWindowSec?: number; } interface AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas { /** * Specifies a fixed number of VM instances. This must be a positive * integer. */ fixed?: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. * For example, specify 80 for 80%. */ percent?: number; } interface AutoscalerAutoscalingPolicyScaleInControl { /** * A nested object resource. * Structure is documented below. */ maxScaledInReplicas?: outputs.compute.AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas; /** * How long back autoscaling should look when computing recommendations * to include directives regarding slower scale down, as described above. 
*/ timeWindowSec?: number; } interface AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas { /** * Specifies a fixed number of VM instances. This must be a positive * integer. */ fixed?: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. * For example, specify 80 for 80%. */ percent?: number; } interface AutoscalerAutoscalingPolicyScalingSchedule { /** * A description of a scaling schedule. */ description?: string; /** * A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect. */ disabled?: boolean; /** * The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300. */ durationSec: number; /** * Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule. */ minRequiredReplicas: number; /** * The identifier for this object. Format specified above. */ name: string; /** * The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field). */ schedule: string; /** * The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. */ timeZone?: string; } interface BackendBucketCdnPolicy { /** * Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. * Structure is documented below. */ bypassCacheOnRequestHeaders?: outputs.compute.BackendBucketCdnPolicyBypassCacheOnRequestHeader[]; /** * The CacheKeyPolicy for this CdnPolicy. * Structure is documented below. 
*/ cacheKeyPolicy?: outputs.compute.BackendBucketCdnPolicyCacheKeyPolicy; /** * Specifies the cache setting for all responses from this backend. * The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC * Possible values are: `USE_ORIGIN_HEADERS`, `FORCE_CACHE_ALL`, `CACHE_ALL_STATIC`. */ cacheMode: string; /** * Specifies the maximum allowed TTL for cached content served by this origin. When the * `cacheMode` is set to "USE_ORIGIN_HEADERS", you must omit this field. */ clientTtl: number; /** * Specifies the default TTL for cached content served by this origin for responses * that do not have an existing valid TTL (max-age or s-max-age). When the `cacheMode` * is set to "USE_ORIGIN_HEADERS", you must omit this field. */ defaultTtl: number; /** * Specifies the maximum allowed TTL for cached content served by this origin. When the * `cacheMode` is set to "USE_ORIGIN_HEADERS", you must omit this field. */ maxTtl: number; /** * Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. */ negativeCaching: boolean; /** * Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. * Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs. * Structure is documented below. */ negativeCachingPolicies?: outputs.compute.BackendBucketCdnPolicyNegativeCachingPolicy[]; /** * If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. */ requestCoalescing?: boolean; /** * Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. */ serveWhileStale: number; /** * Maximum number of seconds the response to a signed URL request will * be considered fresh. After this time period, * the response will be revalidated before being served. 
* When serving responses to signed URL requests, * Cloud CDN will internally behave as though * all responses from this backend had a "Cache-Control: public, * max-age=[TTL]" header, regardless of any existing Cache-Control * header. The actual headers served in responses will not be altered. */ signedUrlCacheMaxAgeSec?: number; } interface BackendBucketCdnPolicyBypassCacheOnRequestHeader { /** * The header field name to match on when bypassing cache. Values are case-insensitive. */ headerName?: string; } interface BackendBucketCdnPolicyCacheKeyPolicy { /** * Allows HTTP request headers (by name) to be used in the * cache key. */ includeHttpHeaders?: string[]; /** * Names of query string parameters to include in cache keys. * Default parameters are always included. '&' and '=' will * be percent encoded and not treated as delimiters. */ queryStringWhitelists?: string[]; } interface BackendBucketCdnPolicyNegativeCachingPolicy { /** * The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 * can be specified as values, and you cannot specify a status code more than once. */ code?: number; /** * The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s * (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. */ ttl?: number; } interface BackendBucketIamBindingCondition { description?: string; expression: string; title: string; } interface BackendBucketIamMemberCondition { description?: string; expression: string; title: string; } interface BackendBucketParams { /** * Resource manager tags to be bound to the backend bucket. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. 
 */
        resourceManagerTags?: {
            [key: string]: string;
        };
    }
    interface BackendServiceBackend {
        /**
         * Specifies the balancing mode for this backend.
         * For global HTTP(S) or TCP/SSL load balancing, the default is
         * UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)),
         * CUSTOM_METRICS (for HTTP(S)) and CONNECTION (for TCP/SSL).
         * See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode)
         * for an explanation of load balancing modes.
         * Default value is `UTILIZATION`.
         * Possible values are: `UTILIZATION`, `RATE`, `CONNECTION`, `CUSTOM_METRICS`, `IN_FLIGHT`.
         */
        balancingMode?: string;
        /**
         * A multiplier applied to the group's maximum servicing capacity
         * (based on UTILIZATION, RATE or CONNECTION).
         * Default value is 1, which means the group will serve up to 100%
         * of its configured capacity (depending on balancingMode). A
         * setting of 0 means the group is completely drained, offering
         * 0% of its available capacity. Valid range is [0.0, 1.0].
         */
        capacityScaler?: number;
        /**
         * The set of custom metrics that are used for CUSTOM_METRICS BalancingMode.
         * Structure is documented below.
         */
        customMetrics?: outputs.compute.BackendServiceBackendCustomMetric[];
        /**
         * An optional description of this resource.
         * Provide this property when you create the resource.
         */
        description?: string;
        /**
         * The fully-qualified URL of an Instance Group or Network Endpoint
         * Group resource. In case of instance group this defines the list
         * of instances that serve traffic. Member virtual machine
         * instances from each instance group must live in the same zone as
         * the instance group itself. No two backends in a backend service
         * are allowed to use same Instance Group resource.
         * For Network Endpoint Groups this defines list of endpoints. All
         * endpoints of Network Endpoint Group must be hosted on instances
         * located in the same zone as the Network Endpoint Group.
         * Backend services cannot mix Instance Group and
         * Network Endpoint Group backends.
         * Note that you must specify an Instance Group or Network Endpoint
         * Group resource using the fully-qualified URL, rather than a
         * partial URL.
         */
        group: string;
        /**
         * The max number of simultaneous connections for the group. Can
         * be used with either CONNECTION or UTILIZATION balancing modes.
         * For CONNECTION mode, either maxConnections or one
         * of maxConnectionsPerInstance or maxConnectionsPerEndpoint,
         * as appropriate for group type, must be set.
         */
        maxConnections: number;
        /**
         * The max number of simultaneous connections that a single backend
         * network endpoint can handle. This is used to calculate the
         * capacity of the group. Can be used in either CONNECTION or
         * UTILIZATION balancing modes.
         * For CONNECTION mode, either
         * maxConnections or maxConnectionsPerEndpoint must be set.
         */
        maxConnectionsPerEndpoint: number;
        /**
         * The max number of simultaneous connections that a single
         * backend instance can handle. This is used to calculate the
         * capacity of the group. Can be used in either CONNECTION or
         * UTILIZATION balancing modes.
         * For CONNECTION mode, either maxConnections or
         * maxConnectionsPerInstance must be set.
         */
        maxConnectionsPerInstance: number;
        /**
         * (Optional, Beta)
         * Defines a maximum number of in-flight requests for the whole NEG
         * or instance group. Not available if backend's balancingMode is RATE
         * or CONNECTION.
         */
        maxInFlightRequests: number;
        /**
         * (Optional, Beta)
         * Defines a maximum number of in-flight requests for a single endpoint.
         * Not available if backend's balancingMode is RATE or CONNECTION.
         */
        maxInFlightRequestsPerEndpoint: number;
        /**
         * (Optional, Beta)
         * Defines a maximum number of in-flight requests for a single VM.
         * Not available if backend's balancingMode is RATE or CONNECTION.
         */
        maxInFlightRequestsPerInstance: number;
        /**
         * The max requests per second (RPS) of the group.
         * Can be used with either RATE or UTILIZATION balancing modes,
         * but required if using RATE mode.
         * For RATE mode, either maxRate or one
         * of maxRatePerInstance or maxRatePerEndpoint, as appropriate for
         * group type, must be set.
         */
        maxRate: number;
        /**
         * The max requests per second (RPS) that a single backend network
         * endpoint can handle. This is used to calculate the capacity of
         * the group. Can be used in either balancing mode. For RATE mode,
         * either maxRate or maxRatePerEndpoint must be set.
         */
        maxRatePerEndpoint: number;
        /**
         * The max requests per second (RPS) that a single backend
         * instance can handle. This is used to calculate the capacity of
         * the group. Can be used in either balancing mode. For RATE mode,
         * either maxRate or maxRatePerInstance must be set.
         */
        maxRatePerInstance: number;
        /**
         * Used when balancingMode is UTILIZATION. This ratio defines the
         * CPU utilization target for the group. Valid range is [0.0, 1.0].
         */
        maxUtilization: number;
        /**
         * This field indicates whether this backend should be fully utilized before sending traffic to backends
         * with default preference. This field cannot be set when loadBalancingScheme is set to 'EXTERNAL'. The possible values are:
         * - PREFERRED: Backends with this preference level will be filled up to their capacity limits first,
         * based on RTT.
         * - DEFAULT: If preferred backends don't have enough capacity, backends in this layer would be used and
         * traffic would be assigned based on the load balancing algorithm you use. This is the default.
         * Possible values are: `PREFERRED`, `DEFAULT`.
         */
        preference?: string;
        /**
         * (Optional, Beta)
         * This field specifies how long a connection should be kept alive for:
         * - LONG: Most of the requests are expected to take more than multiple
         * seconds to finish.
         * - SHORT: Most requests are expected to finish with a sub-second latency.
         * Possible values are: `LONG`, `SHORT`.
         */
        trafficDuration?: string;
    }
    interface BackendServiceBackendCustomMetric {
        /**
         * If true, the metric data is not used for load balancing.
 */
        dryRun: boolean;
        /**
         * Optional parameter to define a target utilization for the Custom Metrics
         * balancing mode. The valid range is [0.0, 1.0].
         */
        maxUtilization?: number;
        /**
         * Name of a custom utilization signal. The name must be 1-64 characters
         * long and match the regular expression `[a-z]([-_.a-z0-9]*[a-z0-9])?` which
         * means the first character must be a lowercase letter, and all following
         * characters must be a dash, period, underscore, lowercase letter, or
         * digit, except the last character, which cannot be a dash, period, or
         * underscore. For usage guidelines, see Custom Metrics balancing mode. This
         * field can only be used for a global or regional backend service with the
         * loadBalancingScheme set to EXTERNAL_MANAGED,
         * INTERNAL_MANAGED or INTERNAL_SELF_MANAGED.
         */
        name: string;
    }
    interface BackendServiceCdnPolicy {
        /**
         * Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified.
         * The cache is bypassed for all cdnPolicy.cacheMode settings.
         * Structure is documented below.
         */
        bypassCacheOnRequestHeaders?: outputs.compute.BackendServiceCdnPolicyBypassCacheOnRequestHeader[];
        /**
         * The CacheKeyPolicy for this CdnPolicy.
         * Structure is documented below.
         */
        cacheKeyPolicy?: outputs.compute.BackendServiceCdnPolicyCacheKeyPolicy;
        /**
         * Specifies the cache setting for all responses from this backend.
         * The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC
         * Possible values are: `USE_ORIGIN_HEADERS`, `FORCE_CACHE_ALL`, `CACHE_ALL_STATIC`.
         */
        cacheMode: string;
        /**
         * Specifies the maximum allowed TTL for cached content served by this origin.
         * NOTE(review): this description duplicates maxTtl's; per the Cloud CDN API
         * reference, clientTtl clamps the client-facing (e.g. browser) TTL — confirm
         * against the BackendService REST documentation.
         */
        clientTtl: number;
        /**
         * Specifies the default TTL for cached content served by this origin for responses
         * that do not have an existing valid TTL (max-age or s-max-age).
         */
        defaultTtl: number;
        /**
         * Specifies the maximum allowed TTL for cached content served by this origin.
         */
        maxTtl: number;
        /**
         * Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.
         */
        negativeCaching: boolean;
        /**
         * Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy.
         * Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.
         * Structure is documented below.
         */
        negativeCachingPolicies?: outputs.compute.BackendServiceCdnPolicyNegativeCachingPolicy[];
        /**
         * If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests
         * to the origin.
         */
        requestCoalescing: boolean;
        /**
         * Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.
         */
        serveWhileStale: number;
        /**
         * Maximum number of seconds the response to a signed URL request
         * will be considered fresh, defaults to 1hr (3600s). After this
         * time period, the response will be revalidated before
         * being served.
         * When serving responses to signed URL requests, Cloud CDN will
         * internally behave as though all responses from this backend had a
         * "Cache-Control: public, max-age=[TTL]" header, regardless of any
         * existing Cache-Control header. The actual headers served in
         * responses will not be altered.
         */
        signedUrlCacheMaxAgeSec?: number;
    }
    interface BackendServiceCdnPolicyBypassCacheOnRequestHeader {
        /**
         * The header field name to match on when bypassing cache. Values are case-insensitive.
         */
        headerName: string;
    }
    interface BackendServiceCdnPolicyCacheKeyPolicy {
        /**
         * If true requests to different hosts will be cached separately.
         */
        includeHost?: boolean;
        /**
         * Allows HTTP request headers (by name) to be used in the
         * cache key.
         */
        includeHttpHeaders?: string[];
        /**
         * Names of cookies to include in cache keys.
 */
        includeNamedCookies?: string[];
        /**
         * If true, http and https requests will be cached separately.
         */
        includeProtocol?: boolean;
        /**
         * If true, include query string parameters in the cache key
         * according to queryStringWhitelists and
         * queryStringBlacklists. If neither is set, the entire query
         * string will be included.
         * If false, the query string will be excluded from the cache
         * key entirely.
         */
        includeQueryString?: boolean;
        /**
         * Names of query string parameters to exclude in cache keys.
         * All other parameters will be included. Either specify
         * queryStringWhitelists or queryStringBlacklists, not both.
         * '&' and '=' will be percent encoded and not treated as
         * delimiters.
         */
        queryStringBlacklists?: string[];
        /**
         * Names of query string parameters to include in cache keys.
         * All other parameters will be excluded. Either specify
         * queryStringWhitelists or queryStringBlacklists, not both.
         * '&' and '=' will be percent encoded and not treated as
         * delimiters.
         */
        queryStringWhitelists?: string[];
    }
    interface BackendServiceCdnPolicyNegativeCachingPolicy {
        /**
         * The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501
         * can be specified as values, and you cannot specify a status code more than once.
         */
        code?: number;
        /**
         * The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s
         * (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
         */
        ttl?: number;
    }
    interface BackendServiceCircuitBreakers {
        /**
         * (Optional, Beta)
         * The timeout for new network connections to hosts.
         * Structure is documented below.
         */
        connectTimeout?: outputs.compute.BackendServiceCircuitBreakersConnectTimeout;
        /**
         * The maximum number of connections to the backend cluster.
         * Defaults to 1024.
         */
        maxConnections?: number;
        /**
         * The maximum number of pending requests to the backend cluster.
         * Defaults to 1024.
         */
        maxPendingRequests?: number;
        /**
         * The maximum number of parallel requests to the backend cluster.
         * Defaults to 1024.
         */
        maxRequests?: number;
        /**
         * Maximum requests for a single backend connection. This parameter
         * is respected by both the HTTP/1.1 and HTTP/2 implementations. If
         * not specified, there is no limit. Setting this parameter to 1
         * will effectively disable keep alive.
         */
        maxRequestsPerConnection?: number;
        /**
         * The maximum number of parallel retries to the backend cluster.
         * Defaults to 3.
         */
        maxRetries?: number;
    }
    interface BackendServiceCircuitBreakersConnectTimeout {
        /**
         * Span of time that's a fraction of a second at nanosecond
         * resolution. Durations less than one second are represented
         * with a 0 seconds field and a positive nanos field. Must
         * be from 0 to 999,999,999 inclusive.
         */
        nanos?: number;
        /**
         * Span of time at a resolution of a second.
         * Must be from 0 to 315,576,000,000 inclusive.
         */
        seconds: number;
    }
    interface BackendServiceConsistentHash {
        /**
         * Hash is based on HTTP Cookie. This field describes a HTTP cookie
         * that will be used as the hash key for the consistent hash load
         * balancer. If the cookie is not present, it will be generated.
         * This field is applicable if the sessionAffinity is set to HTTP_COOKIE.
         * Structure is documented below.
         */
        httpCookie?: outputs.compute.BackendServiceConsistentHashHttpCookie;
        /**
         * The hash based on the value of the specified header field.
         * This field is applicable if the sessionAffinity is set to HEADER_FIELD.
         */
        httpHeaderName?: string;
        /**
         * The minimum number of virtual nodes to use for the hash ring.
         * Larger ring sizes result in more granular load
         * distributions. If the number of hosts in the load balancing pool
         * is larger than the ring size, each host will be assigned a single
         * virtual node.
         * Defaults to 1024.
         */
        minimumRingSize?: number;
    }
    interface BackendServiceConsistentHashHttpCookie {
        /**
         * Name of the cookie.
         */
        name?: string;
        /**
         * Path to set for the cookie.
 */
        path?: string;
        /**
         * Lifetime of the cookie.
         * Structure is documented below.
         */
        ttl?: outputs.compute.BackendServiceConsistentHashHttpCookieTtl;
    }
    interface BackendServiceConsistentHashHttpCookieTtl {
        /**
         * Span of time that's a fraction of a second at nanosecond
         * resolution. Durations less than one second are represented
         * with a 0 seconds field and a positive nanos field. Must
         * be from 0 to 999,999,999 inclusive.
         */
        nanos?: number;
        /**
         * Span of time at a resolution of a second.
         * Must be from 0 to 315,576,000,000 inclusive.
         */
        seconds: number;
    }
    interface BackendServiceCustomMetric {
        /**
         * If true, the metric data is not used for load balancing.
         */
        dryRun: boolean;
        /**
         * Name of a custom utilization signal. The name must be 1-64 characters
         * long and match the regular expression `[a-z]([-_.a-z0-9]*[a-z0-9])?` which
         * means the first character must be a lowercase letter, and all following
         * characters must be a dash, period, underscore, lowercase letter, or
         * digit, except the last character, which cannot be a dash, period, or
         * underscore. For usage guidelines, see Custom Metrics balancing mode. This
         * field can only be used for a global or regional backend service with the
         * loadBalancingScheme set to EXTERNAL_MANAGED,
         * INTERNAL_MANAGED or INTERNAL_SELF_MANAGED.
         */
        name: string;
    }
    interface BackendServiceDynamicForwarding {
        /**
         * (Optional, Beta)
         * IP:PORT based dynamic forwarding configuration.
         * Structure is documented below.
         */
        ipPortSelection?: outputs.compute.BackendServiceDynamicForwardingIpPortSelection;
    }
    interface BackendServiceDynamicForwardingIpPortSelection {
        /**
         * (Optional, Beta)
         * A boolean flag enabling IP:PORT based dynamic forwarding.
         */
        enabled?: boolean;
    }
    interface BackendServiceIamBindingCondition {
        /**
         * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
 *
         * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the
         * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will
         * consider it to be an entirely different resource and will treat it as such.
         */
        description?: string;
        /**
         * Textual representation of an expression in Common Expression Language syntax.
         */
        expression: string;
        /**
         * A title for the expression, i.e. a short string describing its purpose.
         */
        title: string;
    }
    interface BackendServiceIamMemberCondition {
        /**
         * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
         *
         * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the
         * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will
         * consider it to be an entirely different resource and will treat it as such.
         */
        description?: string;
        /**
         * Textual representation of an expression in Common Expression Language syntax.
         */
        expression: string;
        /**
         * A title for the expression, i.e. a short string describing its purpose.
         */
        title: string;
    }
    interface BackendServiceIap {
        /**
         * Whether the serving infrastructure will authenticate and authorize all incoming requests.
         */
        enabled: boolean;
        /**
         * OAuth2 Client ID for IAP
         */
        oauth2ClientId?: string;
        /**
         * OAuth2 Client Secret for IAP
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        oauth2ClientSecret?: string;
        /**
         * (Output)
         * OAuth2 Client Secret SHA-256 for IAP
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        oauth2ClientSecretSha256: string;
    }
    interface BackendServiceLocalityLbPolicy {
        /**
         * The configuration for a custom policy implemented by the user and
         * deployed with the client.
         * Structure is documented below.
         */
        customPolicy?: outputs.compute.BackendServiceLocalityLbPolicyCustomPolicy;
        /**
         * The configuration for a built-in load balancing policy.
         * Structure is documented below.
         */
        policy?: outputs.compute.BackendServiceLocalityLbPolicyPolicy;
    }
    interface BackendServiceLocalityLbPolicyCustomPolicy {
        /**
         * An optional, arbitrary JSON object with configuration data, understood
         * by a locally installed custom policy implementation.
         */
        data?: string;
        /**
         * Identifies the custom policy.
         * The value should match the type the custom implementation is registered
         * with on the gRPC clients. It should follow protocol buffer
         * message naming conventions and include the full path (e.g.
         * myorg.CustomLbPolicy). The maximum length is 256 characters.
         * Note that specifying the same custom policy more than once for a
         * backend is not a valid configuration and will be rejected.
         */
        name: string;
    }
    interface BackendServiceLocalityLbPolicyPolicy {
        /**
         * The name of a locality load balancer policy to be used. The value
         * should be one of the predefined ones as supported by localityLbPolicy,
         * although at the moment only ROUND_ROBIN is supported.
         * This field should only be populated when the customPolicy field is not
         * used.
         * Note that specifying the same policy more than once for a backend is
         * not a valid configuration and will be rejected.
         * The possible values are:
         * * `ROUND_ROBIN`: This is a simple policy in which each healthy backend
         * is selected in round robin order.
         * * `LEAST_REQUEST`: An O(1) algorithm which selects two random healthy
         * hosts and picks the host which has fewer active requests.
         * * `RING_HASH`: The ring/modulo hash load balancer implements consistent
         * hashing to backends. The algorithm has the property that the
         * addition/removal of a host from a set of N hosts only affects
         * 1/N of the requests.
         * * `RANDOM`: The load balancer selects a random healthy host.
         * * `ORIGINAL_DESTINATION`: Backend host is selected based on the client
         * connection metadata, i.e., connections are opened
         * to the same address as the destination address of
         * the incoming connection before the connection
         * was redirected to the load balancer.
         * * `MAGLEV`: used as a drop-in replacement for the ring hash load balancer.
         * Maglev is not as stable as ring hash but has faster table lookup
         * build times and host selection times. For more information about
         * Maglev, refer to https://ai.google/research/pubs/pub44824
         * Possible values are: `ROUND_ROBIN`, `LEAST_REQUEST`, `RING_HASH`, `RANDOM`, `ORIGINAL_DESTINATION`, `MAGLEV`.
         */
        name: string;
    }
    interface BackendServiceLogConfig {
        /**
         * Whether to enable logging for the load balancer traffic served by this backend service.
         */
        enable?: boolean;
        /**
         * This field can only be specified if logging is enabled for this backend service and "logConfig.optionalMode"
         * was set to CUSTOM. Contains a list of optional fields you want to include in the logs.
         * For example: serverInstance, serverGkeDetails.cluster, serverGkeDetails.pod.podNamespace
         * For example: orca_load_report, tls.protocol
         */
        optionalFields?: string[];
        /**
         * Specifies the optional logging mode for the load balancer traffic.
         * Supported values: INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM.
         * Possible values are: `INCLUDE_ALL_OPTIONAL`, `EXCLUDE_ALL_OPTIONAL`, `CUSTOM`.
         */
        optionalMode: string;
        /**
         * This field can only be specified if logging is enabled for this backend service. The value of
         * the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer
         * where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported.
         * The default value is 1.0.
         */
        sampleRate?: number;
    }
    interface BackendServiceMaxStreamDuration {
        /**
         * Span of time that's a fraction of a second at nanosecond resolution.
 * Durations less than one second are represented with a 0 seconds field and a positive nanos field.
         * Must be from 0 to 999,999,999 inclusive.
         */
        nanos?: number;
        /**
         * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. (int64 format)
         */
        seconds: string;
    }
    interface BackendServiceNetworkPassThroughLbTrafficPolicy {
        /**
         * When configured, new connections are load balanced across healthy backend endpoints in the local zone.
         * Structure is documented below.
         */
        zonalAffinity?: outputs.compute.BackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity;
    }
    interface BackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity {
        /**
         * (Optional, Beta)
         * This field indicates whether zonal affinity is enabled or not.
         * Default value is `ZONAL_AFFINITY_DISABLED`.
         * Possible values are: `ZONAL_AFFINITY_DISABLED`, `ZONAL_AFFINITY_SPILL_CROSS_ZONE`, `ZONAL_AFFINITY_STAY_WITHIN_ZONE`.
         */
        spillover?: string;
        /**
         * (Optional, Beta)
         * The value of the field must be in [0, 1]. When the ratio of the count of healthy backend endpoints in a zone
         * to the count of backend endpoints in that same zone is equal to or above this threshold, the load balancer
         * distributes new connections to all healthy endpoints in the local zone only. When the ratio of the count
         * of healthy backend endpoints in a zone to the count of backend endpoints in that same zone is below this
         * threshold, the load balancer distributes all new connections to all healthy endpoints across all zones.
         */
        spilloverRatio?: number;
    }
    interface BackendServiceOutlierDetection {
        /**
         * The base time that a host is ejected for. The real time is equal to the base
         * time multiplied by the number of times the host has been ejected. Defaults to
         * 30000ms or 30s.
         * Structure is documented below.
         */
        baseEjectionTime?: outputs.compute.BackendServiceOutlierDetectionBaseEjectionTime;
        /**
         * Number of errors before a host is ejected from the connection pool.
         * When the
         * backend host is accessed over HTTP, a 5xx return code qualifies as an error.
         * Defaults to 5.
         */
        consecutiveErrors?: number;
        /**
         * The number of consecutive gateway failures (502, 503, 504 status or connection
         * errors that are mapped to one of those status codes) before a consecutive
         * gateway failure ejection occurs. Defaults to 5.
         */
        consecutiveGatewayFailure?: number;
        /**
         * The percentage chance that a host will be actually ejected when an outlier
         * status is detected through consecutive 5xx. This setting can be used to disable
         * ejection or to ramp it up slowly. Defaults to 100.
         */
        enforcingConsecutiveErrors?: number;
        /**
         * The percentage chance that a host will be actually ejected when an outlier
         * status is detected through consecutive gateway failures. This setting can be
         * used to disable ejection or to ramp it up slowly. Defaults to 0.
         */
        enforcingConsecutiveGatewayFailure?: number;
        /**
         * The percentage chance that a host will be actually ejected when an outlier
         * status is detected through success rate statistics. This setting can be used to
         * disable ejection or to ramp it up slowly. Defaults to 100.
         */
        enforcingSuccessRate?: number;
        /**
         * Time interval between ejection sweep analysis. This can result in both new
         * ejections as well as hosts being returned to service. Defaults to 10 seconds.
         * Structure is documented below.
         */
        interval?: outputs.compute.BackendServiceOutlierDetectionInterval;
        /**
         * Maximum percentage of hosts in the load balancing pool for the backend service
         * that can be ejected. Defaults to 10%.
         */
        maxEjectionPercent?: number;
        /**
         * The number of hosts in a cluster that must have enough request volume to detect
         * success rate outliers. If the number of hosts is less than this setting, outlier
         * detection via success rate statistics is not performed for any host in the
         * cluster. Defaults to 5.
         */
        successRateMinimumHosts?: number;
        /**
         * The minimum number of total requests that must be collected in one interval (as
         * defined by the interval duration above) to include this host in success rate
         * based outlier detection. If the volume is lower than this setting, outlier
         * detection via success rate statistics is not performed for that host. Defaults
         * to 100.
         */
        successRateRequestVolume?: number;
        /**
         * This factor is used to determine the ejection threshold for success rate outlier
         * ejection. The ejection threshold is the difference between the mean success
         * rate, and the product of this factor and the standard deviation of the mean
         * success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided
         * by a thousand to get a double. That is, if the desired factor is 1.9, the
         * runtime value should be 1900. Defaults to 1900.
         */
        successRateStdevFactor?: number;
    }
    interface BackendServiceOutlierDetectionBaseEjectionTime {
        /**
         * Span of time that's a fraction of a second at nanosecond resolution. Durations
         * less than one second are represented with a 0 `seconds` field and a positive
         * `nanos` field. Must be from 0 to 999,999,999 inclusive.
         */
        nanos?: number;
        /**
         * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000
         * inclusive.
         */
        seconds: number;
    }
    interface BackendServiceOutlierDetectionInterval {
        /**
         * Span of time that's a fraction of a second at nanosecond resolution. Durations
         * less than one second are represented with a 0 `seconds` field and a positive
         * `nanos` field. Must be from 0 to 999,999,999 inclusive.
         */
        nanos?: number;
        /**
         * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000
         * inclusive.
         */
        seconds: number;
    }
    interface BackendServiceParams {
        /**
         * Resource manager tags to be bound to the backend service. Tag keys and values have the
         * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id},
         * and values are in the format tagValues/456.
 */
        resourceManagerTags?: {
            [key: string]: string;
        };
    }
    interface BackendServiceSecuritySettings {
        /**
         * The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication.
         * Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends.
         * Structure is documented below.
         *
         *
         * The `awsV4Authentication` block supports:
         */
        awsV4Authentication?: outputs.compute.BackendServiceSecuritySettingsAwsV4Authentication;
        /**
         * ClientTlsPolicy is a resource that specifies how a client should authenticate
         * connections to backends of a service. This resource itself does not affect
         * configuration unless it is attached to a backend service resource.
         */
        clientTlsPolicy?: string;
        /**
         * A list of alternate names to verify the subject identity in the certificate.
         * If specified, the client will verify that the server certificate's subject
         * alt name matches one of the specified values.
         */
        subjectAltNames?: string[];
    }
    interface BackendServiceSecuritySettingsAwsV4Authentication {
        /**
         * The access key used for s3 bucket authentication.
         * Required for updating or creating a backend that uses AWS v4 signature authentication, but will not be returned as part of the configuration when queried with a REST API GET request.
         */
        accessKey?: string;
        /**
         * The identifier of an access key used for s3 bucket authentication.
         */
        accessKeyId?: string;
        /**
         * The optional version identifier for the access key. You can use this to keep track of different iterations of your access key.
         */
        accessKeyVersion?: string;
        /**
         * The name of the cloud region of your origin. This is a free-form field with the name of the region your cloud uses to host your origin.
         * For example, "us-east-1" for AWS or "us-ashburn-1" for OCI.
         */
        originRegion?: string;
    }
    interface BackendServiceStrongSessionAffinityCookie {
        /**
         * Name of the cookie.
         */
        name?: string;
        /**
         * Path to set for the cookie.
         */
        path?: string;
        /**
         * Lifetime of the cookie.
         * Structure is documented below.
         */
        ttl?: outputs.compute.BackendServiceStrongSessionAffinityCookieTtl;
    }
    interface BackendServiceStrongSessionAffinityCookieTtl {
        /**
         * Span of time that's a fraction of a second at nanosecond
         * resolution. Durations less than one second are represented
         * with a 0 seconds field and a positive nanos field. Must
         * be from 0 to 999,999,999 inclusive.
         */
        nanos?: number;
        /**
         * Span of time at a resolution of a second.
         * Must be from 0 to 315,576,000,000 inclusive.
         */
        seconds: number;
    }
    interface BackendServiceTlsSettings {
        /**
         * Reference to the BackendAuthenticationConfig resource from the networksecurity.googleapis.com namespace.
         * Can be used in authenticating TLS connections to the backend, as specified by the authenticationMode field.
         * Can only be specified if authenticationMode is not NONE.
         */
        authenticationConfig?: string;
        /**
         * Server Name Indication - see RFC 3546 section 3.1. If set, the load balancer sends this string as the SNI hostname in the
         * TLS connection to the backend, and requires that this string match a Subject Alternative Name (SAN) in the backend's
         * server certificate. With a Regional Internet NEG backend, if the SNI is specified here, the load balancer uses it
         * regardless of whether the Regional Internet NEG is specified with FQDN or IP address and port.
         */
        sni?: string;
        /**
         * A list of Subject Alternative Names (SANs) that the Load Balancer verifies during a TLS handshake with the backend.
         * When the server presents its X.509 certificate to the Load Balancer, the Load Balancer inspects the certificate's SAN field,
         * and requires that at least one SAN match one of the subjectAltNames in the list. This field is limited to 5 entries.
         * When both sni and subjectAltNames are specified, the load balancer matches the backend certificate's SAN only to
         * subjectAltNames.
         * Structure is documented below.
         */
        subjectAltNames?: outputs.compute.BackendServiceTlsSettingsSubjectAltName[];
    }
    interface BackendServiceTlsSettingsSubjectAltName {
        /**
         * The SAN specified as a DNS Name.
         */
        dnsName?: string;
        /**
         * The SAN specified as a URI.
         */
        uniformResourceIdentifier?: string;
    }
    interface DiskAsyncPrimaryDisk {
        /**
         * Primary disk for asynchronous disk replication.
         */
        disk: string;
    }
    interface DiskAsyncReplicationSecondaryDisk {
        /**
         * The secondary disk.
         */
        disk: string;
        /**
         * Output-only. Status of replication on the secondary disk.
         *
         * - - -
         */
        state: string;
    }
    interface DiskDiskEncryptionKey {
        /**
         * The self link of the encryption key used to encrypt the disk. Also called KmsKeyName
         * in the cloud console. Your project's Compute Engine System service account
         * (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have
         * `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
         * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys
         */
        kmsKeySelfLink?: string;
        /**
         * The service account used for the encryption request for the given KMS key.
         * If absent, the Compute Engine Service Agent service account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rawKey?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit
         * customer-supplied encryption key to either encrypt or decrypt
         * this resource. You can provide either the rawKey or the rsaEncryptedKey.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rsaEncryptedKey?: string;
        /**
         * (Output)
         * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
         * encryption key that protects this resource.
 */
        sha256: string;
    }
    interface DiskGuestOsFeature {
        /**
         * The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options.
         */
        type: string;
    }
    interface DiskIamBindingCondition {
        /**
         * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
         */
        description?: string;
        /**
         * Textual representation of an expression in Common Expression Language syntax.
         */
        expression: string;
        /**
         * A title for the expression, i.e. a short string describing its purpose.
         */
        title: string;
    }
    interface DiskIamMemberCondition {
        /**
         * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
         */
        description?: string;
        /**
         * Textual representation of an expression in Common Expression Language syntax.
         */
        expression: string;
        /**
         * A title for the expression, i.e. a short string describing its purpose.
         */
        title: string;
    }
    interface DiskParams {
        /**
         * Resource manager tags to be bound to the disk. Tag keys and values have the
         * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id},
         * and values are in the format tagValues/456.
         */
        resourceManagerTags?: {
            [key: string]: string;
        };
    }
    interface DiskSourceImageEncryptionKey {
        /**
         * The self link of the encryption key used to encrypt the disk. Also called KmsKeyName
         * in the cloud console. Your project's Compute Engine System service account
         * (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have
         * `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
         * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys
         */
        kmsKeySelfLink?: string;
        /**
         * The service account used for the encryption request for the given KMS key.
         * If absent, the Compute Engine Service Agent service account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         */
        rawKey?: string;
        /**
         * (Output)
         * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
         * encryption key that protects this resource.
         */
        sha256: string;
    }
    interface DiskSourceSnapshotEncryptionKey {
        /**
         * The self link of the encryption key used to encrypt the disk. Also called KmsKeyName
         * in the cloud console.
         * Your project's Compute Engine System service account
         * (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have
         * `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
         * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys
         */
        kmsKeySelfLink?: string;
        /**
         * The service account used for the encryption request for the given KMS key.
         * If absent, the Compute Engine Service Agent service account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         */
        rawKey?: string;
        /**
         * (Output)
         * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
         * encryption key that protects this resource.
         */
        sha256: string;
    }
    interface ExternalVpnGatewayInterface {
        /**
         * The numeric ID for this interface. Allowed values are based on the redundancy type
         * of this external VPN gateway
         * * `0 - SINGLE_IP_INTERNALLY_REDUNDANT`
         * * `0, 1 - TWO_IPS_REDUNDANCY`
         * * `0, 1, 2, 3 - FOUR_IPS_REDUNDANCY`
         */
        id?: number;
        /**
         * IP address of the interface in the external VPN gateway.
         * Only IPv4 is supported. This IP address can be either from
         * your on-premise gateway or another Cloud provider's VPN gateway,
         * it cannot be an IP address from Google Compute Engine.
         */
        ipAddress?: string;
        /**
         * IPv6 address of the interface in the external VPN gateway. This IPv6
         * address can be either from your on-premise gateway or another Cloud
         * provider's VPN gateway, it cannot be an IP address from Google Compute
         * Engine. Must specify an IPv6 address (not IPV4-mapped) using any format
         * described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format
         * is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0).
         */
        ipv6Address?: string;
    }
    interface ExternalVpnGatewayParams {
        /**
         * Resource manager tags to be bound to the ExternalVpnGateway.
         * Tag keys and values have the
         * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id},
         * and values are in the format tagValues/456.
         */
        resourceManagerTags?: {
            [key: string]: string;
        };
    }
    interface FirewallAllow {
        /**
         * An optional list of ports to which this rule applies. This field
         * is only applicable for UDP or TCP protocol. Each entry must be
         * either an integer or a range. If not specified, this rule
         * applies to connections through any port.
         * Example inputs include: [22], [80, 443], and
         * ["12345-12349"].
         */
        ports?: string[];
        /**
         * The IP protocol to which this rule applies. The protocol type is
         * required when creating a firewall rule. This value can either be
         * one of the following well known protocol strings (tcp, udp,
         * icmp, esp, ah, sctp, ipip, all), or the IP protocol number.
         */
        protocol: string;
    }
    interface FirewallDeny {
        /**
         * An optional list of ports to which this rule applies. This field
         * is only applicable for UDP or TCP protocol. Each entry must be
         * either an integer or a range. If not specified, this rule
         * applies to connections through any port.
         * Example inputs include: [22], [80, 443], and
         * ["12345-12349"].
         */
        ports?: string[];
        /**
         * The IP protocol to which this rule applies. The protocol type is
         * required when creating a firewall rule. This value can either be
         * one of the following well known protocol strings (tcp, udp,
         * icmp, esp, ah, sctp, ipip, all), or the IP protocol number.
         */
        protocol: string;
    }
    interface FirewallLogConfig {
        /**
         * This field denotes whether to include or exclude metadata for firewall logs.
         * Possible values are: `EXCLUDE_ALL_METADATA`, `INCLUDE_ALL_METADATA`.
         */
        metadata: string;
    }
    interface FirewallParams {
        /**
         * Resource manager tags to be bound to the firewall. Tag keys and values have the
         * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id},
         * and values are in the format tagValues/456. The field is ignored when empty.
* The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. */ resourceManagerTags?: { [key: string]: string; }; } interface FirewallPolicyRuleMatch { /** * Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. */ destAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns?: string[]; /** * CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. */ destIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic destination. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ destNetworkScope?: string; /** * Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. */ destRegionCodes?: string[]; /** * Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.FirewallPolicyRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. */ srcAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns?: string[]; /** * CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. 
*/ srcIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic source. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ srcNetworkScope?: string; /** * (Optional, Beta) * Networks of the traffic source. It can be either a full or partial url. */ srcNetworks?: string[]; /** * Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. */ srcRegionCodes?: string[]; /** * List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. * Structure is documented below. * * * The `layer4Configs` block supports: */ srcSecureTags?: outputs.compute.FirewallPolicyRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. */ srcThreatIntelligences?: string[]; } interface FirewallPolicyRuleMatchLayer4Config { /** * The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. * This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. */ ipProtocol: string; /** * An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. */ ports?: string[]; } interface FirewallPolicyRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. 
*/ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface FirewallPolicyRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. */ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface FirewallPolicyWithRulesPredefinedRule { /** * (Output) * The Action to perform when the client connection triggers the rule. Can currently be either * "allow", "deny", "applySecurityProfileGroup" or "gotoNext". */ action: string; /** * An optional description of this resource. */ description: string; /** * (Output) * The direction in which this rule applies. If unspecified an INGRESS rule is created. */ direction: string; /** * (Output) * Denotes whether the firewall policy rule is disabled. When set to true, * the firewall policy rule is not enforced and traffic behaves as if it did * not exist. If this is unspecified, the firewall policy rule will be * enabled. */ disabled: boolean; /** * (Output) * Denotes whether to enable logging for a particular rule. * If logging is enabled, logs will be exported to the * configured export destination in Stackdriver. */ enableLogging: boolean; /** * (Output) * A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ matches: outputs.compute.FirewallPolicyWithRulesPredefinedRuleMatch[]; /** * (Output) * An integer indicating the priority of a rule in the list. The priority must be a value * between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the * highest priority and 2147483647 is the lowest priority. */ priority: number; /** * (Output) * An optional name for the rule. 
This field is not a unique identifier * and can be updated. */ ruleName: string; /** * (Output) * A fully-qualified URL of a SecurityProfile resource instance. * Example: * https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group * Must be specified if action is 'apply_security_profile_group'. */ securityProfileGroup: string; /** * (Output) * A list of network resource URLs to which this rule applies. * This field allows you to control which network's VMs get * this rule. If this field is left blank, all VMs * within the organization will receive the rule. */ targetResources: string[]; /** * (Output) * A list of secure tags that controls which instances the firewall rule * applies to. If targetSecureTag are specified, then the * firewall rule applies only to instances in the VPC network that have one * of those EFFECTIVE secure tags, if all the targetSecureTag are in * INEFFECTIVE state, then this rule will be ignored. * targetSecureTag may not be set at the same time as * targetServiceAccounts. * If neither targetServiceAccounts nor * targetSecureTag are specified, the firewall rule applies * to all instances on the specified network. * Maximum number of target secure tags allowed is 256. * Structure is documented below. */ targetSecureTags: outputs.compute.FirewallPolicyWithRulesPredefinedRuleTargetSecureTag[]; /** * (Output) * A list of service accounts indicating the sets of * instances that are applied with this rule. */ targetServiceAccounts: string[]; /** * (Output) * Boolean flag indicating if the traffic should be TLS decrypted. * It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. */ tlsInspect: boolean; } interface FirewallPolicyWithRulesPredefinedRuleMatch { /** * Address groups which should be matched against the traffic destination. * Maximum number of destination address groups is 10. 
*/ destAddressGroups: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns: string[]; /** * Destination IP address range in CIDR format. Required for * EGRESS rules. */ destIpRanges: string[]; /** * Region codes whose IP addresses will be used to match for destination * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of destination region codes allowed is 5000. */ destRegionCodes: string[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.FirewallPolicyWithRulesPredefinedRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. * Maximum number of source address groups is 10. */ srcAddressGroups: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns: string[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. */ srcIpRanges: string[]; /** * Region codes whose IP addresses will be used to match for source * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of source region codes allowed is 5000. */ srcRegionCodes: string[]; /** * List of secure tag values, which should be matched at the source * of the traffic. * For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, * and there is no srcIpRange, this rule will be ignored. * Maximum number of source tag values allowed is 256. * Structure is documented below. 
*/ srcSecureTags: outputs.compute.FirewallPolicyWithRulesPredefinedRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic source. */ srcThreatIntelligences: string[]; } interface FirewallPolicyWithRulesPredefinedRuleMatchLayer4Config { /** * (Output) * The IP protocol to which this rule applies. The protocol * type is required when creating a firewall rule. * This value can either be one of the following well * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), * or the IP protocol number. */ ipProtocol: string; /** * (Output) * An optional list of ports to which this rule applies. This field * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and * ["12345-12349"]. */ ports: string[]; } interface FirewallPolicyWithRulesPredefinedRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. * * The `layer4Config` block supports: */ state: string; } interface FirewallPolicyWithRulesPredefinedRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface FirewallPolicyWithRulesRule { /** * The Action to perform when the client connection triggers the rule. Can currently be either * "allow", "deny", "applySecurityProfileGroup" or "gotoNext". */ action: string; /** * A description of the rule. 
*/ description?: string; /** * The direction in which this rule applies. If unspecified an INGRESS rule is created. * Possible values are: `INGRESS`, `EGRESS`. */ direction?: string; /** * Denotes whether the firewall policy rule is disabled. When set to true, * the firewall policy rule is not enforced and traffic behaves as if it did * not exist. If this is unspecified, the firewall policy rule will be * enabled. */ disabled?: boolean; /** * Denotes whether to enable logging for a particular rule. * If logging is enabled, logs will be exported to the * configured export destination in Stackdriver. */ enableLogging?: boolean; /** * A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ match: outputs.compute.FirewallPolicyWithRulesRuleMatch; /** * An integer indicating the priority of a rule in the list. The priority must be a value * between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the * highest priority and 2147483647 is the lowest priority. */ priority: number; /** * An optional name for the rule. This field is not a unique identifier * and can be updated. */ ruleName?: string; /** * A fully-qualified URL of a SecurityProfile resource instance. * Example: * https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group * Must be specified if action is 'apply_security_profile_group'. */ securityProfileGroup?: string; /** * A list of network resource URLs to which this rule applies. * This field allows you to control which network's VMs get * this rule. If this field is left blank, all VMs * within the organization will receive the rule. */ targetResources?: string[]; /** * A list of secure tags that controls which instances the firewall rule * applies to. 
If targetSecureTag are specified, then the * firewall rule applies only to instances in the VPC network that have one * of those EFFECTIVE secure tags, if all the targetSecureTag are in * INEFFECTIVE state, then this rule will be ignored. * targetSecureTag may not be set at the same time as * targetServiceAccounts. * If neither targetServiceAccounts nor * targetSecureTag are specified, the firewall rule applies * to all instances on the specified network. * Maximum number of target secure tags allowed is 256. * Structure is documented below. */ targetSecureTags?: outputs.compute.FirewallPolicyWithRulesRuleTargetSecureTag[]; /** * A list of service accounts indicating the sets of * instances that are applied with this rule. */ targetServiceAccounts?: string[]; /** * Boolean flag indicating if the traffic should be TLS decrypted. * It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. */ tlsInspect?: boolean; } interface FirewallPolicyWithRulesRuleMatch { /** * Address groups which should be matched against the traffic destination. * Maximum number of destination address groups is 10. */ destAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns?: string[]; /** * Destination IP address range in CIDR format. Required for * EGRESS rules. */ destIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic destination. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ destNetworkScope?: string; /** * Region codes whose IP addresses will be used to match for destination * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of destination region codes allowed is 5000. */ destRegionCodes?: string[]; /** * Names of Network Threat Intelligence lists. 
* The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.FirewallPolicyWithRulesRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. * Maximum number of source address groups is 10. */ srcAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns?: string[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. */ srcIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic source. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ srcNetworkScope?: string; /** * (Optional, Beta) * Networks of the traffic source. It can be either a full or partial url. */ srcNetworks?: string[]; /** * Region codes whose IP addresses will be used to match for source * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of source region codes allowed is 5000. */ srcRegionCodes?: string[]; /** * List of secure tag values, which should be matched at the source * of the traffic. * For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, * and there is no srcIpRange, this rule will be ignored. * Maximum number of source tag values allowed is 256. * Structure is documented below. */ srcSecureTags?: outputs.compute.FirewallPolicyWithRulesRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic source. */ srcThreatIntelligences?: string[]; } interface FirewallPolicyWithRulesRuleMatchLayer4Config { /** * (Output) * The IP protocol to which this rule applies. The protocol * type is required when creating a firewall rule. 
* This value can either be one of the following well
         * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp),
         * or the IP protocol number.
         */
        ipProtocol: string;
        /**
         * (Output)
         * An optional list of ports to which this rule applies. This field
         * is only applicable for UDP or TCP protocol. Each entry must be
         * either an integer or a range. If not specified, this rule
         * applies to connections through any port.
         * Example inputs include: ["22"], ["80","443"], and
         * ["12345-12349"].
         */
        ports?: string[];
    }
    interface FirewallPolicyWithRulesRuleMatchSrcSecureTag {
        /**
         * Name of the secure tag, created with TagManager's TagValue API.
         * @pattern tagValues/[0-9]+
         */
        name?: string;
        /**
         * (Output)
         * [Output Only] State of the secure tag, either `EFFECTIVE` or
         * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted
         * or its network is deleted.
         *
         * The `layer4Config` block supports:
         */
        state: string;
    }
    interface FirewallPolicyWithRulesRuleTargetSecureTag {
        /**
         * Name of the secure tag, created with TagManager's TagValue API.
         * @pattern tagValues/[0-9]+
         */
        name?: string;
        /**
         * (Output)
         * [Output Only] State of the secure tag, either `EFFECTIVE` or
         * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted
         * or its network is deleted.
         */
        state: string;
    }
    interface ForwardingRuleServiceDirectoryRegistrations {
        /**
         * Service Directory namespace to register the forwarding rule under.
         */
        namespace: string;
        /**
         * Service Directory service to register the forwarding rule under.
         */
        service?: string;
    }
    interface FutureReservationAggregateReservation {
        /**
         * List of reserved resources (CPUs, memory, accelerators).
         * Structure is documented below.
         */
        reservedResources: outputs.compute.FutureReservationAggregateReservationReservedResource[];
        /**
         * The VM family that all instances scheduled against this reservation must belong to.
* Possible values are: `VM_FAMILY_CLOUD_TPU_DEVICE_CT3`, `VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L`, `VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP`, `VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT6E`, `VM_FAMILY_CLOUD_TPU_POD_SLICE_CT3P`, `VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P`, `VM_FAMILY_CLOUD_TPU_POD_SLICE_CT5P`. */ vmFamily?: string; /** * The workload type of the instances that will target this reservation. * Possible values are: `BATCH`, `SERVING`, `UNSPECIFIED`. */ workloadType?: string; } interface FutureReservationAggregateReservationReservedResource { /** * Properties of accelerator resources in this reservation. * Structure is documented below. */ accelerator?: outputs.compute.FutureReservationAggregateReservationReservedResourceAccelerator; } interface FutureReservationAggregateReservationReservedResourceAccelerator { /** * Number of accelerators of specified type. */ acceleratorCount?: number; /** * Full or partial URL to accelerator type. e.g. "projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l" */ acceleratorType?: string; } interface FutureReservationAutoCreatedReservationsDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. */ seconds?: string; } interface FutureReservationCommitmentInfo { /** * name of the commitment where capacity is being delivered to. */ commitmentName?: string; /** * Indicates if a Commitment needs to be created as part of FR delivery. If this field is not present, then no commitment needs to be created. * Possible values are: `INVALID`, `THIRTY_SIX_MONTH`, `TWELVE_MONTH`. */ commitmentPlan?: string; /** * Only applicable if FR is delivering to the same reservation. 
If set, all parent commitments will be extended to match the end date of the plan for this commitment. * Possible values are: `EXTEND`. */ previousCommitmentTerms?: string; } interface FutureReservationShareSettings { /** * A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. * Structure is documented below. */ projectMaps?: outputs.compute.FutureReservationShareSettingsProjectMap[]; /** * list of Project names to specify consumer projects for this shared-reservation. This is only valid when shareType's value is SPECIFIC_PROJECTS. */ projects?: string[]; /** * Type of sharing for this future reservation. * Possible values are: `LOCAL`, `SPECIFIC_PROJECTS`. */ shareType: string; } interface FutureReservationShareSettingsProjectMap { /** * The identifier for this object. Format specified above. */ id: string; /** * The project ID, should be same as the key of this project config in the parent map. */ projectId?: string; } interface FutureReservationSpecificSkuProperties { /** * Properties of the SKU instances being reserved. * Structure is documented below. */ instanceProperties?: outputs.compute.FutureReservationSpecificSkuPropertiesInstanceProperties; /** * The instance template that will be used to populate the ReservedInstanceProperties of the future reservation */ sourceInstanceTemplate?: string; /** * Total number of instances for which capacity assurance is requested at a future time period. */ totalCount?: string; } interface FutureReservationSpecificSkuPropertiesInstanceProperties { /** * Specifies accelerator type and count. * Structure is documented below. */ guestAccelerators?: outputs.compute.FutureReservationSpecificSkuPropertiesInstancePropertiesGuestAccelerator[]; /** * Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. * Structure is documented below. 
*/ localSsds?: outputs.compute.FutureReservationSpecificSkuPropertiesInstancePropertiesLocalSsd[]; /** * An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. */ locationHint?: string; /** * Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. */ machineType?: string; /** * Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. */ maintenanceFreezeDurationHours?: number; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC * Possible values are: `PERIODIC`. */ maintenanceInterval?: string; /** * Minimum cpu platform the reservation. */ minCpuPlatform?: string; } interface FutureReservationSpecificSkuPropertiesInstancePropertiesGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ acceleratorCount?: number; /** * Full or partial URL of the accelerator type resource to attach to this instance. */ acceleratorType?: string; } interface FutureReservationSpecificSkuPropertiesInstancePropertiesLocalSsd { /** * Specifies the size of the disk in base-2 GB. */ diskSizeGb?: string; /** * Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. * Possible values are: `SCSI`, `NVME`. */ interface?: string; } interface FutureReservationStatus { /** * The current status of the requested amendment. * Possible values are: . */ amendmentStatus?: string; /** * Fully qualified urls of the automatically created reservations at startTime. */ autoCreatedReservations?: string[]; /** * This count indicates the fulfilled capacity so far. This is set during "PROVISIONING" state. 
This count also includes capacity delivered as part of existing matching reservations. */ fulfilledCount?: string; /** * This field represents the future reservation before an amendment was requested. If the amendment is declined, the Future Reservation will be reverted to the last known good state. The last known good state is not set when updating a future reservation whose Procurement Status is DRAFTING. * Structure is documented below. */ lastKnownGoodState?: outputs.compute.FutureReservationStatusLastKnownGoodState; /** * The lock time of the FutureReservation before an amendment was requested. */ lockTime?: string; /** * The status of the last known good state for the Future Reservation * Possible values are: . */ procurementStatus?: string; /** * Future Reservation configuration to indicate instance properties and total count. * Structure is documented below. */ specificSkuProperties?: outputs.compute.FutureReservationStatusSpecificSkuProperties; } interface FutureReservationStatusLastKnownGoodState { /** * An optional description of this resource. */ description?: string; /** * Represents the matching usage for the future reservation before an amendment was requested. * Structure is documented below. */ existingMatchingUsageInfo?: outputs.compute.FutureReservationStatusLastKnownGoodStateExistingMatchingUsageInfo; /** * The previous instance-related properties of the Future Reservation. * Structure is documented below. */ futureReservationSpecs?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecs; /** * The lock time of the FutureReservation before an amendment was requested. */ lockTime?: string; /** * Name prefix for the reservations to be created at the time of delivery. The name prefix must comply with RFC1035. Maximum allowed length for name prefix is 20. Automatically created reservations name format will be -date-####. 
*/ namePrefix?: string; /** * The status of the last known good state for the Future Reservation * Possible values are: . */ procurementStatus?: string; } interface FutureReservationStatusLastKnownGoodStateExistingMatchingUsageInfo { /** * Count representing minimum(FR totalCount, matching_reserved_capacity+matching_unreserved_instances). */ count?: string; /** * Timestamp when the matching usage was calculated. */ timeStamp?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecs { /** * Settings for sharing the future reservation * Structure is documented below. */ shareSettings?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsShareSettings; /** * Future Reservation configuration to indicate instance properties and total count. * Structure is documented below. */ specificSkuProperties?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuProperties; /** * Time window for this Future Reservation. * Structure is documented below. */ timeWindow?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsTimeWindow; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsShareSettings { /** * A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. * Structure is documented below. */ projectMaps?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsShareSettingsProjectMap[]; /** * list of Project names to specify consumer projects for this shared-reservation. This is only valid when shareType's value is SPECIFIC_PROJECTS. */ projects?: string[]; /** * Type of sharing for this future reservation. * Possible values are: `LOCAL`, `SPECIFIC_PROJECTS`. */ shareType?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsShareSettingsProjectMap { /** * The ID of the project in which the resource belongs. 
* If it is not provided, the provider project is used. */ project: string; /** * The project ID, should be same as the key of this project config in the parent map. */ projectId?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuProperties { /** * Properties of the SKU instances being reserved. * Structure is documented below. */ instanceProperties?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuPropertiesInstanceProperties; /** * The instance template that will be used to populate the ReservedInstanceProperties of the future reservation */ sourceInstanceTemplate?: string; /** * Total number of instances for which capacity assurance is requested at a future time period. */ totalCount?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuPropertiesInstanceProperties { /** * Specifies accelerator type and count. * Structure is documented below. */ guestAccelerators?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuPropertiesInstancePropertiesGuestAccelerator[]; /** * Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. * Structure is documented below. */ localSsds?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuPropertiesInstancePropertiesLocalSsd[]; /** * An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. */ locationHint?: string; /** * Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. */ machineType?: string; /** * Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. 
*/ maintenanceFreezeDurationHours?: number; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC * Possible values are: `PERIODIC`. */ maintenanceInterval?: string; /** * Minimum CPU platform for the reservation. */ minCpuPlatform?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuPropertiesInstancePropertiesGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ acceleratorCount?: number; /** * Full or partial URL of the accelerator type resource to attach to this instance. */ acceleratorType?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsSpecificSkuPropertiesInstancePropertiesLocalSsd { /** * Specifies the size of the disk in base-2 GB. */ diskSizeGb?: string; /** * Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. * Possible values are: `SCSI`, `NVME`. */ interface?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsTimeWindow { /** * Duration of the future reservation * Structure is documented below. */ duration?: outputs.compute.FutureReservationStatusLastKnownGoodStateFutureReservationSpecsTimeWindowDuration; /** * End time of the future reservation in RFC3339 format. */ endTime?: string; /** * Start time of the future reservation in RFC3339 format. */ startTime?: string; } interface FutureReservationStatusLastKnownGoodStateFutureReservationSpecsTimeWindowDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. */ seconds?: string; } interface FutureReservationStatusSpecificSkuProperties { /** * ID of the instance template used to populate the Future Reservation properties. 
*/ sourceInstanceTemplateId?: string; } interface FutureReservationTimeWindow { /** * Duration of the future reservation * Structure is documented below. */ duration?: outputs.compute.FutureReservationTimeWindowDuration; /** * End time of the future reservation in RFC3339 format. */ endTime?: string; /** * Start time of the future reservation in RFC3339 format. */ startTime: string; } interface FutureReservationTimeWindowDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. */ seconds?: string; } interface GetAddressesAddress { /** * The IP address (for example `1.2.3.4`). */ address: string; /** * The IP address type, can be `EXTERNAL` or `INTERNAL`. */ addressType: string; /** * The IP address description. */ description: string; /** * A map containing IP labels. */ labels: { [key: string]: string; }; /** * The IP address name. */ name: string; /** * The prefix length of the IP range. If not present, it means the address field is a single IP address. */ prefixLength: number; /** * Region that should be considered to search addresses. * All regions are considered if missing. */ region: string; /** * The URI of the created resource. */ selfLink: string; /** * Indicates if the address is used. Possible values are: RESERVED or IN_USE. */ status: string; } interface GetBackendBucketCdnPolicy { /** * Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. */ bypassCacheOnRequestHeaders: outputs.compute.GetBackendBucketCdnPolicyBypassCacheOnRequestHeader[]; /** * The CacheKeyPolicy for this CdnPolicy. */ cacheKeyPolicies: outputs.compute.GetBackendBucketCdnPolicyCacheKeyPolicy[]; /** * Specifies the cache setting for all responses from this backend. 
* The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"] */ cacheMode: string; /** * Specifies the maximum allowed TTL for cached content served by this origin. When the * 'cache_mode' is set to "USE_ORIGIN_HEADERS", you must omit this field. */ clientTtl: number; /** * Specifies the default TTL for cached content served by this origin for responses * that do not have an existing valid TTL (max-age or s-max-age). When the 'cache_mode' * is set to "USE_ORIGIN_HEADERS", you must omit this field. */ defaultTtl: number; /** * Specifies the maximum allowed TTL for cached content served by this origin. When the * 'cache_mode' is set to "USE_ORIGIN_HEADERS", you must omit this field. */ maxTtl: number; /** * Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. */ negativeCaching: boolean; /** * Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. * Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs. */ negativeCachingPolicies: outputs.compute.GetBackendBucketCdnPolicyNegativeCachingPolicy[]; /** * If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. */ requestCoalescing: boolean; /** * Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. */ serveWhileStale: number; /** * Maximum number of seconds the response to a signed URL request will * be considered fresh. After this time period, * the response will be revalidated before being served. 
* When serving responses to signed URL requests, * Cloud CDN will internally behave as though * all responses from this backend had a "Cache-Control: public, * max-age=[TTL]" header, regardless of any existing Cache-Control * header. The actual headers served in responses will not be altered. */ signedUrlCacheMaxAgeSec: number; } interface GetBackendBucketCdnPolicyBypassCacheOnRequestHeader { /** * The header field name to match on when bypassing cache. Values are case-insensitive. */ headerName: string; } interface GetBackendBucketCdnPolicyCacheKeyPolicy { /** * Allows HTTP request headers (by name) to be used in the * cache key. */ includeHttpHeaders: string[]; /** * Names of query string parameters to include in cache keys. * Default parameters are always included. '&' and '=' will * be percent encoded and not treated as delimiters. */ queryStringWhitelists: string[]; } interface GetBackendBucketCdnPolicyNegativeCachingPolicy { /** * The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 * can be specified as values, and you cannot specify a status code more than once. */ code: number; /** * The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s * (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. */ ttl: number; } interface GetBackendBucketParam { /** * Resource manager tags to be bound to the backend bucket. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetBackendServiceBackend { /** * Specifies the balancing mode for this backend. * * For global HTTP(S) or TCP/SSL load balancing, the default is * UTILIZATION. 
Valid values are UTILIZATION, RATE (for HTTP(S)), * CUSTOM_METRICS (for HTTP(s)) and CONNECTION (for TCP/SSL). * * See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) * for an explanation of load balancing modes. Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION", "CUSTOM_METRICS", "IN_FLIGHT"] */ balancingMode: string; /** * A multiplier applied to the group's maximum servicing capacity * (based on UTILIZATION, RATE or CONNECTION). * * Default value is 1, which means the group will serve up to 100% * of its configured capacity (depending on balancingMode). A * setting of 0 means the group is completely drained, offering * 0% of its available Capacity. Valid range is [0.0,1.0]. */ capacityScaler: number; /** * The set of custom metrics that are used for CUSTOM_METRICS BalancingMode. */ customMetrics: outputs.compute.GetBackendServiceBackendCustomMetric[]; /** * Textual description for the Backend Service. */ description: string; /** * The fully-qualified URL of an Instance Group or Network Endpoint * Group resource. In case of instance group this defines the list * of instances that serve traffic. Member virtual machine * instances from each instance group must live in the same zone as * the instance group itself. No two backends in a backend service * are allowed to use same Instance Group resource. * * For Network Endpoint Groups this defines list of endpoints. All * endpoints of Network Endpoint Group must be hosted on instances * located in the same zone as the Network Endpoint Group. * * Backend services cannot mix Instance Group and * Network Endpoint Group backends. * * Note that you must specify an Instance Group or Network Endpoint * Group resource using the fully-qualified URL, rather than a * partial URL. */ group: string; /** * The max number of simultaneous connections for the group. Can * be used with either CONNECTION or UTILIZATION balancing modes. 
* * For CONNECTION mode, either maxConnections or one * of maxConnectionsPerInstance or maxConnectionsPerEndpoint, * as appropriate for group type, must be set. */ maxConnections: number; /** * The max number of simultaneous connections that a single backend * network endpoint can handle. This is used to calculate the * capacity of the group. Can be used in either CONNECTION or * UTILIZATION balancing modes. * * For CONNECTION mode, either * maxConnections or maxConnectionsPerEndpoint must be set. */ maxConnectionsPerEndpoint: number; /** * The max number of simultaneous connections that a single * backend instance can handle. This is used to calculate the * capacity of the group. Can be used in either CONNECTION or * UTILIZATION balancing modes. * * For CONNECTION mode, either maxConnections or * maxConnectionsPerInstance must be set. */ maxConnectionsPerInstance: number; /** * Defines a maximum number of in-flight requests for the whole NEG * or instance group. Not available if backend's balancingMode is RATE * or CONNECTION. */ maxInFlightRequests: number; /** * Defines a maximum number of in-flight requests for a single endpoint. * Not available if backend's balancingMode is RATE or CONNECTION. */ maxInFlightRequestsPerEndpoint: number; /** * Defines a maximum number of in-flight requests for a single VM. * Not available if backend's balancingMode is RATE or CONNECTION. */ maxInFlightRequestsPerInstance: number; /** * The max requests per second (RPS) of the group. * * Can be used with either RATE or UTILIZATION balancing modes, * but required if RATE mode. For RATE mode, either maxRate or one * of maxRatePerInstance or maxRatePerEndpoint, as appropriate for * group type, must be set. */ maxRate: number; /** * The max requests per second (RPS) that a single backend network * endpoint can handle. This is used to calculate the capacity of * the group. Can be used in either balancing mode. For RATE mode, * either maxRate or maxRatePerEndpoint must be set. 
*/ maxRatePerEndpoint: number; /** * The max requests per second (RPS) that a single backend * instance can handle. This is used to calculate the capacity of * the group. Can be used in either balancing mode. For RATE mode, * either maxRate or maxRatePerInstance must be set. */ maxRatePerInstance: number; /** * Used when balancingMode is UTILIZATION. This ratio defines the * CPU utilization target for the group. Valid range is [0.0, 1.0]. */ maxUtilization: number; /** * This field indicates whether this backend should be fully utilized before sending traffic to backends * with default preference. This field cannot be set when loadBalancingScheme is set to 'EXTERNAL'. The possible values are: * - PREFERRED: Backends with this preference level will be filled up to their capacity limits first, * based on RTT. * - DEFAULT: If preferred backends don't have enough capacity, backends in this layer would be used and * traffic would be assigned based on the load balancing algorithm you use. This is the default Possible values: ["PREFERRED", "DEFAULT"] */ preference: string; /** * This field specifies how long a connection should be kept alive for: * - LONG: Most of the requests are expected to take more than multiple * seconds to finish. * - SHORT: Most requests are expected to finish with a sub-second latency. Possible values: ["LONG", "SHORT"] */ trafficDuration: string; } interface GetBackendServiceBackendCustomMetric { /** * If true, the metric data is collected and reported to Cloud * Monitoring, but is not used for load balancing. */ dryRun: boolean; /** * Optional parameter to define a target utilization for the Custom Metrics * balancing mode. The valid range is [0.0, 1.0]. */ maxUtilization: number; /** * The name of the Backend Service. * * - - - */ name: string; } interface GetBackendServiceCdnPolicy { /** * Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. 
* The cache is bypassed for all cdnPolicy.cacheMode settings. */ bypassCacheOnRequestHeaders: outputs.compute.GetBackendServiceCdnPolicyBypassCacheOnRequestHeader[]; /** * The CacheKeyPolicy for this CdnPolicy. */ cacheKeyPolicies: outputs.compute.GetBackendServiceCdnPolicyCacheKeyPolicy[]; /** * Specifies the cache setting for all responses from this backend. * The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"] */ cacheMode: string; /** * Specifies the maximum allowed TTL for cached content served by this origin. */ clientTtl: number; /** * Specifies the default TTL for cached content served by this origin for responses * that do not have an existing valid TTL (max-age or s-max-age). */ defaultTtl: number; /** * Specifies the maximum allowed TTL for cached content served by this origin. */ maxTtl: number; /** * Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. */ negativeCaching: boolean; /** * Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. * Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs. */ negativeCachingPolicies: outputs.compute.GetBackendServiceCdnPolicyNegativeCachingPolicy[]; /** * If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests * to the origin. */ requestCoalescing: boolean; /** * Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. */ serveWhileStale: number; /** * Maximum number of seconds the response to a signed URL request * will be considered fresh, defaults to 1hr (3600s). After this * time period, the response will be revalidated before * being served. 
* * When serving responses to signed URL requests, Cloud CDN will * internally behave as though all responses from this backend had a * "Cache-Control: public, max-age=[TTL]" header, regardless of any * existing Cache-Control header. The actual headers served in * responses will not be altered. */ signedUrlCacheMaxAgeSec: number; } interface GetBackendServiceCdnPolicyBypassCacheOnRequestHeader { /** * The header field name to match on when bypassing cache. Values are case-insensitive. */ headerName: string; } interface GetBackendServiceCdnPolicyCacheKeyPolicy { /** * If true requests to different hosts will be cached separately. */ includeHost: boolean; /** * Allows HTTP request headers (by name) to be used in the * cache key. */ includeHttpHeaders: string[]; /** * Names of cookies to include in cache keys. */ includeNamedCookies: string[]; /** * If true, http and https requests will be cached separately. */ includeProtocol: boolean; /** * If true, include query string parameters in the cache key * according to queryStringWhitelist and * query_string_blacklist. If neither is set, the entire query * string will be included. * * If false, the query string will be excluded from the cache * key entirely. */ includeQueryString: boolean; /** * Names of query string parameters to exclude in cache keys. * * All other parameters will be included. Either specify * queryStringWhitelist or query_string_blacklist, not both. * '&' and '=' will be percent encoded and not treated as * delimiters. */ queryStringBlacklists: string[]; /** * Names of query string parameters to include in cache keys. * * All other parameters will be excluded. Either specify * queryStringWhitelist or query_string_blacklist, not both. * '&' and '=' will be percent encoded and not treated as * delimiters. */ queryStringWhitelists: string[]; } interface GetBackendServiceCdnPolicyNegativeCachingPolicy { /** * The HTTP status code to define a TTL against. 
Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 * can be specified as values, and you cannot specify a status code more than once. */ code: number; /** * The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s * (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. */ ttl: number; } interface GetBackendServiceCircuitBreaker { /** * The timeout for new network connections to hosts. */ connectTimeouts: outputs.compute.GetBackendServiceCircuitBreakerConnectTimeout[]; /** * The maximum number of connections to the backend cluster. * Defaults to 1024. */ maxConnections: number; /** * The maximum number of pending requests to the backend cluster. * Defaults to 1024. */ maxPendingRequests: number; /** * The maximum number of parallel requests to the backend cluster. * Defaults to 1024. */ maxRequests: number; /** * Maximum requests for a single backend connection. This parameter * is respected by both the HTTP/1.1 and HTTP/2 implementations. If * not specified, there is no limit. Setting this parameter to 1 * will effectively disable keep alive. */ maxRequestsPerConnection: number; /** * The maximum number of parallel retries to the backend cluster. * Defaults to 3. */ maxRetries: number; } interface GetBackendServiceCircuitBreakerConnectTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetBackendServiceConsistentHash { /** * Hash is based on HTTP Cookie. This field describes a HTTP cookie * that will be used as the hash key for the consistent hash load * balancer. 
If the cookie is not present, it will be generated. * This field is applicable if the sessionAffinity is set to HTTP_COOKIE. */ httpCookies: outputs.compute.GetBackendServiceConsistentHashHttpCooky[]; /** * The hash based on the value of the specified header field. * This field is applicable if the sessionAffinity is set to HEADER_FIELD. */ httpHeaderName: string; /** * The minimum number of virtual nodes to use for the hash ring. * Larger ring sizes result in more granular load * distributions. If the number of hosts in the load balancing pool * is larger than the ring size, each host will be assigned a single * virtual node. * Defaults to 1024. */ minimumRingSize: number; } interface GetBackendServiceConsistentHashHttpCooky { /** * The name of the Backend Service. * * - - - */ name: string; /** * Path to set for the cookie. */ path: string; /** * Lifetime of the cookie. */ ttls: outputs.compute.GetBackendServiceConsistentHashHttpCookyTtl[]; } interface GetBackendServiceConsistentHashHttpCookyTtl { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetBackendServiceCustomMetric { /** * If true, the metric data is not used for load balancing. */ dryRun: boolean; /** * The name of the Backend Service. * * - - - */ name: string; } interface GetBackendServiceDynamicForwarding { /** * IP:PORT based dynamic forwarding configuration. */ ipPortSelections: outputs.compute.GetBackendServiceDynamicForwardingIpPortSelection[]; } interface GetBackendServiceDynamicForwardingIpPortSelection { /** * A boolean flag enabling IP:PORT based dynamic forwarding. 
*/ enabled: boolean; } interface GetBackendServiceIap { /** * Whether the serving infrastructure will authenticate and authorize all incoming requests. */ enabled: boolean; /** * OAuth2 Client ID for IAP */ oauth2ClientId: string; /** * OAuth2 Client Secret for IAP */ oauth2ClientSecret: string; /** * OAuth2 Client Secret SHA-256 for IAP */ oauth2ClientSecretSha256: string; } interface GetBackendServiceLocalityLbPolicy { /** * The configuration for a custom policy implemented by the user and * deployed with the client. */ customPolicies: outputs.compute.GetBackendServiceLocalityLbPolicyCustomPolicy[]; /** * The configuration for a built-in load balancing policy. */ policies: outputs.compute.GetBackendServiceLocalityLbPolicyPolicy[]; } interface GetBackendServiceLocalityLbPolicyCustomPolicy { /** * An optional, arbitrary JSON object with configuration data, understood * by a locally installed custom policy implementation. */ data: string; /** * The name of the Backend Service. * * - - - */ name: string; } interface GetBackendServiceLocalityLbPolicyPolicy { /** * The name of the Backend Service. * * - - - */ name: string; } interface GetBackendServiceLogConfig { /** * Whether to enable logging for the load balancer traffic served by this backend service. */ enable: boolean; /** * This field can only be specified if logging is enabled for this backend service and "logConfig.optionalMode" * was set to CUSTOM. Contains a list of optional fields you want to include in the logs. * For example: serverInstance, serverGkeDetails.cluster, serverGkeDetails.pod.podNamespace * For example: orca_load_report, tls.protocol */ optionalFields: string[]; /** * Specifies the optional logging mode for the load balancer traffic. * Supported values: INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM. Possible values: ["INCLUDE_ALL_OPTIONAL", "EXCLUDE_ALL_OPTIONAL", "CUSTOM"] */ optionalMode: string; /** * This field can only be specified if logging is enabled for this backend service. 
The value of * the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer * where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. * The default value is 1.0. */ sampleRate: number; } interface GetBackendServiceMaxStreamDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. * Durations less than one second are represented with a 0 seconds field and a positive nanos field. * Must be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. (int64 format) */ seconds: string; } interface GetBackendServiceNetworkPassThroughLbTrafficPolicy { /** * When configured, new connections are load balanced across healthy backend endpoints in the local zone. */ zonalAffinities: outputs.compute.GetBackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity[]; } interface GetBackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity { /** * This field indicates whether zonal affinity is enabled or not. Default value: "ZONAL_AFFINITY_DISABLED" Possible values: ["ZONAL_AFFINITY_DISABLED", "ZONAL_AFFINITY_SPILL_CROSS_ZONE", "ZONAL_AFFINITY_STAY_WITHIN_ZONE"] */ spillover: string; /** * The value of the field must be in [0, 1]. When the ratio of the count of healthy backend endpoints in a zone * to the count of backend endpoints in that same zone is equal to or above this threshold, the load balancer * distributes new connections to all healthy endpoints in the local zone only. When the ratio of the count * of healthy backend endpoints in a zone to the count of backend endpoints in that same zone is below this * threshold, the load balancer distributes all new connections to all healthy endpoints across all zones. */ spilloverRatio: number; } interface GetBackendServiceOutlierDetection { /** * The base time that a host is ejected for. 
The real time is equal to the base * time multiplied by the number of times the host has been ejected. Defaults to * 30000ms or 30s. */ baseEjectionTimes: outputs.compute.GetBackendServiceOutlierDetectionBaseEjectionTime[]; /** * Number of errors before a host is ejected from the connection pool. When the * backend host is accessed over HTTP, a 5xx return code qualifies as an error. * Defaults to 5. */ consecutiveErrors: number; /** * The number of consecutive gateway failures (502, 503, 504 status or connection * errors that are mapped to one of those status codes) before a consecutive * gateway failure ejection occurs. Defaults to 5. */ consecutiveGatewayFailure: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through consecutive 5xx. This setting can be used to disable * ejection or to ramp it up slowly. Defaults to 100. */ enforcingConsecutiveErrors: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through consecutive gateway failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 0. */ enforcingConsecutiveGatewayFailure: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through success rate statistics. This setting can be used to * disable ejection or to ramp it up slowly. Defaults to 100. */ enforcingSuccessRate: number; /** * Time interval between ejection sweep analysis. This can result in both new * ejections as well as hosts being returned to service. Defaults to 10 seconds. */ intervals: outputs.compute.GetBackendServiceOutlierDetectionInterval[]; /** * Maximum percentage of hosts in the load balancing pool for the backend service * that can be ejected. Defaults to 10%. */ maxEjectionPercent: number; /** * The number of hosts in a cluster that must have enough request volume to detect * success rate outliers. 
If the number of hosts is less than this setting, outlier * detection via success rate statistics is not performed for any host in the * cluster. Defaults to 5. */ successRateMinimumHosts: number; /** * The minimum number of total requests that must be collected in one interval (as * defined by the interval duration above) to include this host in success rate * based outlier detection. If the volume is lower than this setting, outlier * detection via success rate statistics is not performed for that host. Defaults * to 100. */ successRateRequestVolume: number; /** * This factor is used to determine the ejection threshold for success rate outlier * ejection. The ejection threshold is the difference between the mean success * rate, and the product of this factor and the standard deviation of the mean * success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided * by a thousand to get a double. That is, if the desired factor is 1.9, the * runtime value should be 1900. Defaults to 1900. */ successRateStdevFactor: number; } interface GetBackendServiceOutlierDetectionBaseEjectionTime { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations * less than one second are represented with a 0 'seconds' field and a positive * 'nanos' field. Must be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 * inclusive. */ seconds: number; } interface GetBackendServiceOutlierDetectionInterval { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations * less than one second are represented with a 0 'seconds' field and a positive * 'nanos' field. Must be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 * inclusive. */ seconds: number; } interface GetBackendServiceParam { /** * Resource manager tags to be bound to the backend service. 
Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetBackendServiceSecuritySetting { /** * The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. * Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends. */ awsV4Authentications: outputs.compute.GetBackendServiceSecuritySettingAwsV4Authentication[]; /** * ClientTlsPolicy is a resource that specifies how a client should authenticate * connections to backends of a service. This resource itself does not affect * configuration unless it is attached to a backend service resource. */ clientTlsPolicy: string; /** * A list of alternate names to verify the subject identity in the certificate. * If specified, the client will verify that the server certificate's subject * alt name matches one of the specified values. */ subjectAltNames: string[]; } interface GetBackendServiceSecuritySettingAwsV4Authentication { /** * The access key used for s3 bucket authentication. * Required for updating or creating a backend that uses AWS v4 signature authentication, but will not be returned as part of the configuration when queried with a REST API GET request. */ accessKey: string; /** * The identifier of an access key used for s3 bucket authentication. */ accessKeyId: string; /** * The optional version identifier for the access key. You can use this to keep track of different iterations of your access key. */ accessKeyVersion: string; /** * The name of the cloud region of your origin. This is a free-form field with the name of the region your cloud uses to host your origin. * For example, "us-east-1" for AWS or "us-ashburn-1" for OCI. */ originRegion: string; } interface GetBackendServiceStrongSessionAffinityCooky { /** * The name of the Backend Service. 
* * - - - */ name: string; /** * Path to set for the cookie. */ path: string; /** * Lifetime of the cookie. */ ttls: outputs.compute.GetBackendServiceStrongSessionAffinityCookyTtl[]; } interface GetBackendServiceStrongSessionAffinityCookyTtl { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetBackendServiceTlsSetting { /** * Reference to the BackendAuthenticationConfig resource from the networksecurity.googleapis.com namespace. * Can be used in authenticating TLS connections to the backend, as specified by the authenticationMode field. * Can only be specified if authenticationMode is not NONE. */ authenticationConfig: string; /** * Server Name Indication - see RFC3546 section 3.1. If set, the load balancer sends this string as the SNI hostname in the * TLS connection to the backend, and requires that this string match a Subject Alternative Name (SAN) in the backend's * server certificate. With a Regional Internet NEG backend, if the SNI is specified here, the load balancer uses it * regardless of whether the Regional Internet NEG is specified with FQDN or IP address and port. */ sni: string; /** * A list of Subject Alternative Names (SANs) that the Load Balancer verifies during a TLS handshake with the backend. * When the server presents its X.509 certificate to the Load Balancer, the Load Balancer inspects the certificate's SAN field, * and requires that at least one SAN match one of the subjectAltNames in the list. This field is limited to 5 entries. * When both sni and subjectAltNames are specified, the load balancer matches the backend certificate's SAN only to * subjectAltNames. 
*/ subjectAltNames: outputs.compute.GetBackendServiceTlsSettingSubjectAltName[]; } interface GetBackendServiceTlsSettingSubjectAltName { /** * The SAN specified as a DNS Name. */ dnsName: string; /** * The SAN specified as a URI. */ uniformResourceIdentifier: string; } interface GetDiskAsyncPrimaryDisk { /** * Primary disk for asynchronous disk replication. */ disk: string; } interface GetDiskDiskEncryptionKey { /** * The self link of the encryption key used to encrypt the disk. Also called KmsKeyName * in the cloud console. Your project's Compute Engine System service account * ('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have * 'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys */ kmsKeySelfLink: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit * customer-supplied encryption key to either encrypt or decrypt * this resource. You can provide either the rawKey or the rsaEncryptedKey. */ rsaEncryptedKey: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface GetDiskGuestOsFeature { /** * The type of the guest OS feature enabled on the disk's image * (for example, UEFI_COMPATIBLE). * NOTE(review): the previous description ("URL of the disk type resource * describing which disk type to use to create the disk") appears copied * from the disk-level 'type' field — verify against the upstream schema. */ type: string; } interface GetDiskParam { /** * Resource manager tags to be bound to the disk. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456.
*/ resourceManagerTags: { [key: string]: string; }; } interface GetDiskSourceImageEncryptionKey { /** * The self link of the encryption key used to encrypt the disk. Also called KmsKeyName * in the cloud console. Your project's Compute Engine System service account * ('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have * 'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys */ kmsKeySelfLink: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface GetDiskSourceSnapshotEncryptionKey { /** * The self link of the encryption key used to encrypt the disk. Also called KmsKeyName * in the cloud console. Your project's Compute Engine System service account * ('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have * 'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys */ kmsKeySelfLink: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. 
*/ rawKey: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface GetForwardingRuleServiceDirectoryRegistration { /** * Service Directory namespace to register the forwarding rule under. */ namespace: string; /** * Service Directory service to register the forwarding rule under. */ service: string; } interface GetForwardingRulesRule { /** * The 'ports', 'portRange', and 'allPorts' fields are mutually exclusive. * Only packets addressed to ports in the specified range will be forwarded * to the backends configured with this forwarding rule. * * The 'allPorts' field has the following limitations: * * It requires that the forwarding rule 'IPProtocol' be TCP, UDP, SCTP, or * L3_DEFAULT. * * It's applicable only to the following products: internal passthrough * Network Load Balancers, backend service-based external passthrough Network * Load Balancers, and internal and external protocol forwarding. * * Set this field to true to allow packets addressed to any port or packets * lacking destination port information (for example, UDP fragments after the * first fragment) to be forwarded to the backends configured with this * forwarding rule. The L3_DEFAULT protocol requires 'allPorts' be set to * true. */ allPorts: boolean; /** * This field is used along with the 'backend_service' field for * internal load balancing or with the 'target' field for internal * TargetInstance. * * If the field is set to 'TRUE', clients can access ILB from all * regions. * * Otherwise only allows access from clients in the same region as the * internal load balancer. */ allowGlobalAccess: boolean; /** * This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region. */ allowPscGlobalAccess: boolean; /** * Identifies the backend service to which the forwarding rule sends traffic. 
* * Required for Internal TCP/UDP Load Balancing and Network Load Balancing; * must be omitted for all other load balancer types. */ backendService: string; /** * [Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified. */ baseForwardingRule: string; /** * Creation timestamp in RFC3339 text format. */ creationTimestamp: string; /** * An optional description of this resource. Provide this property when * you create the resource. */ description: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * The unique identifier number for the resource. This identifier is defined by the server. */ forwardingRuleId: number; /** * IP address for which this forwarding rule accepts traffic. When a client * sends traffic to this IP address, the forwarding rule directs the traffic * to the referenced 'target' or 'backendService'. * * While creating a forwarding rule, specifying an 'IPAddress' is * required under the following circumstances: * * * When the 'target' is set to 'targetGrpcProxy' and * 'validateForProxyless' is set to 'true', the * 'IPAddress' should be set to '0.0.0.0'. * * When the 'target' is a Private Service Connect Google APIs * bundle, you must specify an 'IPAddress'. * * Otherwise, you can optionally specify an IP address that references an * existing static (reserved) IP address resource. When omitted, Google Cloud * assigns an ephemeral IP address. 
* * Use one of the following formats to specify an IP address while creating a * forwarding rule: * * * IP address number, as in '100.1.2.3' * * IPv6 address range, as in '2600:1234::/96' * * Full resource URL, as in * 'https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name' * * Partial URL or by name, as in: * * 'projects/project_id/regions/region/addresses/address-name' * * 'regions/region/addresses/address-name' * * 'global/addresses/address-name' * * 'address-name' * * The forwarding rule's 'target' or 'backendService', * and in most cases, also the 'loadBalancingScheme', determine the * type of IP address that you can use. For detailed information, see * [IP address * specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). * * When reading an 'IPAddress', the API always returns the IP * address number. */ ipAddress: string; /** * Resource reference of a PublicDelegatedPrefix. The PDP must be a sub-PDP * in EXTERNAL_IPV6_FORWARDING_RULE_CREATION mode. * Use one of the following formats to specify a sub-PDP when creating an * IPv6 NetLB forwarding rule using BYOIP: * Full resource URL, as in: * * 'https://www.googleapis.com/compute/v1/projects/{{projectId}}/regions/{{region}}/publicDelegatedPrefixes/{{sub-pdp-name}}' * Partial URL, as in: * * 'projects/{{projectId}}/regions/region/publicDelegatedPrefixes/{{sub-pdp-name}}' * * 'regions/{{region}}/publicDelegatedPrefixes/{{sub-pdp-name}}' */ ipCollection: string; /** * The IP protocol to which this rule applies. * * For protocol forwarding, valid * options are 'TCP', 'UDP', 'ESP', * 'AH', 'SCTP', 'ICMP' and * 'L3_DEFAULT'. * * The valid IP protocols are different for different load balancing products * as described in [Load balancing * features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends). 
* * A Forwarding Rule with protocol L3_DEFAULT can attach with target instance or * backend service with UNSPECIFIED protocol. * A forwarding rule with "L3_DEFAULT" IPProtocol cannot be attached to a backend service with TCP or UDP. Possible values: ["TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", "L3_DEFAULT"] */ ipProtocol: string; /** * The IP address version that will be used by this forwarding rule. * Valid options are IPV4 and IPV6. * * If not set, the IPv4 address will be used by default. Possible values: ["IPV4", "IPV6"] */ ipVersion: string; /** * Indicates whether or not this load balancer can be used as a collector for * packet mirroring. To prevent mirroring loops, instances behind this * load balancer will not have their traffic mirrored even if a * 'PacketMirroring' rule applies to them. * * This can only be set to true for load balancers that have their * 'loadBalancingScheme' set to 'INTERNAL'. */ isMirroringCollector: boolean; /** * The fingerprint used for optimistic locking of this resource. Used * internally during updates. */ labelFingerprint: string; /** * Labels to apply to this forwarding rule. A list of key->value pairs. * * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effective_labels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * Specifies the forwarding rule type. * * Note that an empty string value ('""') is also supported for some use * cases, for example PSC (private service connection) regional forwarding * rules. * * For more information about forwarding rules, refer to * [Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts). Default value: "EXTERNAL" Possible values: ["EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED"] */ loadBalancingScheme: string; /** * Name of the resource; provided by the client when the resource is created.
* The name must be 1-63 characters long, and comply with * [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). * * Specifically, the name must be 1-63 characters long and match the regular * expression 'a-z?' which means the first * character must be a lowercase letter, and all following characters must * be a dash, lowercase letter, or digit, except the last character, which * cannot be a dash. * * For Private Service Connect forwarding rules that forward traffic to Google * APIs, the forwarding rule name must be a 1-20 characters string with * lowercase letters and numbers and must start with a letter. */ name: string; /** * This field is not used for external load balancing. * * For Internal TCP/UDP Load Balancing, this field identifies the network that * the load balanced IP should belong to for this Forwarding Rule. * If the subnetwork is specified, the network of the subnetwork will be used. * If neither subnetwork nor this field is specified, the default network will * be used. * * For Private Service Connect forwarding rules that forward traffic to Google * APIs, a network must be provided. */ network: string; /** * This signifies the networking tier used for configuring * this load balancer and can only take the following values: * 'PREMIUM', 'STANDARD'. * * For regional ForwardingRule, the valid values are 'PREMIUM' and * 'STANDARD'. For GlobalForwardingRule, the valid value is * 'PREMIUM'. * * If this field is not specified, it is assumed to be 'PREMIUM'. * If 'IPAddress' is specified, this value must be equal to the * networkTier of the Address. Possible values: ["PREMIUM", "STANDARD"] */ networkTier: string; /** * This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. */ noAutomateDnsZone: boolean; /** * The 'ports', 'portRange', and 'allPorts' fields are mutually exclusive. 
* Only packets addressed to ports in the specified range will be forwarded * to the backends configured with this forwarding rule. * * The 'portRange' field has the following limitations: * * It requires that the forwarding rule 'IPProtocol' be TCP, UDP, or SCTP, * and * * It's applicable only to the following products: external passthrough * Network Load Balancers, internal and external proxy Network Load * Balancers, internal and external Application Load Balancers, external * protocol forwarding, and Classic VPN. * * Some products have restrictions on what ports can be used. See * [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) * for details. * * For external forwarding rules, two or more forwarding rules cannot use the * same '[IPAddress, IPProtocol]' pair, and cannot have overlapping * 'portRange's. * * For internal forwarding rules within the same VPC network, two or more * forwarding rules cannot use the same '[IPAddress, IPProtocol]' pair, and * cannot have overlapping 'portRange's. * * @pattern: \d+(?:-\d+)? */ portRange: string; /** * The 'ports', 'portRange', and 'allPorts' fields are mutually exclusive. * Only packets addressed to ports in the specified range will be forwarded * to the backends configured with this forwarding rule. * * The 'ports' field has the following limitations: * * It requires that the forwarding rule 'IPProtocol' be TCP, UDP, or SCTP, * and * * It's applicable only to the following products: internal passthrough * Network Load Balancers, backend service-based external passthrough Network * Load Balancers, and internal protocol forwarding. * * You can specify a list of up to five ports by number, separated by * commas. The ports can be contiguous or discontiguous. * * For external forwarding rules, two or more forwarding rules cannot use the * same '[IPAddress, IPProtocol]' pair if they share at least one port * number. 
* * For internal forwarding rules within the same VPC network, two or more * forwarding rules cannot use the same '[IPAddress, IPProtocol]' pair if * they share at least one port number. * * @pattern: \d+(?:-\d+)? */ ports: string[]; /** * The name of the project. */ project: string; /** * The PSC connection id of the PSC Forwarding Rule. */ pscConnectionId: string; /** * The PSC connection status of the PSC Forwarding Rule. Possible values: 'STATUS_UNSPECIFIED', 'PENDING', 'ACCEPTED', 'REJECTED', 'CLOSED' */ pscConnectionStatus: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * This is used in PSC consumer ForwardingRule to make terraform recreate the ForwardingRule when the status is closed */ recreateClosedPsc: boolean; /** * The region you want to get the forwarding rules from. * * These arguments must be set in either the provider or the resource in order for the information to be queried. */ region: string; /** * The URI of the resource. */ selfLink: string; /** * Service Directory resources to register this forwarding rule with. * * Currently, only supports a single Service Directory resource. */ serviceDirectoryRegistrations: outputs.compute.GetForwardingRulesRuleServiceDirectoryRegistration[]; /** * An optional prefix to the service name for this Forwarding Rule. * If specified, will be the first label of the fully qualified service * name. * * The label must be 1-63 characters long, and comply with RFC1035. * Specifically, the label must be 1-63 characters long and match the * regular expression 'a-z?' which means the first * character must be a lowercase letter, and all following characters * must be a dash, lowercase letter, or digit, except the last * character, which cannot be a dash. * * This field is only used for INTERNAL load balancing. 
*/ serviceLabel: string; /** * The internal fully qualified service name for this Forwarding Rule. * * This field is only used for INTERNAL load balancing. */ serviceName: string; /** * If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each sourceIpRange entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). */ sourceIpRanges: string[]; /** * This field identifies the subnetwork that the load balanced IP should * belong to for this Forwarding Rule, used in internal load balancing and * network load balancing with IPv6. * * If the network specified is in auto subnet mode, this field is optional. * However, a subnetwork must be specified if the network is in custom subnet * mode or when creating external forwarding rule with IPv6. */ subnetwork: string; /** * The URL of the target resource to receive the matched traffic. For * regional forwarding rules, this target must be in the same region as the * forwarding rule. For global forwarding rules, this target must be a global * load balancing resource. * * The forwarded traffic must be of a type appropriate to the target object. * * For load balancers, see the "Target" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). * * For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. */ target: string; } interface GetForwardingRulesRuleServiceDirectoryRegistration { /** * Service Directory namespace to register the forwarding rule under. */ namespace: string; /** * Service Directory service to register the forwarding rule under. 
*/ service: string; } interface GetGlobalForwardingRuleMetadataFilter { /** * The list of label value pairs that must match labels in the * provided metadata based on filterMatchCriteria * * This list must not be empty and can have at the most 64 entries. */ filterLabels: outputs.compute.GetGlobalForwardingRuleMetadataFilterFilterLabel[]; /** * Specifies how individual filterLabel matches within the list of * filterLabels contribute towards the overall metadataFilter match. * * MATCH_ANY - At least one of the filterLabels must have a matching * label in the provided metadata. * MATCH_ALL - All filterLabels must have matching labels in the * provided metadata. Possible values: ["MATCH_ANY", "MATCH_ALL"] */ filterMatchCriteria: string; } interface GetGlobalForwardingRuleMetadataFilterFilterLabel { /** * The name of the global forwarding rule. * * - - - */ name: string; /** * The value that the label must match. The value has a maximum * length of 1024 characters. */ value: string; } interface GetGlobalForwardingRuleServiceDirectoryRegistration { /** * Service Directory namespace to register the forwarding rule under. */ namespace: string; /** * [Optional] Service Directory region to register this global forwarding rule under. * Default to "us-central1". Only used for PSC for Google APIs. All PSC for * Google APIs Forwarding Rules on the same network should use the same Service * Directory region. */ serviceDirectoryRegion: string; } interface GetHcVpnGatewayParam { /** * Resource manager tags to be bound to the HaVpnGateway. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetHcVpnGatewayVpnInterface { /** * The numeric ID of this VPN gateway interface. */ id: number; /** * URL of the interconnect attachment resource. 
When the value * of this field is present, the VPN Gateway will be used for * IPsec-encrypted Cloud Interconnect; all Egress or Ingress * traffic for this VPN Gateway interface will go through the * specified interconnect attachment resource. * * Not currently available publicly. */ interconnectAttachment: string; /** * The external IP address for this VPN gateway interface. */ ipAddress: string; } interface GetHealthCheckGrpcHealthCheck { /** * The gRPC service name for the health check. * The value of grpcServiceName has the following meanings by convention: * - Empty serviceName means the overall status of all services at the backend. * - Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. * The grpcServiceName can only be ASCII. */ grpcServiceName: string; /** * The port number for the health check request. * Must be specified if portName and portSpecification are not set * or if portSpecification is USE_FIXED_PORT. Valid values are 1 through 65535. */ port: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, gRPC health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; } interface GetHealthCheckGrpcTlsHealthCheck { /** * The gRPC service name for the health check. 
* The value of grpcServiceName has the following meanings by convention: * - Empty serviceName means the overall status of all services at the backend. * - Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. * The grpcServiceName can only be ASCII. */ grpcServiceName: string; /** * The port number for the health check request. * Must be specified if portSpecification is USE_FIXED_PORT. Valid values are 1 through 65535. */ port: number; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': Not supported for GRPC with TLS health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, gRPC with TLS health check follows behavior specified in the 'port' field. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; } interface GetHealthCheckHttp2HealthCheck { /** * The value of the host header in the HTTP2 health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host: string; /** * The TCP port number for the HTTP2 health check request. * The default value is 443. */ port: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. 
* * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, HTTP2 health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; /** * Specifies the type of proxy header to append before sending data to the * backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader: string; /** * The request path of the HTTP2 health check request. * The default value is /. */ requestPath: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response: string; } interface GetHealthCheckHttpHealthCheck { /** * The value of the host header in the HTTP health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host: string; /** * The TCP port number for the HTTP health check request. * The default value is 80. */ port: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. 
* * If not specified, HTTP health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; /** * Specifies the type of proxy header to append before sending data to the * backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader: string; /** * The request path of the HTTP health check request. * The default value is /. */ requestPath: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response: string; } interface GetHealthCheckHttpsHealthCheck { /** * The value of the host header in the HTTPS health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host: string; /** * The TCP port number for the HTTPS health check request. * The default value is 443. */ port: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, HTTPS health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; /** * Specifies the type of proxy header to append before sending data to the * backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader: string; /** * The request path of the HTTPS health check request. * The default value is /. */ requestPath: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response: string; } interface GetHealthCheckLogConfig { /** * Indicates whether or not to export logs. This is false by default, * which means no health check logging will be done. */ enable: boolean; } interface GetHealthCheckSslHealthCheck { /** * The TCP port number for the SSL health check request. * The default value is 443. */ port: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, SSL health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; /** * Specifies the type of proxy header to append before sending data to the * backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader: string; /** * The application data to send once the SSL connection has been * established (default value is empty). If both request and response are * empty, the connection establishment alone will indicate health. The request * data can only be ASCII. 
 */ request: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response: string; } interface GetHealthCheckTcpHealthCheck { /** * The TCP port number for the TCP health check request. * The default value is 80. */ port: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, TCP health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification: string; /** * Specifies the type of proxy header to append before sending data to the * backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader: string; /** * The application data to send once the TCP connection has been * established (default value is empty). If both request and response are * empty, the connection establishment alone will indicate health. The request * data can only be ASCII. */ request: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response: string; } interface GetImagesImage { /** * The size of the image tar.gz archive stored in Google Cloud Storage in bytes. 
*/ archiveSizeBytes: number; /** * The creation timestamp in RFC3339 text format. */ creationTimestamp: string; /** * An optional description of this image. */ description: string; /** * The size of the image when restored onto a persistent disk in gigabytes. */ diskSizeGb: number; /** * The family name of the image. */ family: string; imageId: number; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. */ labels: { [key: string]: string; }; /** * The name of the image. */ name: string; /** * The URI of the image. */ selfLink: string; /** * The URL of the source disk used to create this image. */ sourceDisk: string; /** * The ID value of the disk used to create this image. */ sourceDiskId: string; /** * The ID value of the image used to create this image. */ sourceImageId: string; } interface GetInstanceAdvancedMachineFeature { /** * Whether to enable nested virtualization or not. */ enableNestedVirtualization: boolean; /** * Whether to enable UEFI networking for the instance. */ enableUefiNetworking: boolean; /** * The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are "STANDARD", "ENHANCED", and "ARCHITECTURAL". */ performanceMonitoringUnit: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; /** * Turbo frequency mode to use for the instance. Currently supported modes is "ALL_CORE_MAX". */ turboMode: string; /** * The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width. 
*/ visibleCoreCount: number; } interface GetInstanceAttachedDisk { /** * Name with which the attached disk is accessible * under `/dev/disk/by-id/` */ deviceName: string; /** * A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set. */ diskEncryptionKeyRaw: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set. */ diskEncryptionKeyRsa: string; /** * The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) * encoded SHA-256 hash of the [customer-supplied encryption key] * () that protects this resource. */ diskEncryptionKeySha256: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used */ diskEncryptionServiceAccount: string; /** * Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter cause VM recreation. */ forceAttach: boolean; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set. */ kmsKeySelfLink: string; /** * Read/write mode for the disk. One of `"READ_ONLY"` or `"READ_WRITE"`. */ mode: string; /** * The selfLink of the disk attached to this instance. */ source: string; } interface GetInstanceBootDisk { /** * Whether the disk will be auto-deleted when the instance is deleted. 
*/ autoDelete: boolean; /** * Name with which the attached disk is accessible * under `/dev/disk/by-id/` */ deviceName: string; /** * A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set. */ diskEncryptionKeyRaw: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set. */ diskEncryptionKeyRsa: string; /** * The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) * encoded SHA-256 hash of the [customer-supplied encryption key] * () that protects this resource. */ diskEncryptionKeySha256: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used */ diskEncryptionServiceAccount: string; /** * Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter cause VM recreation. */ forceAttach: boolean; /** * A list of features to enable on the guest operating system. Applicable only for bootable images. */ guestOsFeatures: string[]; /** * Parameters with which a disk was created alongside the instance. * Structure is documented below. */ initializeParams: outputs.compute.GetInstanceBootDiskInitializeParam[]; /** * The disk interface used for attaching this disk. One of `SCSI` or `NVME`. */ interface: string; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set. */ kmsKeySelfLink: string; /** * Read/write mode for the disk. One of `"READ_ONLY"` or `"READ_WRITE"`. 
 */ mode: string; /** * The selfLink of the disk attached to this instance. */ source: string; } interface GetInstanceBootDiskInitializeParam { /** * The architecture of the disk. One of "X86_64" or "ARM64". */ architecture: string; /** * A flag to enable confidential compute mode on boot disk */ enableConfidentialCompute: boolean; /** * The image from which this disk was initialised. */ image: string; /** * A set of key/value label pairs assigned to the disk. */ labels: { [key: string]: string; }; /** * Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. */ provisionedIops: number; /** * Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. */ provisionedThroughput: number; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * A list of selfLinks to resource policies attached to the selected `bootDisk` */ resourcePolicies: string[]; /** * The size of the image in gigabytes. */ size: number; /** * The snapshot from which this disk was initialised. */ snapshot: string; /** * The encryption key used to decrypt the source image. */ sourceImageEncryptionKeys: outputs.compute.GetInstanceBootDiskInitializeParamSourceImageEncryptionKey[]; /** * The encryption key used to decrypt the source snapshot. */ sourceSnapshotEncryptionKeys: outputs.compute.GetInstanceBootDiskInitializeParamSourceSnapshotEncryptionKey[]; /** * The URL of the storage pool in which the new disk is created */ storagePool: string; /** * The GCE disk type of the boot disk. Such as `pd-standard`, `pd-balanced` or `pd-ssd`. 
*/ type: string; } interface GetInstanceBootDiskInitializeParamSourceImageEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; /** * The SHA256 hash of the encryption key used to encrypt this disk. */ sha256: string; } interface GetInstanceBootDiskInitializeParamSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; /** * The SHA256 hash of the encryption key used to encrypt this disk. 
*/ sha256: string; } interface GetInstanceConfidentialInstanceConfig { /** * The confidential computing technology the instance uses. * SEV is an AMD feature. TDX is an Intel feature. One of the following * values is required: SEV, SEV_SNP, TDX. If SEV_SNP, minCpuPlatform = * "AMD Milan" is currently required. */ confidentialInstanceType: string; /** * Defines whether the instance should have confidential compute enabled. Field will be deprecated in a future release */ enableConfidentialCompute: boolean; } interface GetInstanceGroupManagerAllInstancesConfig { /** * The label key-value pairs that you want to patch onto the instance, */ labels: { [key: string]: string; }; /** * The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata, */ metadata: { [key: string]: string; }; } interface GetInstanceGroupManagerAutoHealingPolicy { /** * The health check resource that signals autohealing. */ healthCheck: string; /** * The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600. */ initialDelaySec: number; } interface GetInstanceGroupManagerInstanceLifecyclePolicy { /** * Specifies the action that a MIG performs on a failed VM. If the value of the "onFailedHealthCheck" field is DEFAULT_ACTION, then the same action also applies to the VMs on which your application fails a health check. Valid values are: REPAIR, DO_NOTHING. If REPAIR (default), then MIG automatically repairs a failed VM by recreating it. For more information, see about repairing VMs in a MIG. If DO_NOTHING, then MIG does not repair a failed VM. */ defaultActionOnFailure: string; /** * Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. 
If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type. */ forceUpdateOnRepair: string; /** * Specifies the action that a MIG performs on an unhealthy VM. A VM is marked as unhealthy when the application running on that VM fails a health check. Valid values are: DEFAULT_ACTION, DO_NOTHING, REPAIR. If DEFAULT_ACTION (default), then MIG uses the same action configured for the "defaultActionOnFailure" field. If DO_NOTHING, then MIG does not repair unhealthy VM. If REPAIR, then MIG automatically repairs an unhealthy VM by recreating it. */ onFailedHealthCheck: string; /** * Configuration for VM repairs in the MIG. */ onRepairs: outputs.compute.GetInstanceGroupManagerInstanceLifecyclePolicyOnRepair[]; } interface GetInstanceGroupManagerInstanceLifecyclePolicyOnRepair { /** * Specifies whether the MIG can change a VM's zone during a repair. If "YES", MIG can select a different zone for the VM during a repair. Else if "NO", MIG cannot change a VM's zone during a repair. The default value of allowChangingZone is "NO". */ allowChangingZone: string; } interface GetInstanceGroupManagerNamedPort { /** * The name of the port. */ name: string; /** * The port number. */ port: number; } interface GetInstanceGroupManagerParam { /** * Resource manager tags to bind to the managed instance group. The tags are key-value pairs. Keys must be in the format tagKeys/123 and values in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetInstanceGroupManagerResourcePolicy { /** * The URL of the workload policy that is specified for this managed instance group. It can be a full or partial URL. 
*/ workloadPolicy: string; } interface GetInstanceGroupManagerStandbyPolicy { /** * Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. */ initialDelaySec: number; /** * Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. The default mode is "MANUAL". */ mode: string; } interface GetInstanceGroupManagerStatefulDisk { /** * A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER. */ deleteRule: string; /** * The device name of the disk to be attached. */ deviceName: string; } interface GetInstanceGroupManagerStatefulExternalIp { /** * A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER. */ deleteRule: string; /** * The network interface name */ interfaceName: string; } interface GetInstanceGroupManagerStatefulInternalIp { /** * A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the IP when the VM is deleted, but do not delete the address resource. 
ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER. */ deleteRule: string; /** * The network interface name */ interfaceName: string; } interface GetInstanceGroupManagerStatus { /** * Status of all-instances configuration on the group. */ allInstancesConfigs: outputs.compute.GetInstanceGroupManagerStatusAllInstancesConfig[]; /** * A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. */ isStable: boolean; /** * Stateful status of the given Instance Group Manager. */ statefuls: outputs.compute.GetInstanceGroupManagerStatusStateful[]; /** * A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager. */ versionTargets: outputs.compute.GetInstanceGroupManagerStatusVersionTarget[]; } interface GetInstanceGroupManagerStatusAllInstancesConfig { /** * Current all-instances configuration revision. This value is in RFC3339 text format. */ currentRevision: string; /** * A bit indicating whether this configuration has been applied to all managed instances in the group. */ effective: boolean; } interface GetInstanceGroupManagerStatusStateful { /** * A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. */ hasStatefulConfig: boolean; /** * Status of per-instance configs on the instances. 
*/ perInstanceConfigs: outputs.compute.GetInstanceGroupManagerStatusStatefulPerInstanceConfig[]; } interface GetInstanceGroupManagerStatusStatefulPerInstanceConfig { /** * A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs. */ allEffective: boolean; } interface GetInstanceGroupManagerStatusVersionTarget { /** * A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager. */ isReached: boolean; } interface GetInstanceGroupManagerUpdatePolicy { /** * Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with max_surge_percent. Both cannot be 0 */ maxSurgeFixed: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. Conflicts with max_surge_fixed. */ maxSurgePercent: number; /** * Specifies a fixed number of VM instances. This must be a positive integer. */ maxUnavailableFixed: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. */ maxUnavailablePercent: number; /** * Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600]. */ minReadySec: number; /** * Minimal action to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to update without stopping instances, RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a REFRESH, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. 
*/ minimalAction: string; /** * Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. */ mostDisruptiveAllowedAction: string; /** * The instance replacement method for managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set maxUnavailableFixed or maxUnavailablePercent to be greater than 0. */ replacementMethod: string; /** * The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). */ type: string; } interface GetInstanceGroupManagerVersion { /** * The full URL to an instance template from which all new instances of this version will be created. */ instanceTemplate: string; /** * The name of the instance group. Either `name` or `selfLink` must be provided. */ name: string; /** * The number of instances calculated as a fixed number or a percentage depending on the settings. */ targetSizes: outputs.compute.GetInstanceGroupManagerVersionTargetSize[]; } interface GetInstanceGroupManagerVersionTargetSize { /** * The number of instances which are managed for this version. Conflicts with percent. */ fixed: number; /** * The number of instances (calculated as percentage) which are managed for this version. 
Conflicts with fixed. Note that when using percent, rounding will be in favor of explicitly set targetSize values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version. */ percent: number; } interface GetInstanceGroupNamedPort { /** * The name of the instance group. Either `name` or `selfLink` must be provided. */ name: string; port: number; } interface GetInstanceGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource exposed to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface GetInstanceGuestAttributesQueryValue { /** * Key of the guest_attribute. */ key: string; /** * Namespace of the guest_attribute. */ namespace: string; /** * Value of the guest_attribute. */ value: string; } interface GetInstanceInstanceEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * The SHA256 hash of the customer's encryption key. */ sha256: string; } interface GetInstanceNetworkInterface { /** * Access configurations, i.e. IPs via which this * instance can be accessed via the Internet. Structure documented below. */ accessConfigs: outputs.compute.GetInstanceNetworkInterfaceAccessConfig[]; /** * An array of alias IP ranges for this network interface. Structure documented below. */ aliasIpRanges: outputs.compute.GetInstanceNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. 
*/ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. */ ipv6AccessConfigs: outputs.compute.GetInstanceNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * MAC address assigned to this network interface. */ macAddress: string; /** * The name of the instance. One of `name` or `selfLink` must be provided. */ name: string; /** * The name or selfLink of the network attached to this interface. */ network: string; /** * The URL of the network attachment to this interface. */ networkAttachment: string; /** * The internal ip address of the instance, either manually or dynamically assigned. */ networkIp: string; /** * The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET, IDPF, MRDMA, and IRDMA */ nicType: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount: number; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. 
*/ stackType: string; /** * The name or selfLink of the subnetwork attached to this interface. */ subnetwork: string; /** * The project in which the subnetwork belongs. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan: number; } interface GetInstanceNetworkInterfaceAccessConfig { /** * If the instance has an access config, either the given external ip (in the `natIp` field) or the ephemeral (generated) ip (if you didn't provide one). */ natIp: string; /** * The [networking tier][network-tier] used for configuring this instance. One of `PREMIUM` or `STANDARD`. */ networkTier: string; /** * The DNS domain name for the public PTR record. */ publicPtrDomainName: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface GetInstanceNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. */ ipCidrRange: string; /** * The subnetwork secondary range name specifying * the secondary range from which to allocate the IP CIDR range for this alias IP * range. */ subnetworkRangeName: string; } interface GetInstanceNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. */ externalIpv6PrefixLength: string; /** * The name of the instance. One of `name` or `selfLink` must be provided. */ name: string; /** * The [networking tier][network-tier] used for configuring this instance. 
One of `PREMIUM` or `STANDARD`. */ networkTier: string; /** * The DNS domain name for the public PTR record. */ publicPtrDomainName: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface GetInstanceNetworkPerformanceConfig { /** * The egress bandwidth tier for the instance. */ totalEgressBandwidthTier: string; } interface GetInstanceParam { /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; } interface GetInstanceReservationAffinity { /** * Specifies the label selector for the reservation to use. */ specificReservations: outputs.compute.GetInstanceReservationAffinitySpecificReservation[]; /** * The type of reservation from which this instance can consume resources. One of `ANY_RESERVATION`, `SPECIFIC_RESERVATION`, or `NO_RESERVATION`. */ type: string; } interface GetInstanceReservationAffinitySpecificReservation { /** * Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface GetInstanceScheduling { /** * Specifies if the instance should be * restarted if it was terminated by Compute Engine (not a user). */ automaticRestart: boolean; /** * Specifies the availability domain, which this instance should be scheduled on. */ availabilityDomain: number; /** * Settings for the instance to perform a graceful shutdown. */ gracefulShutdowns: outputs.compute.GetInstanceSchedulingGracefulShutdown[]; /** * Beta Time in seconds for host error detection. 
*/ hostErrorTimeoutSeconds: number; /** * Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) */ instanceTerminationAction: string; /** * Specifies the maximum amount of time a Local Ssd Vm should wait while * recovery of the Local Ssd state is attempted. Its value should be in * between 0 and 168 hours with hour granularity and the default value being 1 * hour. */ localSsdRecoveryTimeouts: outputs.compute.GetInstanceSchedulingLocalSsdRecoveryTimeout[]; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC */ maintenanceInterval: string; /** * The timeout for new network connections to hosts. */ maxRunDurations: outputs.compute.GetInstanceSchedulingMaxRunDuration[]; minNodeCpus: number; /** * Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems. */ nodeAffinities: outputs.compute.GetInstanceSchedulingNodeAffinity[]; /** * Describes maintenance behavior for the * instance. One of `MIGRATE` or `TERMINATE`, for more info, read * [here](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options) */ onHostMaintenance: string; /** * Defines the behaviour for instances with the instance_termination_action. */ onInstanceStopActions: outputs.compute.GetInstanceSchedulingOnInstanceStopAction[]; /** * Whether the instance is preemptible. */ preemptible: boolean; /** * Describe the type of preemptible VM. */ provisioningModel: string; /** * Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown: boolean; /** * Specifies the timestamp, when the instance will be terminated, * in RFC3339 text format. If specified, the instance termination action * will be performed at the termination time. 
*/ terminationTime: string; } interface GetInstanceSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. */ maxDurations: outputs.compute.GetInstanceSchedulingGracefulShutdownMaxDuration[]; } interface GetInstanceSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * The value must be between 1 and 3600, which is 3,600 seconds (one hour). */ seconds: number; } interface GetInstanceSchedulingLocalSsdRecoveryTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetInstanceSchedulingMaxRunDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetInstanceSchedulingNodeAffinity { key: string; operator: string; values: string[]; } interface GetInstanceSchedulingOnInstanceStopAction { /** * If true, the contents of any attached Local SSD disks will be discarded. 
*/ discardLocalSsd: boolean; } interface GetInstanceScratchDisk { /** * Name with which the attached disk is accessible * under `/dev/disk/by-id/` */ deviceName: string; /** * The disk interface used for attaching this disk. One of `SCSI` or `NVME`. */ interface: string; /** * The size of the image in gigabytes. */ size: number; } interface GetInstanceServiceAccount { /** * The service account e-mail address. */ email: string; /** * A list of service scopes. */ scopes: string[]; } interface GetInstanceShieldedInstanceConfig { /** * - Whether integrity monitoring is enabled for the instance. */ enableIntegrityMonitoring: boolean; /** * - Whether secure boot is enabled for the instance. */ enableSecureBoot: boolean; /** * - Whether the instance uses vTPM. */ enableVtpm: boolean; } interface GetInstanceTemplateAdvancedMachineFeature { /** * Whether to enable nested virtualization or not. */ enableNestedVirtualization: boolean; /** * Whether to enable UEFI networking or not. */ enableUefiNetworking: boolean; /** * The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are "STANDARD", "ENHANCED", and "ARCHITECTURAL". */ performanceMonitoringUnit: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; /** * Turbo frequency mode to use for the instance. Currently supported modes is "ALL_CORE_MAX". */ turboMode: string; /** * The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width. 
*/ visibleCoreCount: number; } interface GetInstanceTemplateConfidentialInstanceConfig { /** * The confidential computing technology the instance uses. * SEV is an AMD feature. TDX is an Intel feature. One of the following * values is required: SEV, SEV_SNP, TDX. If SEV_SNP, minCpuPlatform = * "AMD Milan" is currently required. */ confidentialInstanceType: string; /** * Defines whether the instance should have confidential compute enabled. `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM. */ enableConfidentialCompute: boolean; } interface GetInstanceTemplateDisk { /** * The architecture of the image. Allowed values are ARM64 or X86_64. */ architecture: string; /** * Whether or not the disk should be auto-deleted. * This defaults to true. */ autoDelete: boolean; /** * Indicates that this is a boot disk. */ boot: boolean; /** * A unique device name that is reflected into the * /dev/ tree of a Linux operating system running within the instance. If not * specified, the server chooses a default device name to apply to this disk. */ deviceName: string; /** * Encrypts or decrypts a disk using a customer-supplied encryption key. */ diskEncryptionKeys: outputs.compute.GetInstanceTemplateDiskDiskEncryptionKey[]; /** * Name of the disk. When not provided, this defaults * to the name of the instance. */ diskName: string; /** * The size of the image in gigabytes. If not * specified, it will inherit the size of its base image. For SCRATCH disks, * the size must be exactly 375GB. */ diskSizeGb: number; /** * The GCE disk type. Such as `"pd-ssd"`, `"local-ssd"`, * `"pd-balanced"` or `"pd-standard"`. */ diskType: string; /** * A list of features to enable on the guest operating system. Applicable only for bootable images. */ guestOsFeatures: string[]; /** * Specifies the disk interface to use for attaching this disk, * which is either SCSI or NVME. The default is SCSI. 
Persistent disks must always use SCSI * and the request will fail if you attempt to attach a persistent disk in any other format * than SCSI. Local SSDs can use either NVME or SCSI. */ interface: string; /** * (Optional) A set of ket/value label pairs to assign to disk created from * this template */ labels: { [key: string]: string; }; /** * The mode in which to attach this disk, either READ_WRITE * or READ_ONLY. If you are attaching or creating a boot disk, this must * read-write mode. */ mode: string; /** * Indicates how many IOPS to provision for the disk. This * sets the number of I/O operations per second that the disk can handle. * Values must be between 10,000 and 120,000. For more details, see the * [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). */ provisionedIops: number; /** * Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). */ provisionedThroughput: number; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * (Optional) -- A list of short names of resource policies to attach to this disk for automatic snapshot creations. Currently a max of 1 resource policy is supported. */ resourcePolicies: string[]; /** * The name (**not self_link**) * of the disk (such as those managed by `gcp.compute.Disk`) to attach. * > **Note:** Either `source` or `sourceImage` is **required** in a disk block unless the disk type is `local-ssd`. 
Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ source: string; /** * The image from which to * initialize this disk. This can be one of: the image's `selfLink`, * `projects/{project}/global/images/{image}`, * `projects/{project}/global/images/family/{family}`, `global/images/{image}`, * `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, * `{project}/{image}`, `{family}`, or `{image}`. * > **Note:** Either `source` or `sourceImage` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ sourceImage: string; /** * The customer-supplied encryption key of the source * image. Required if the source image is protected by a * customer-supplied encryption key. * * Instance templates do not store customer-supplied * encryption keys, so you cannot create disks for * instances in a managed instance group if the source * images are encrypted with your own keys. */ sourceImageEncryptionKeys: outputs.compute.GetInstanceTemplateDiskSourceImageEncryptionKey[]; /** * The source snapshot to create this disk. When creating * a new instance, one of initializeParams.sourceSnapshot, * initializeParams.sourceImage, or disks.source is * required except for local SSD. */ sourceSnapshot: string; /** * The customer-supplied encryption key of the source snapshot. */ sourceSnapshotEncryptionKeys: outputs.compute.GetInstanceTemplateDiskSourceSnapshotEncryptionKey[]; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface GetInstanceTemplateDiskDiskEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. 
If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; } interface GetInstanceTemplateDiskSourceImageEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink: string; /** * The service account being used for the encryption * request for the given KMS key. If absent, the Compute * Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; } interface GetInstanceTemplateDiskSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink: string; /** * The service account being used for the encryption * request for the given KMS key. If absent, the Compute * Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; } interface GetInstanceTemplateGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. 
*/ type: string; } interface GetInstanceTemplateNetworkInterface { /** * Access configurations, i.e. IPs via which this * instance can be accessed via the Internet. Omit to ensure that the instance * is not accessible from the Internet (this means that ssh provisioners will * not work unless you are running the provider can send traffic to the instance's * network (e.g. via tunnel or because it is running on another cloud instance * on that network). This block can be repeated multiple times. Structure documented below. */ accessConfigs: outputs.compute.GetInstanceTemplateNetworkInterfaceAccessConfig[]; /** * An * array of alias IP ranges for this network interface. Can only be specified for network * interfaces on subnet-mode networks. Structure documented below. */ aliasIpRanges: outputs.compute.GetInstanceTemplateNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. */ ipv6AccessConfigs: outputs.compute.GetInstanceTemplateNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * The name of the instance template. One of `name`, `filter` or `selfLinkUnique` must be provided. 
*/ name: string; /** * The name or selfLink of the network to attach this interface to. * Use `network` attribute for Legacy or Auto subnetted networks and * `subnetwork` for custom subnetted networks. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. */ networkAttachment: string; /** * The private IP address to assign to the instance. If * empty, the address will be automatically assigned. */ networkIp: string; /** * The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET, MRDMA, and IRDMA */ nicType: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount: number; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. */ stackType: string; /** * the name of the subnetwork to attach this interface * to. The subnetwork must exist in the same `region` this instance will be * created in. Either `network` or `subnetwork` must be provided. */ subnetwork: string; /** * The ID of the project in which the subnetwork belongs. * If it is not provided, the provider project is used. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan: number; } interface GetInstanceTemplateNetworkInterfaceAccessConfig { /** * The IP address that will be 1:1 mapped to the instance's * network ip. If not given, one will be generated. */ natIp: string; /** * The [networking tier][network-tier] used for configuring * this instance template. 
This field can take the following values: PREMIUM or * STANDARD. If this field is not specified, it is assumed to be PREMIUM. */ networkTier: string; /** * The DNS domain name for the public PTR record.The DNS domain name for the public PTR record. */ publicPtrDomainName: string; } interface GetInstanceTemplateNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. This IP CIDR range * must belong to the specified subnetwork and cannot contain IP addresses reserved by * system or used by other network interfaces. At the time of writing only a * netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API * error. */ ipCidrRange: string; /** * The subnetwork secondary range name specifying * the secondary range from which to allocate the IP CIDR range for this alias IP * range. If left unspecified, the primary range of the subnetwork will be used. */ subnetworkRangeName: string; } interface GetInstanceTemplateNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. */ externalIpv6PrefixLength: string; /** * The name of the instance template. One of `name`, `filter` or `selfLinkUnique` must be provided. */ name: string; /** * The [networking tier][network-tier] used for configuring * this instance template. This field can take the following values: PREMIUM or * STANDARD. If this field is not specified, it is assumed to be PREMIUM. */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName: string; } interface GetInstanceTemplateNetworkPerformanceConfig { /** * The egress bandwidth tier for the instance. 
*/ totalEgressBandwidthTier: string; } interface GetInstanceTemplateReservationAffinity { /** * Specifies the label selector for the reservation to use. */ specificReservations: outputs.compute.GetInstanceTemplateReservationAffinitySpecificReservation[]; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface GetInstanceTemplateReservationAffinitySpecificReservation { /** * The key for the node affinity label. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface GetInstanceTemplateScheduling { /** * Specifies whether the instance should be * automatically restarted if it is terminated by Compute Engine (not * terminated by a user). This defaults to true. */ automaticRestart: boolean; /** * Specifies the availability domain, which this instance should be scheduled on. */ availabilityDomain: number; /** * Settings for the instance to perform a graceful shutdown. */ gracefulShutdowns: outputs.compute.GetInstanceTemplateSchedulingGracefulShutdown[]; /** * Beta Time in seconds for host error detection. */ hostErrorTimeoutSeconds: number; /** * Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) */ instanceTerminationAction: string; /** * Specifies the maximum amount of time a Local Ssd Vm should wait while * recovery of the Local Ssd state is attempted. Its value should be in * between 0 and 168 hours with hour granularity and the default value being 1 * hour. */ localSsdRecoveryTimeouts: outputs.compute.GetInstanceTemplateSchedulingLocalSsdRecoveryTimeout[]; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC */ maintenanceInterval: string; /** * The timeout for new network connections to hosts. 
*/ maxRunDurations: outputs.compute.GetInstanceTemplateSchedulingMaxRunDuration[]; /** * Minimum number of cpus for the instance. */ minNodeCpus: number; /** * Specifies node affinities or anti-affinities * to determine which sole-tenant nodes your instances and managed instance * groups will use as host systems. Read more on sole-tenant node creation * [here](https://cloud.google.com/compute/docs/nodes/create-nodes). * Structure documented below. */ nodeAffinities: outputs.compute.GetInstanceTemplateSchedulingNodeAffinity[]; /** * Defines the maintenance behavior for this * instance. */ onHostMaintenance: string; /** * Defines the behaviour for instances with the instance_termination_action. */ onInstanceStopActions: outputs.compute.GetInstanceTemplateSchedulingOnInstanceStopAction[]; /** * Allows instance to be preempted. This defaults to * false. Read more on this * [here](https://cloud.google.com/compute/docs/instances/preemptible). */ preemptible: boolean; /** * Describe the type of preemptible VM. */ provisioningModel: string; /** * Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown: boolean; /** * Specifies the timestamp, when the instance will be terminated, * in RFC3339 text format. If specified, the instance termination action * will be performed at the termination time. */ terminationTime: string; } interface GetInstanceTemplateSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. */ maxDurations: outputs.compute.GetInstanceTemplateSchedulingGracefulShutdownMaxDuration[]; } interface GetInstanceTemplateSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. 
Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * The value must be between 1 and 3600, which is 3,600 seconds (one hour). */ seconds: number; } interface GetInstanceTemplateSchedulingLocalSsdRecoveryTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetInstanceTemplateSchedulingMaxRunDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetInstanceTemplateSchedulingNodeAffinity { /** * The key for the node affinity label. */ key: string; /** * The operator. Can be `IN` for node-affinities * or `NOT_IN` for anti-affinities. */ operator: string; values: string[]; } interface GetInstanceTemplateSchedulingOnInstanceStopAction { /** * If true, the contents of any attached Local SSD disks will be discarded. */ discardLocalSsd: boolean; } interface GetInstanceTemplateServiceAccount { /** * The service account e-mail address. If not given, the * default Google Compute Engine service account is used. */ email: string; /** * A list of service scopes. Both OAuth2 URLs and gcloud * short names are supported. To allow full access to all Cloud APIs, use the * `cloud-platform` scope. 
See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes). */ scopes: string[]; } interface GetInstanceTemplateShieldedInstanceConfig { /** * - Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true. */ enableIntegrityMonitoring: boolean; /** * - Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false. */ enableSecureBoot: boolean; /** * - Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true. */ enableVtpm: boolean; } interface GetInterconnectLocationsLocation { /** * The postal address of the Point of Presence. */ address: string; /** * The availability zone for this InterconnectLocation. */ availabilityZone: string; /** * A list of features available at this InterconnectLocation. */ availableFeatures: string[]; /** * A list of link types available at this InterconnectLocation. */ availableLinkTypes: string[]; /** * The city for this location. */ city: string; /** * The continent for this location. */ continent: string; /** * A textual description of the resource. */ description: string; /** * The name of the provider for this facility. */ facilityProvider: string; /** * A provider-assigned Identifier for this facility. */ facilityProviderFacilityId: string; name: string; /** * The PeeringDB facility ID for this facility. */ peeringdbFacilityId: string; /** * The URI of the created resource. */ selfLink: string; /** * The status of this InterconnectLocation. */ status: string; /** * Reserved for future use. */ supportsPzs: boolean; } interface GetMachineTypesMachineType { /** * A list of accelerator configurations assigned to this machine type. Structure is documented below. 
*/ accelerators: outputs.compute.GetMachineTypesMachineTypeAccelerator[]; /** * (Beta) The configuration of bundled local SSD for the machine type. Structure is documented below. */ bundledLocalSsds: outputs.compute.GetMachineTypesMachineTypeBundledLocalSsd[]; /** * The deprecation status associated with this machine type. Structure is documented below. */ deprecateds: outputs.compute.GetMachineTypesMachineTypeDeprecated[]; /** * A textual description of the machine type. */ description: string; /** * The number of virtual CPUs that are available to the instance. */ guestCpus: number; /** * Whether this machine type has a shared CPU. */ isSharedCpus: boolean; /** * The maximum persistent disks allowed. */ maximumPersistentDisks: number; /** * The maximum total persistent disks size (GB) allowed. */ maximumPersistentDisksSizeGb: number; /** * The amount of physical memory available to the instance, defined in MB. */ memoryMb: number; /** * The name of the machine type. */ name: string; /** * The server-defined URL for the machine type. */ selfLink: string; } interface GetMachineTypesMachineTypeAccelerator { /** * Number of accelerator cards exposed to the guest. */ guestAcceleratorCount: number; /** * The accelerator type resource name, not a full URL, e.g. `nvidia-tesla-t4`. */ guestAcceleratorType: string; } interface GetMachineTypesMachineTypeBundledLocalSsd { /** * (Beta) The default disk interface if the interface is not specified. */ defaultInterface: string; /** * (Beta) The number of partitions. */ partitionCount: number; } interface GetMachineTypesMachineTypeDeprecated { /** * The URL of the suggested replacement for a deprecated machine type. */ replacement: string; /** * The deprecation state of this resource. This can be `ACTIVE`, `DEPRECATED`, `OBSOLETE`, or `DELETED`. */ state: string; } interface GetNetworkAttachmentConnectionEndpoint { /** * The IPv4 address assigned to the producer instance network interface. 
This value will be a range in case of Serverless. */ ipAddress: string; /** * The project id or number of the interface to which the IP was assigned. */ projectIdOrNum: string; /** * Alias IP ranges from the same subnetwork. */ secondaryIpCidrRanges: string; /** * The status of a connected endpoint to this network attachment. */ status: string; /** * The subnetwork used to assign the IP to the producer instance network interface. */ subnetwork: string; } interface GetRegionBackendServiceBackend { /** * Specifies the balancing mode for this backend. * * See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) * for an explanation of load balancing modes. Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION", "CUSTOM_METRICS"] */ balancingMode: string; /** * A multiplier applied to the group's maximum servicing capacity * (based on UTILIZATION, RATE or CONNECTION). * * ~>**NOTE**: This field cannot be set for * INTERNAL region backend services (default loadBalancingScheme), * but is required for non-INTERNAL backend service. The total * capacityScaler for all backends must be non-zero. * * A setting of 0 means the group is completely drained, offering * 0% of its available Capacity. Valid range is [0.0,1.0]. */ capacityScaler: number; /** * The set of custom metrics that are used for CUSTOM_METRICS BalancingMode. */ customMetrics: outputs.compute.GetRegionBackendServiceBackendCustomMetric[]; /** * An optional description of this resource. * Provide this property when you create the resource. */ description: string; /** * This field designates whether this is a failover backend. More * than one failover backend can be configured for a given RegionBackendService. */ failover: boolean; /** * The fully-qualified URL of an Instance Group or Network Endpoint * Group resource. In case of instance group this defines the list * of instances that serve traffic. 
Member virtual machine * instances from each instance group must live in the same zone as * the instance group itself. No two backends in a backend service * are allowed to use same Instance Group resource. * * For Network Endpoint Groups this defines list of endpoints. All * endpoints of Network Endpoint Group must be hosted on instances * located in the same zone as the Network Endpoint Group. * * Backend services cannot mix Instance Group and * Network Endpoint Group backends. * * When the 'load_balancing_scheme' is INTERNAL, only instance groups * are supported. * * Note that you must specify an Instance Group or Network Endpoint * Group resource using the fully-qualified URL, rather than a * partial URL. */ group: string; /** * The max number of simultaneous connections for the group. Can * be used with either CONNECTION or UTILIZATION balancing modes. * Cannot be set for INTERNAL backend services. * * For CONNECTION mode, either maxConnections or one * of maxConnectionsPerInstance or maxConnectionsPerEndpoint, * as appropriate for group type, must be set. */ maxConnections: number; /** * The max number of simultaneous connections that a single backend * network endpoint can handle. Cannot be set * for INTERNAL backend services. * * This is used to calculate the capacity of the group. Can be * used in either CONNECTION or UTILIZATION balancing modes. For * CONNECTION mode, either maxConnections or * maxConnectionsPerEndpoint must be set. */ maxConnectionsPerEndpoint: number; /** * The max number of simultaneous connections that a single * backend instance can handle. Cannot be set for INTERNAL backend * services. * * This is used to calculate the capacity of the group. * Can be used in either CONNECTION or UTILIZATION balancing modes. * For CONNECTION mode, either maxConnections or * maxConnectionsPerInstance must be set. */ maxConnectionsPerInstance: number; /** * Defines a maximum number of in-flight requests for the whole NEG * or instance group. 
Not available if backend's balancingMode is RATE * or CONNECTION. */ maxInFlightRequests: number; /** * Defines a maximum number of in-flight requests for a single endpoint. * Not available if backend's balancingMode is RATE or CONNECTION. */ maxInFlightRequestsPerEndpoint: number; /** * Defines a maximum number of in-flight requests for a single VM. * Not available if backend's balancingMode is RATE or CONNECTION. */ maxInFlightRequestsPerInstance: number; /** * The max requests per second (RPS) of the group. Cannot be set * for INTERNAL backend services. * * Can be used with either RATE or UTILIZATION balancing modes, * but required if RATE mode. Either maxRate or one * of maxRatePerInstance or maxRatePerEndpoint, as appropriate for * group type, must be set. */ maxRate: number; /** * The max requests per second (RPS) that a single backend network * endpoint can handle. This is used to calculate the capacity of * the group. Can be used in either balancing mode. For RATE mode, * either maxRate or maxRatePerEndpoint must be set. Cannot be set * for INTERNAL backend services. */ maxRatePerEndpoint: number; /** * The max requests per second (RPS) that a single backend * instance can handle. This is used to calculate the capacity of * the group. Can be used in either balancing mode. For RATE mode, * either maxRate or maxRatePerInstance must be set. Cannot be set * for INTERNAL backend services. */ maxRatePerInstance: number; /** * Used when balancingMode is UTILIZATION. This ratio defines the * CPU utilization target for the group. Valid range is [0.0, 1.0]. * Cannot be set for INTERNAL backend services. */ maxUtilization: number; /** * This field specifies how long a connection should be kept alive for: * - LONG: Most of the requests are expected to take more than multiple * seconds to finish. * - SHORT: Most requests are expected to finish with a sub-second latency. 
Possible values: ["LONG", "SHORT"] */ trafficDuration: string; } interface GetRegionBackendServiceBackendCustomMetric { /** * If true, the metric data is collected and reported to Cloud * Monitoring, but is not used for load balancing. */ dryRun: boolean; /** * Optional parameter to define a target utilization for the Custom Metrics * balancing mode. The valid range is [0.0, 1.0]. */ maxUtilization: number; /** * The name of the custom metric. * NOTE(review): the generated doc read "The name of the regional backend service", * which looks like a doc-generation leak from the data source's `name` argument — verify upstream. */ name: string; } interface GetRegionBackendServiceCdnPolicy { /** * The CacheKeyPolicy for this CdnPolicy. */ cacheKeyPolicies: outputs.compute.GetRegionBackendServiceCdnPolicyCacheKeyPolicy[]; /** * Specifies the cache setting for all responses from this backend. * The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"] */ clientTtl: number; /** comment moved */ defaultTtl: number;
*/ serveWhileStale: number; /** * Maximum number of seconds the response to a signed URL request * will be considered fresh, defaults to 1hr (3600s). After this * time period, the response will be revalidated before * being served. * * When serving responses to signed URL requests, Cloud CDN will * internally behave as though all responses from this backend had a * "Cache-Control: public, max-age=[TTL]" header, regardless of any * existing Cache-Control header. The actual headers served in * responses will not be altered. */ signedUrlCacheMaxAgeSec: number; } interface GetRegionBackendServiceCdnPolicyCacheKeyPolicy { /** * If true requests to different hosts will be cached separately. */ includeHost: boolean; /** * Names of cookies to include in cache keys. */ includeNamedCookies: string[]; /** * If true, http and https requests will be cached separately. */ includeProtocol: boolean; /** * If true, include query string parameters in the cache key * according to queryStringWhitelist and * query_string_blacklist. If neither is set, the entire query * string will be included. * * If false, the query string will be excluded from the cache * key entirely. */ includeQueryString: boolean; /** * Names of query string parameters to exclude in cache keys. * * All other parameters will be included. Either specify * queryStringWhitelist or query_string_blacklist, not both. * '&' and '=' will be percent encoded and not treated as * delimiters. */ queryStringBlacklists: string[]; /** * Names of query string parameters to include in cache keys. * * All other parameters will be excluded. Either specify * queryStringWhitelist or query_string_blacklist, not both. * '&' and '=' will be percent encoded and not treated as * delimiters. */ queryStringWhitelists: string[]; } interface GetRegionBackendServiceCdnPolicyNegativeCachingPolicy { /** * The HTTP status code to define a TTL against. 
Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 * can be specified as values, and you cannot specify a status code more than once. */ code: number; /** * The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s * (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. */ ttl: number; } interface GetRegionBackendServiceCircuitBreaker { /** * The timeout for new network connections to hosts. */ connectTimeouts: outputs.compute.GetRegionBackendServiceCircuitBreakerConnectTimeout[]; /** * The maximum number of connections to the backend cluster. * Defaults to 1024. */ maxConnections: number; /** * The maximum number of pending requests to the backend cluster. * Defaults to 1024. */ maxPendingRequests: number; /** * The maximum number of parallel requests to the backend cluster. * Defaults to 1024. */ maxRequests: number; /** * Maximum requests for a single backend connection. This parameter * is respected by both the HTTP/1.1 and HTTP/2 implementations. If * not specified, there is no limit. Setting this parameter to 1 * will effectively disable keep alive. */ maxRequestsPerConnection: number; /** * The maximum number of parallel retries to the backend cluster. * Defaults to 3. */ maxRetries: number; } interface GetRegionBackendServiceCircuitBreakerConnectTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetRegionBackendServiceConnectionTrackingPolicy { /** * Specifies connection persistence when backends are unhealthy. 
* * If set to 'DEFAULT_FOR_PROTOCOL', the existing connections persist on * unhealthy backends only for connection-oriented protocols (TCP and SCTP) * and only if the Tracking Mode is PER_CONNECTION (default tracking mode) * or the Session Affinity is configured for 5-tuple. They do not persist * for UDP. * * If set to 'NEVER_PERSIST', after a backend becomes unhealthy, the existing * connections on the unhealthy backend are never persisted on the unhealthy * backend. They are always diverted to newly selected healthy backends * (unless all backends are unhealthy). * * If set to 'ALWAYS_PERSIST', existing connections always persist on * unhealthy backends regardless of protocol and session affinity. It is * generally not recommended to use this mode overriding the default. Default value: "DEFAULT_FOR_PROTOCOL" Possible values: ["DEFAULT_FOR_PROTOCOL", "NEVER_PERSIST", "ALWAYS_PERSIST"] */ connectionPersistenceOnUnhealthyBackends: string; /** * Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. */ enableStrongAffinity: boolean; /** * Specifies how long to keep a Connection Tracking entry while there is * no matching traffic (in seconds). * * For L4 ILB the minimum(default) is 10 minutes and maximum is 16 hours. * * For NLB the minimum(default) is 60 seconds and the maximum is 16 hours. */ idleTimeoutSec: number; /** * Specifies the key used for connection tracking. There are two options: * 'PER_CONNECTION': The Connection Tracking is performed as per the * Connection Key (default Hash Method) for the specific protocol. * * 'PER_SESSION': The Connection Tracking is performed as per the * configured Session Affinity. It matches the configured Session Affinity. Default value: "PER_CONNECTION" Possible values: ["PER_CONNECTION", "PER_SESSION"] */ trackingMode: string; } interface GetRegionBackendServiceConsistentHash { /** * Hash is based on HTTP Cookie. 
This field describes a HTTP cookie * that will be used as the hash key for the consistent hash load * balancer. If the cookie is not present, it will be generated. * This field is applicable if the sessionAffinity is set to HTTP_COOKIE. */ httpCookies: outputs.compute.GetRegionBackendServiceConsistentHashHttpCooky[]; /** * The hash based on the value of the specified header field. * This field is applicable if the sessionAffinity is set to HEADER_FIELD. */ httpHeaderName: string; /** * The minimum number of virtual nodes to use for the hash ring. * Larger ring sizes result in more granular load * distributions. If the number of hosts in the load balancing pool * is larger than the ring size, each host will be assigned a single * virtual node. * Defaults to 1024. */ minimumRingSize: number; } interface GetRegionBackendServiceConsistentHashHttpCooky { /** * The name of the regional backend service. */ name: string; /** * Path to set for the cookie. */ path: string; /** * Lifetime of the cookie. */ ttls: outputs.compute.GetRegionBackendServiceConsistentHashHttpCookyTtl[]; } interface GetRegionBackendServiceConsistentHashHttpCookyTtl { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetRegionBackendServiceCustomMetric { /** * If true, the metric data is not used for load balancing. */ dryRun: boolean; /** * The name of the regional backend service. */ name: string; } interface GetRegionBackendServiceDynamicForwarding { /** * IP:PORT based dynamic forwarding configuration. 
*/ ipPortSelections: outputs.compute.GetRegionBackendServiceDynamicForwardingIpPortSelection[]; } interface GetRegionBackendServiceDynamicForwardingIpPortSelection { /** * A boolean flag enabling IP:PORT based dynamic forwarding. */ enabled: boolean; } interface GetRegionBackendServiceFailoverPolicy { /** * On failover or failback, this field indicates whether connection drain * will be honored. Setting this to true has the following effect: connections * to the old active pool are not drained. Connections to the new active pool * use the timeout of 10 min (currently fixed). Setting to false has the * following effect: both old and new connections will have a drain timeout * of 10 min. * This can be set to true only if the protocol is TCP. * The default is false. */ disableConnectionDrainOnFailover: boolean; /** * This option is used only when no healthy VMs are detected in the primary * and backup instance groups. When set to true, traffic is dropped. When * set to false, new connections are sent across all VMs in the primary group. * The default is false. */ dropTrafficIfUnhealthy: boolean; /** * The value of the field must be in [0, 1]. If the ratio of the healthy * VMs in the primary backend is at or below this number, traffic arriving * at the load-balanced IP will be directed to the failover backend. * In case where 'failoverRatio' is not set or all the VMs in the backup * backend are unhealthy, the traffic will be directed back to the primary * backend in the "force" mode, where traffic will be spread to the healthy * VMs with the best effort, or to all VMs when no VM is healthy. * This field is only used with l4 load balancing. */ failoverRatio: number; } interface GetRegionBackendServiceHaPolicy { /** * Specifies whether fast IP move is enabled, and if so, the mechanism to achieve it. * Supported values are: * * * 'DISABLED': Fast IP Move is disabled. You can only use the haPolicy.leader API to * update the leader. 
* * * 'GARP_RA': Provides a method to very quickly define a new network endpoint as the * leader. This method is faster than updating the leader using the * haPolicy.leader API. Fast IP move works as follows: The VM hosting the * network endpoint that should become the new leader sends either a * Gratuitous ARP (GARP) packet (IPv4) or an ICMPv6 Router Advertisement(RA) * packet (IPv6). Google Cloud immediately but temporarily associates the * forwarding rule IP address with that VM, and both new and in-flight packets * are quickly delivered to that VM. Possible values: ["DISABLED", "GARP_RA"] */ fastIpMove: string; /** * Selects one of the network endpoints attached to the backend NEGs of this service as the * active endpoint (the leader) that receives all traffic. */ leaders: outputs.compute.GetRegionBackendServiceHaPolicyLeader[]; } interface GetRegionBackendServiceHaPolicyLeader { /** * A fully-qualified URL of the zonal Network Endpoint Group (NEG) that the leader is * attached to. */ backendGroup: string; /** * The network endpoint within the leader.backendGroup that is designated as the leader. */ networkEndpoints: outputs.compute.GetRegionBackendServiceHaPolicyLeaderNetworkEndpoint[]; } interface GetRegionBackendServiceHaPolicyLeaderNetworkEndpoint { /** * The name of the VM instance of the leader network endpoint. The instance must * already be attached to the NEG specified in the haPolicy.leader.backendGroup. */ instance: string; } interface GetRegionBackendServiceIap { /** * Whether the serving infrastructure will authenticate and authorize all incoming requests. */ enabled: boolean; /** * OAuth2 Client ID for IAP */ oauth2ClientId: string; /** * OAuth2 Client Secret for IAP */ oauth2ClientSecret: string; /** * OAuth2 Client Secret SHA-256 for IAP */ oauth2ClientSecretSha256: string; } interface GetRegionBackendServiceLogConfig { /** * Whether to enable logging for the load balancer traffic served by this backend service. 
*/ enable: boolean; /** * Specifies the fields to include in logging. This field can only be specified if logging is enabled for this backend service. */ optionalFields: string[]; /** * Specifies the optional logging mode for the load balancer traffic. * Supported values: INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM. Possible values: ["INCLUDE_ALL_OPTIONAL", "EXCLUDE_ALL_OPTIONAL", "CUSTOM"] */ optionalMode: string; /** * This field can only be specified if logging is enabled for this backend service. The value of * the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer * where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. * The default value is 1.0. */ sampleRate: number; } interface GetRegionBackendServiceNetworkPassThroughLbTrafficPolicy { /** * When configured, new connections are load balanced across healthy backend endpoints in the local zone. */ zonalAffinities: outputs.compute.GetRegionBackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity[]; } interface GetRegionBackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity { /** * This field indicates whether zonal affinity is enabled or not. Default value: "ZONAL_AFFINITY_DISABLED" Possible values: ["ZONAL_AFFINITY_DISABLED", "ZONAL_AFFINITY_SPILL_CROSS_ZONE", "ZONAL_AFFINITY_STAY_WITHIN_ZONE"] */ spillover: string; /** * The value of the field must be in [0, 1]. When the ratio of the count of healthy backend endpoints in a zone * to the count of backend endpoints in that same zone is equal to or above this threshold, the load balancer * distributes new connections to all healthy endpoints in the local zone only. When the ratio of the count * of healthy backend endpoints in a zone to the count of backend endpoints in that same zone is below this * threshold, the load balancer distributes all new connections to all healthy endpoints across all zones. 
*/ spilloverRatio: number; } interface GetRegionBackendServiceOutlierDetection { /** * The base time that a host is ejected for. The real time is equal to the base * time multiplied by the number of times the host has been ejected. Defaults to * 30000ms or 30s. */ baseEjectionTimes: outputs.compute.GetRegionBackendServiceOutlierDetectionBaseEjectionTime[]; /** * Number of errors before a host is ejected from the connection pool. When the * backend host is accessed over HTTP, a 5xx return code qualifies as an error. * Defaults to 5. */ consecutiveErrors: number; /** * The number of consecutive gateway failures (502, 503, 504 status or connection * errors that are mapped to one of those status codes) before a consecutive * gateway failure ejection occurs. Defaults to 5. */ consecutiveGatewayFailure: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through consecutive 5xx. This setting can be used to disable * ejection or to ramp it up slowly. Defaults to 100. */ enforcingConsecutiveErrors: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through consecutive gateway failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 0. */ enforcingConsecutiveGatewayFailure: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through success rate statistics. This setting can be used to * disable ejection or to ramp it up slowly. Defaults to 100. */ enforcingSuccessRate: number; /** * Time interval between ejection sweep analysis. This can result in both new * ejections as well as hosts being returned to service. Defaults to 10 seconds. */ intervals: outputs.compute.GetRegionBackendServiceOutlierDetectionInterval[]; /** * Maximum percentage of hosts in the load balancing pool for the backend service * that can be ejected. Defaults to 10%. 
*/ maxEjectionPercent: number; /** * The number of hosts in a cluster that must have enough request volume to detect * success rate outliers. If the number of hosts is less than this setting, outlier * detection via success rate statistics is not performed for any host in the * cluster. Defaults to 5. */ successRateMinimumHosts: number; /** * The minimum number of total requests that must be collected in one interval (as * defined by the interval duration above) to include this host in success rate * based outlier detection. If the volume is lower than this setting, outlier * detection via success rate statistics is not performed for that host. Defaults * to 100. */ successRateRequestVolume: number; /** * This factor is used to determine the ejection threshold for success rate outlier * ejection. The ejection threshold is the difference between the mean success * rate, and the product of this factor and the standard deviation of the mean * success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided * by a thousand to get a double. That is, if the desired factor is 1.9, the * runtime value should be 1900. Defaults to 1900. */ successRateStdevFactor: number; } interface GetRegionBackendServiceOutlierDetectionBaseEjectionTime { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations * less than one second are represented with a 0 'seconds' field and a positive * 'nanos' field. Must be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 * inclusive. */ seconds: number; } interface GetRegionBackendServiceOutlierDetectionInterval { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations * less than one second are represented with a 0 'seconds' field and a positive * 'nanos' field. Must be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. 
Must be from 0 to 315,576,000,000 * inclusive. */ seconds: number; } interface GetRegionBackendServiceParam { /** * Resource manager tags to be bound to the region backend service. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetRegionBackendServiceStrongSessionAffinityCooky { /** * The name of the regional backend service. */ name: string; /** * Path to set for the cookie. */ path: string; /** * Lifetime of the cookie. */ ttls: outputs.compute.GetRegionBackendServiceStrongSessionAffinityCookyTtl[]; } interface GetRegionBackendServiceStrongSessionAffinityCookyTtl { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetRegionBackendServiceSubsetting { /** * The algorithm used for subsetting. Possible values: ["CONSISTENT_HASH_SUBSETTING"] */ policy: string; /** * The number of backends per backend group assigned to each proxy instance or each service mesh client. * An input parameter to the CONSISTENT_HASH_SUBSETTING algorithm. Can only be set if policy is set to * CONSISTENT_HASH_SUBSETTING. Can only be set if load balancing scheme is INTERNAL_MANAGED or INTERNAL_SELF_MANAGED. * subsetSize is optional for Internal HTTP(S) load balancing and required for Traffic Director. * If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number * of proxies/clients visible to each backend and vice versa. * Must be greater than 0. If subsetSize is larger than the number of backends/endpoints, then subsetting is disabled. 
*/ subsetSize: number; } interface GetRegionBackendServiceTlsSetting { /** * Reference to the BackendAuthenticationConfig resource from the networksecurity.googleapis.com namespace. * Can be used in authenticating TLS connections to the backend, as specified by the authenticationMode field. * Can only be specified if authenticationMode is not NONE. */ authenticationConfig: string; /** * Server Name Indication - see RFC3546 section 3.1. If set, the load balancer sends this string as the SNI hostname in the * TLS connection to the backend, and requires that this string match a Subject Alternative Name (SAN) in the backend's * server certificate. With a Regional Internet NEG backend, if the SNI is specified here, the load balancer uses it * regardless of whether the Regional Internet NEG is specified with FQDN or IP address and port. */ sni: string; /** * A list of Subject Alternative Names (SANs) that the Load Balancer verifies during a TLS handshake with the backend. * When the server presents its X.509 certificate to the Load Balancer, the Load Balancer inspects the certificate's SAN field, * and requires that at least one SAN match one of the subjectAltNames in the list. This field is limited to 5 entries. * When both sni and subjectAltNames are specified, the load balancer matches the backend certificate's SAN only to * subjectAltNames. */ subjectAltNames: outputs.compute.GetRegionBackendServiceTlsSettingSubjectAltName[]; } interface GetRegionBackendServiceTlsSettingSubjectAltName { /** * The SAN specified as a DNS Name. */ dnsName: string; /** * The SAN specified as a URI. */ uniformResourceIdentifier: string; } interface GetRegionDiskAsyncPrimaryDisk { /** * Primary disk for asynchronous disk replication. */ disk: string; } interface GetRegionDiskDiskEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. 
*/ kmsKeyName: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit * customer-supplied encryption key to either encrypt or decrypt * this resource. You can provide either the rawKey or the rsaEncryptedKey. */ rsaEncryptedKey: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface GetRegionDiskGuestOsFeature { /** * The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. Possible values: ["MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", "TDX_CAPABLE"] */ type: string; } interface GetRegionDiskSourceSnapshotEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. */ kmsKeyName: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface GetRegionInstanceGroupInstance { /** * URL to the instance. */ instance: string; /** * List of named ports in the group, as a list of resources, each containing: */ namedPorts: outputs.compute.GetRegionInstanceGroupInstanceNamedPort[]; /** * String description of current state of the instance. */ status: string; } interface GetRegionInstanceGroupInstanceNamedPort { /** * The name of the instance group. One of `name` or `selfLink` must be provided. 
*/ name: string; /** * Integer port number */ port: number; } interface GetRegionInstanceGroupManagerAllInstancesConfig { /** * The label key-value pairs that you want to patch onto the instance, */ labels: { [key: string]: string; }; /** * The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata, */ metadata: { [key: string]: string; }; } interface GetRegionInstanceGroupManagerAutoHealingPolicy { /** * The health check resource that signals autohealing. */ healthCheck: string; /** * The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600. */ initialDelaySec: number; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicy { /** * Named instance selections configuring properties that the group will use when creating new VMs. */ instanceSelections: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection[]; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection { /** * List of disks to be attached to the instances created from this selection. */ disks: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDisk[]; /** * Full machine-type names, e.g. "n1-standard-16" */ machineTypes: string[]; /** * Name of the minimum CPU platform to be used by this instance selection. e.g. 'Intel Ice Lake' */ minCpuPlatform: string; /** * The name of the instance group. Either `name` or `selfLink` must be provided. */ name: string; /** * Preference of this instance selection. Lower number means higher preference. MIG will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference. 
*/ rank: number; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDisk { /** * The architecture of the image. Allowed values are ARM64 or X86_64. */ architecture: string; /** * Whether or not the disk should be auto-deleted. This defaults to true. */ autoDelete: boolean; /** * Indicates that this is a boot disk. This defaults to false. */ boot: boolean; /** * A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk. */ deviceName: string; /** * Encrypts or decrypts a disk using a customer-supplied encryption key. */ diskEncryptionKeys: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskDiskEncryptionKey[]; /** * Name of the disk. When not provided, this defaults to the name of the instance. */ diskName: string; /** * The size of the image in gigabytes. If not specified, it will inherit the size of its base image. For SCRATCH disks, the size must be one of 375 or 3000 GB, with a default of 375 GB. */ diskSizeGb: number; /** * The Google Compute Engine disk type. Such as "pd-ssd", "local-ssd", "pd-balanced" or "pd-standard". */ diskType: string; /** * A list of features to enable on the guest operating system. Applicable only for bootable images. */ guestOsFeatures: string[]; /** * Specifies the disk interface to use for attaching this disk. */ interface: string; /** * A set of key/value label pairs to assign to disks. */ labels: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskLabel[]; /** * The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If you are attaching or creating a boot disk, this must read-write mode. */ mode: string; /** * Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. 
For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type. */ provisionedIops: number; /** * Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). */ provisionedThroughput: number; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskResourceManagerTag[]; /** * A list (short name or id) of resource policies to attach to this disk. Currently a max of 1 resource policy is supported. */ resourcePolicies: string[]; /** * The name (not self_link) of the disk (such as those managed by google_compute_disk) to attach. > Note: Either source or sourceImage is required when creating a new instance except for when creating a local SSD. */ source: string; /** * The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. > Note: Either source or sourceImage is required when creating a new instance except for when creating a local SSD. */ sourceImage: string; /** * The customer-supplied encryption key of the source image. 
Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. */ sourceImageEncryptionKeys: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceImageEncryptionKey[]; /** * The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot, initializeParams.sourceImage, or disks.source is required except for local SSD. */ sourceSnapshot: string; /** * The customer-supplied encryption key of the source snapshot. */ sourceSnapshotEncryptionKeys: outputs.compute.GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceSnapshotEncryptionKey[]; /** * The type of Google Compute Engine disk, can be either "SCRATCH" or "PERSISTENT". */ type: string; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskDiskEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskLabel { /** * The unique key of the label to assign to disks. 
*/ key: string; /** * The value of the label to assign to disks. */ value: string; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskResourceManagerTag { /** * The unique key of the resource manager tag to assign to disks. Keys must be in the format tagKeys/{tag_key_id}. */ key: string; /** * The value of the resource manager tag to assign to disks. Values must be in the format tagValues/456. */ value: string; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceImageEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; } interface GetRegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. 
Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; } interface GetRegionInstanceGroupManagerInstanceLifecyclePolicy { /** * Specifies the action that a MIG performs on a failed VM. If the value of the "onFailedHealthCheck" field is DEFAULT_ACTION, then the same action also applies to the VMs on which your application fails a health check. Valid values are: REPAIR, DO_NOTHING. If REPAIR (default), then MIG automatically repairs a failed VM by recreating it. For more information, see about repairing VMs in a MIG. If DO_NOTHING, then MIG does not repair a failed VM. */ defaultActionOnFailure: string; /** * Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type. */ forceUpdateOnRepair: string; /** * Specifies the action that a MIG performs on an unhealthy VM. A VM is marked as unhealthy when the application running on that VM fails a health check. Valid values are: DEFAULT_ACTION, DO_NOTHING, REPAIR. If DEFAULT_ACTION (default), then MIG uses the same action configured for the "defaultActionOnFailure" field. If DO_NOTHING, then MIG does not repair unhealthy VM. If REPAIR, then MIG automatically repairs an unhealthy VM by recreating it. */ onFailedHealthCheck: string; /** * Configuration for VM repairs in the MIG. 
*/ onRepairs: outputs.compute.GetRegionInstanceGroupManagerInstanceLifecyclePolicyOnRepair[]; } interface GetRegionInstanceGroupManagerInstanceLifecyclePolicyOnRepair { /** * Specifies whether the MIG can change a VM's zone during a repair. If "YES", MIG can select a different zone for the VM during a repair. Else if "NO", MIG cannot change a VM's zone during a repair. The default value of allowChangingZone is "NO". */ allowChangingZone: string; } interface GetRegionInstanceGroupManagerNamedPort { /** * The name of the instance group. Either `name` or `selfLink` must be provided. */ name: string; /** * The port number. */ port: number; } interface GetRegionInstanceGroupManagerParam { /** * Resource manager tags to bind to the managed instance group. The tags are key-value pairs. Keys must be in the format tagKeys/123 and values in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetRegionInstanceGroupManagerStandbyPolicy { /** * Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. */ initialDelaySec: number; /** * Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. The default mode is "MANUAL". */ mode: string; } interface GetRegionInstanceGroupManagerStatefulDisk { /** * A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER. */ deleteRule: string; /** * The device name of the disk to be attached. 
*/ deviceName: string; } interface GetRegionInstanceGroupManagerStatefulExternalIp { /** * A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER. */ deleteRule: string; /** * The network interface name */ interfaceName: string; } interface GetRegionInstanceGroupManagerStatefulInternalIp { /** * A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER. */ deleteRule: string; /** * The network interface name */ interfaceName: string; } interface GetRegionInstanceGroupManagerStatus { /** * Status of all-instances configuration on the group. */ allInstancesConfigs: outputs.compute.GetRegionInstanceGroupManagerStatusAllInstancesConfig[]; /** * A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. */ isStable: boolean; /** * Stateful status of the given Instance Group Manager. 
*/ statefuls: outputs.compute.GetRegionInstanceGroupManagerStatusStateful[]; /** * A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager. */ versionTargets: outputs.compute.GetRegionInstanceGroupManagerStatusVersionTarget[]; } interface GetRegionInstanceGroupManagerStatusAllInstancesConfig { /** * Current all-instances configuration revision. This value is in RFC3339 text format. */ currentRevision: string; /** * A bit indicating whether this configuration has been applied to all managed instances in the group. */ effective: boolean; } interface GetRegionInstanceGroupManagerStatusStateful { /** * A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. */ hasStatefulConfig: boolean; /** * Status of per-instance configs on the instances. */ perInstanceConfigs: outputs.compute.GetRegionInstanceGroupManagerStatusStatefulPerInstanceConfig[]; } interface GetRegionInstanceGroupManagerStatusStatefulPerInstanceConfig { /** * A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs. */ allEffective: boolean; } interface GetRegionInstanceGroupManagerStatusVersionTarget { /** * A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager. */ isReached: boolean; } interface GetRegionInstanceGroupManagerUpdatePolicy { /** * The instance redistribution policy for regional managed instance groups. 
Valid values are: "PROACTIVE", "NONE". If PROACTIVE (default), the group attempts to maintain an even distribution of VM instances across zones in the region. If NONE, proactive redistribution is disabled. */ instanceRedistributionType: string; /** * Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with max_surge_percent. Both cannot be 0 */ maxSurgeFixed: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. Conflicts with max_surge_fixed. */ maxSurgePercent: number; /** * Specifies a fixed number of VM instances. This must be a positive integer. */ maxUnavailableFixed: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. */ maxUnavailablePercent: number; /** * Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600]. */ minReadySec: number; /** * Minimal action to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to update without stopping instances, RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a REFRESH, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. */ minimalAction: string; /** * Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. 
*/ mostDisruptiveAllowedAction: string; /** * The instance replacement method for regional managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set maxUnavailableFixed or maxUnavailablePercent to be greater than 0. */ replacementMethod: string; /** * The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). */ type: string; } interface GetRegionInstanceGroupManagerVersion { /** * The full URL to an instance template from which all new instances of this version will be created. */ instanceTemplate: string; /** * The name of the instance group. Either `name` or `selfLink` must be provided. */ name: string; /** * The number of instances calculated as a fixed number or a percentage depending on the settings. */ targetSizes: outputs.compute.GetRegionInstanceGroupManagerVersionTargetSize[]; } interface GetRegionInstanceGroupManagerVersionTargetSize { /** * The number of instances which are managed for this version. Conflicts with percent. */ fixed: number; /** * The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. Note that when using percent, rounding will be in favor of explicitly set targetSize values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version. */ percent: number; } interface GetRegionInstanceTemplateAdvancedMachineFeature { /** * Whether to enable nested virtualization or not. 
*/ enableNestedVirtualization: boolean; /** * Whether to enable UEFI networking or not. */ enableUefiNetworking: boolean; /** * The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are "STANDARD", "ENHANCED", and "ARCHITECTURAL". */ performanceMonitoringUnit: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; /** * Turbo frequency mode to use for the instance. Currently supported modes is "ALL_CORE_MAX". */ turboMode: string; /** * The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width. */ visibleCoreCount: number; } interface GetRegionInstanceTemplateConfidentialInstanceConfig { /** * The confidential computing technology the instance uses. * SEV is an AMD feature. TDX is an Intel feature. One of the following * values is required: SEV, SEV_SNP, TDX. If SEV_SNP, minCpuPlatform = * "AMD Milan" is currently required. */ confidentialInstanceType: string; /** * Defines whether the instance should have confidential compute enabled. `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM. */ enableConfidentialCompute: boolean; } interface GetRegionInstanceTemplateDisk { /** * The architecture of the image. Allowed values are ARM64 or X86_64. */ architecture: string; /** * Whether or not the disk should be auto-deleted. * This defaults to true. */ autoDelete: boolean; /** * Indicates that this is a boot disk. */ boot: boolean; /** * A unique device name that is reflected into the * /dev/ tree of a Linux operating system running within the instance. 
If not * specified, the server chooses a default device name to apply to this disk. */ deviceName: string; /** * Encrypts or decrypts a disk using a customer-supplied encryption key. */ diskEncryptionKeys: outputs.compute.GetRegionInstanceTemplateDiskDiskEncryptionKey[]; /** * Name of the disk. When not provided, this defaults * to the name of the instance. */ diskName: string; /** * The size of the image in gigabytes. If not * specified, it will inherit the size of its base image. For SCRATCH disks, * the size must be exactly 375GB. */ diskSizeGb: number; /** * The GCE disk type. Such as `"pd-ssd"`, `"local-ssd"`, * `"pd-balanced"` or `"pd-standard"`. */ diskType: string; /** * A list of features to enable on the guest operating system. Applicable only for bootable images. */ guestOsFeatures: string[]; /** * Specifies the disk interface to use for attaching this disk, * which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI * and the request will fail if you attempt to attach a persistent disk in any other format * than SCSI. Local SSDs can use either NVME or SCSI. */ interface: string; /** * (Optional) A set of ket/value label pairs to assign to disk created from * this template */ labels: { [key: string]: string; }; /** * The mode in which to attach this disk, either READ_WRITE * or READ_ONLY. If you are attaching or creating a boot disk, this must * read-write mode. */ mode: string; /** * Indicates how many IOPS to provision for the disk. This * sets the number of I/O operations per second that the disk can handle. * Values must be between 10,000 and 120,000. For more details, see the * [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). */ provisionedIops: number; /** * Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. 
For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). */ provisionedThroughput: number; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * (Optional) -- A list of short names of resource policies to attach to this disk for automatic snapshot creations. Currently a max of 1 resource policy is supported. */ resourcePolicies: string[]; /** * The name (**not self_link**) * of the disk (such as those managed by `gcp.compute.Disk`) to attach. * > **Note:** Either `source` or `sourceImage` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ source: string; /** * The image from which to * initialize this disk. This can be one of: the image's `selfLink`, * `projects/{project}/global/images/{image}`, * `projects/{project}/global/images/family/{family}`, `global/images/{image}`, * `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, * `{project}/{image}`, `{family}`, or `{image}`. * > **Note:** Either `source` or `sourceImage` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ sourceImage: string; /** * The customer-supplied encryption key of the source * image. Required if the source image is protected by a * customer-supplied encryption key. * * Instance templates do not store customer-supplied * encryption keys, so you cannot create disks for * instances in a managed instance group if the source * images are encrypted with your own keys. 
*/ sourceImageEncryptionKeys: outputs.compute.GetRegionInstanceTemplateDiskSourceImageEncryptionKey[]; /** * The source snapshot to create this disk. When creating * a new instance, one of initializeParams.sourceSnapshot, * initializeParams.sourceImage, or disks.source is * required except for local SSD. */ sourceSnapshot: string; /** * The customer-supplied encryption key of the source snapshot. */ sourceSnapshotEncryptionKeys: outputs.compute.GetRegionInstanceTemplateDiskSourceSnapshotEncryptionKey[]; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface GetRegionInstanceTemplateDiskDiskEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; } interface GetRegionInstanceTemplateDiskSourceImageEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink: string; /** * The service account being used for the encryption * request for the given KMS key. If absent, the Compute * Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. 
*/ rsaEncryptedKey: string; } interface GetRegionInstanceTemplateDiskSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink: string; /** * The service account being used for the encryption * request for the given KMS key. If absent, the Compute * Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; } interface GetRegionInstanceTemplateGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface GetRegionInstanceTemplateNetworkInterface { /** * Access configurations, i.e. IPs via which this * instance can be accessed via the Internet. Omit to ensure that the instance * is not accessible from the Internet (this means that ssh provisioners will * not work unless you are running Terraform can send traffic to the instance's * network (e.g. via tunnel or because it is running on another cloud instance * on that network). This block can be repeated multiple times. Structure documented below. */ accessConfigs: outputs.compute.GetRegionInstanceTemplateNetworkInterfaceAccessConfig[]; /** * An * array of alias IP ranges for this network interface. Can only be specified for network * interfaces on subnet-mode networks. Structure documented below. 
*/ aliasIpRanges: outputs.compute.GetRegionInstanceTemplateNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. */ ipv6AccessConfigs: outputs.compute.GetRegionInstanceTemplateNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * The name of the instance template. One of `name` or `filter` must be provided. */ name: string; /** * The name or selfLink of the network to attach this interface to. * Use `network` attribute for Legacy or Auto subnetted networks and * `subnetwork` for custom subnetted networks. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. */ networkAttachment: string; /** * The private IP address to assign to the instance. If * empty, the address will be automatically assigned. */ networkIp: string; /** * The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET, MRDMA, and IRDMA */ nicType: string; /** * Name of the parent network interface of a dynamic network interface. 
*/ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount: number; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. */ stackType: string; /** * the name of the subnetwork to attach this interface * to. The subnetwork must exist in the same `region` this instance will be * created in. Either `network` or `subnetwork` must be provided. */ subnetwork: string; /** * The ID of the project in which the subnetwork belongs. * If it is not provided, the provider project is used. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan: number; } interface GetRegionInstanceTemplateNetworkInterfaceAccessConfig { /** * The IP address that will be 1:1 mapped to the instance's * network ip. If not given, one will be generated. */ natIp: string; /** * The [networking tier][network-tier] used for configuring * this instance template. This field can take the following values: PREMIUM or * STANDARD. If this field is not specified, it is assumed to be PREMIUM. */ networkTier: string; /** * The DNS domain name for the public PTR record.The DNS domain name for the public PTR record. */ publicPtrDomainName: string; } interface GetRegionInstanceTemplateNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. This IP CIDR range * must belong to the specified subnetwork and cannot contain IP addresses reserved by * system or used by other network interfaces. At the time of writing only a * netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API * error. 
*/ ipCidrRange: string; /** * The subnetwork secondary range name specifying * the secondary range from which to allocate the IP CIDR range for this alias IP * range. If left unspecified, the primary range of the subnetwork will be used. */ subnetworkRangeName: string; } interface GetRegionInstanceTemplateNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. */ externalIpv6PrefixLength: string; /** * The name of the instance template. One of `name` or `filter` must be provided. */ name: string; /** * The [networking tier][network-tier] used for configuring * this instance template. This field can take the following values: PREMIUM or * STANDARD. If this field is not specified, it is assumed to be PREMIUM. */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName: string; } interface GetRegionInstanceTemplateNetworkPerformanceConfig { /** * The egress bandwidth tier for the instance. */ totalEgressBandwidthTier: string; } interface GetRegionInstanceTemplateReservationAffinity { /** * Specifies the label selector for the reservation to use. */ specificReservations: outputs.compute.GetRegionInstanceTemplateReservationAffinitySpecificReservation[]; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface GetRegionInstanceTemplateReservationAffinitySpecificReservation { /** * The key for the node affinity label. */ key: string; /** * Corresponds to the label values of a reservation resource. 
*/ values: string[]; } interface GetRegionInstanceTemplateScheduling { /** * Specifies whether the instance should be * automatically restarted if it is terminated by Compute Engine (not * terminated by a user). This defaults to true. */ automaticRestart: boolean; /** * Specifies the availability domain, which this instance should be scheduled on. */ availabilityDomain: number; /** * Settings for the instance to perform a graceful shutdown. */ gracefulShutdowns: outputs.compute.GetRegionInstanceTemplateSchedulingGracefulShutdown[]; /** * Beta Time in seconds for host error detection. */ hostErrorTimeoutSeconds: number; /** * Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) */ instanceTerminationAction: string; /** * Specifies the maximum amount of time a Local Ssd Vm should wait while * recovery of the Local Ssd state is attempted. Its value should be in * between 0 and 168 hours with hour granularity and the default value being 1 * hour. */ localSsdRecoveryTimeouts: outputs.compute.GetRegionInstanceTemplateSchedulingLocalSsdRecoveryTimeout[]; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC */ maintenanceInterval: string; /** * The timeout for new network connections to hosts. */ maxRunDurations: outputs.compute.GetRegionInstanceTemplateSchedulingMaxRunDuration[]; /** * Minimum number of cpus for the instance. */ minNodeCpus: number; /** * Specifies node affinities or anti-affinities * to determine which sole-tenant nodes your instances and managed instance * groups will use as host systems. Read more on sole-tenant node creation * [here](https://cloud.google.com/compute/docs/nodes/create-nodes). * Structure documented below. */ nodeAffinities: outputs.compute.GetRegionInstanceTemplateSchedulingNodeAffinity[]; /** * Defines the maintenance behavior for this * instance. 
*/ onHostMaintenance: string; /** * Defines the behaviour for instances with the instance_termination_action. */ onInstanceStopActions: outputs.compute.GetRegionInstanceTemplateSchedulingOnInstanceStopAction[]; /** * Allows instance to be preempted. This defaults to * false. Read more on this * [here](https://cloud.google.com/compute/docs/instances/preemptible). */ preemptible: boolean; /** * Describe the type of preemptible VM. */ provisioningModel: string; /** * Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown: boolean; /** * Specifies the timestamp, when the instance will be terminated, * in RFC3339 text format. If specified, the instance termination action * will be performed at the termination time. */ terminationTime: string; } interface GetRegionInstanceTemplateSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. */ maxDurations: outputs.compute.GetRegionInstanceTemplateSchedulingGracefulShutdownMaxDuration[]; } interface GetRegionInstanceTemplateSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * The value must be between 1 and 3600, which is 3,600 seconds (one hour). */ seconds: number; } interface GetRegionInstanceTemplateSchedulingLocalSsdRecoveryTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. 
Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetRegionInstanceTemplateSchedulingMaxRunDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface GetRegionInstanceTemplateSchedulingNodeAffinity { /** * The key for the node affinity label. */ key: string; /** * The operator. Can be `IN` for node-affinities * or `NOT_IN` for anti-affinities. */ operator: string; values: string[]; } interface GetRegionInstanceTemplateSchedulingOnInstanceStopAction { /** * If true, the contents of any attached Local SSD disks will be discarded. */ discardLocalSsd: boolean; } interface GetRegionInstanceTemplateServiceAccount { /** * The service account e-mail address. If not given, the * default Google Compute Engine service account is used. */ email: string; /** * A list of service scopes. Both OAuth2 URLs and gcloud * short names are supported. To allow full access to all Cloud APIs, use the * `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes). */ scopes: string[]; } interface GetRegionInstanceTemplateShieldedInstanceConfig { /** * - Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true. */ enableIntegrityMonitoring: boolean; /** * - Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false. 
*/ enableSecureBoot: boolean; /** * - Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true. */ enableVtpm: boolean; } interface GetRegionNetworkEndpointGroupAppEngine { /** * Optional serving service. * The service name must be 1-63 characters long, and comply with RFC1035. * Example value: "default", "my-service". */ service: string; /** * A template to parse service and version fields from a request URL. * URL mask allows for routing to multiple App Engine services without * having to create multiple Network Endpoint Groups and backend services. * * For example, the request URLs "foo1-dot-appname.appspot.com/v1" and * "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with * URL mask "-dot-appname.appspot.com/". The URL mask will parse * them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. */ urlMask: string; /** * Optional serving version. * The version must be 1-63 characters long, and comply with RFC1035. * Example value: "v1", "v2". */ version: string; } interface GetRegionNetworkEndpointGroupCloudFunction { /** * A user-defined name of the Cloud Function. * The function name is case-sensitive and must be 1-63 characters long. * Example value: "func1". */ function: string; /** * A template to parse function field from a request URL. URL mask allows * for routing to multiple Cloud Functions without having to create * multiple Network Endpoint Groups and backend services. * * For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" * can be backed by the same Serverless NEG with URL mask "/". The URL mask * will parse them to { function = "function1" } and { function = "function2" } respectively. */ urlMask: string; } interface GetRegionNetworkEndpointGroupCloudRun { /** * Cloud Run service is the main resource of Cloud Run. 
* The service must be 1-63 characters long, and comply with RFC1035. * Example value: "run-service". */ service: string; /** * Cloud Run tag represents the "named-revision" to provide * additional fine-grained traffic routing information. * The tag must be 1-63 characters long, and comply with RFC1035. * Example value: "revision-0010". */ tag: string; /** * A template to parse service and tag fields from a request URL. * URL mask allows for routing to multiple Run services without having * to create multiple network endpoint groups and backend services. * * For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" * can be backed by the same Serverless Network Endpoint Group (NEG) with * URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } * and { service="bar2", tag="foo2" } respectively. */ urlMask: string; } interface GetRegionNetworkEndpointGroupPscData { /** * The PSC producer port to use when consumer PSC NEG connects to a producer. If * this flag isn't specified for a PSC NEG with endpoint type * private-service-connect, then PSC NEG will be connected to a first port in the * available PSC producer port range. */ producerPort: string; } interface GetRegionNetworkEndpointGroupServerlessDeployment { /** * The platform of the NEG backend target(s). Possible values: * API Gateway: apigateway.googleapis.com */ platform: string; /** * The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. * The resource identified by this value is platform-specific and is as follows: API Gateway: The gateway ID, App Engine: The service name, * Cloud Functions: The function name, Cloud Run: The service name */ resource: string; /** * A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources * on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. 
* The fields parsed by this template are platform-specific and are as follows: API Gateway: The gateway ID, * App Engine: The service and version, Cloud Functions: The function name, Cloud Run: The service and tag */ urlMask: string; /** * The optional resource version. The version identified by this value is platform-specific and is as follows: * API Gateway: Unused, App Engine: The service version, Cloud Functions: Unused, Cloud Run: The service tag */ version: string; } interface GetRegionSecurityPolicyAdvancedOptionsConfig { /** * Custom configuration to apply the JSON parsing. Only applicable when JSON parsing is set to STANDARD. */ jsonCustomConfigs: outputs.compute.GetRegionSecurityPolicyAdvancedOptionsConfigJsonCustomConfig[]; /** * JSON body parsing. Supported values include: "DISABLED", "STANDARD", "STANDARD_WITH_GRAPHQL". Possible values: ["DISABLED", "STANDARD", "STANDARD_WITH_GRAPHQL"] */ jsonParsing: string; /** * Logging level. Supported values include: "NORMAL", "VERBOSE". Possible values: ["NORMAL", "VERBOSE"] */ logLevel: string; /** * The maximum request size chosen by the customer with Waf enabled. Values supported are "8KB", "16KB", "32KB", "48KB" and "64KB". * Values are case insensitive. Possible values: ["8KB", "16KB", "32KB", "48KB", "64KB"] */ requestBodyInspectionSize: string; /** * An optional list of case-insensitive request header names to use for resolving the callers client IP address. */ userIpRequestHeaders: string[]; } interface GetRegionSecurityPolicyAdvancedOptionsConfigJsonCustomConfig { /** * A list of custom Content-Type header values to apply the JSON parsing. */ contentTypes: string[]; } interface GetRegionSecurityPolicyDdosProtectionConfig { /** * Google Cloud Armor offers the following options to help protect systems against DDoS attacks: * - STANDARD: basic always-on protection for network load balancers, protocol forwarding, or VMs with public IP addresses. 
* - ADVANCED: additional protections for Managed Protection Plus subscribers who use network load balancers, protocol forwarding, or VMs with public IP addresses. * - ADVANCED_PREVIEW: flag to enable the security policy in preview mode. Possible values: ["ADVANCED", "ADVANCED_PREVIEW", "STANDARD"] */ ddosProtection: string; } interface GetRegionSecurityPolicyRule { /** * The Action to perform when the rule is matched. The following are the valid actions: * * * allow: allow access to target. * * * deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. * * * rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. * * * redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. * * * throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. */ action: string; /** * An optional description of this resource. Provide this property when you create the resource. */ description: string; /** * A match condition that incoming traffic is evaluated against. * If it evaluates to true, the corresponding 'action' is enforced. */ matches: outputs.compute.GetRegionSecurityPolicyRuleMatch[]; /** * A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. * The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). 
* Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. * Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. * For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. * Example: * networkMatch: srcIpRanges: - "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: "ipv4FragmentOffset" values: - "1-0x1fff" * The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named "ipv4FragmentOffset" with a value between 1 and 0x1fff inclusive */ networkMatches: outputs.compute.GetRegionSecurityPolicyRuleNetworkMatch[]; /** * Preconfigured WAF configuration to be applied for the rule. * If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. */ preconfiguredWafConfigs: outputs.compute.GetRegionSecurityPolicyRulePreconfiguredWafConfig[]; /** * If set to true, the specified action is not enforced. */ preview: boolean; /** * An integer indicating the priority of a rule in the list. * The priority must be a positive value between 0 and 2147483647. * Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. */ priority: number; /** * Must be specified if the action is "rateBasedBan" or "throttle". 
Cannot be specified for any other actions. */ rateLimitOptions: outputs.compute.GetRegionSecurityPolicyRuleRateLimitOption[]; } interface GetRegionSecurityPolicyRuleMatch { /** * The configuration options available when specifying versionedExpr. * This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. */ configs: outputs.compute.GetRegionSecurityPolicyRuleMatchConfig[]; /** * User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. See [Sample expressions](https://cloud.google.com/armor/docs/configure-security-policies#sample-expressions) for examples. */ exprs: outputs.compute.GetRegionSecurityPolicyRuleMatchExpr[]; /** * Preconfigured versioned expression. If this field is specified, config must also be specified. * Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. Possible values: ["SRC_IPS_V1"] */ versionedExpr: string; } interface GetRegionSecurityPolicyRuleMatchConfig { /** * CIDR IP address range. Maximum number of srcIpRanges allowed is 10. */ srcIpRanges: string[]; } interface GetRegionSecurityPolicyRuleMatchExpr { /** * Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. */ expression: string; } interface GetRegionSecurityPolicyRuleNetworkMatch { /** * Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format. */ destIpRanges: string[]; /** * Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). */ destPorts: string[]; /** * IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. "6"), range (e.g. 
"253-254"), or one of the following protocol names: "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". */ ipProtocols: string[]; /** * BGP Autonomous System Number associated with the source IP address. */ srcAsns: number[]; /** * Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format. */ srcIpRanges: string[]; /** * Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). */ srcPorts: string[]; /** * Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address. */ srcRegionCodes: string[]; /** * User-defined fields. Each element names a defined field and lists the matching values for that field. */ userDefinedFields: outputs.compute.GetRegionSecurityPolicyRuleNetworkMatchUserDefinedField[]; } interface GetRegionSecurityPolicyRuleNetworkMatchUserDefinedField { /** * The name of the user-defined field, as given in the definition. */ name: string; /** * Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with "0x") number (e.g. "64") or range (e.g. "0x400-0x7ff"). */ values: string[]; } interface GetRegionSecurityPolicyRulePreconfiguredWafConfig { /** * An exclusion to apply during preconfigured WAF evaluation. */ exclusions: outputs.compute.GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusion[]; } interface GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusion { /** * Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. */ requestCookies: outputs.compute.GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky[]; /** * Request header whose value will be excluded from inspection during preconfigured WAF evaluation. */ requestHeaders: outputs.compute.GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader[]; /** * Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. 
* Note that the parameter can be in the query string or in the POST body. */ requestQueryParams: outputs.compute.GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam[]; /** * Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. * When specifying this field, the query or fragment part should be excluded. */ requestUris: outputs.compute.GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri[]; /** * A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. * If omitted, it refers to all the rule IDs under the WAF rule set. */ targetRuleIds: string[]; /** * Target WAF rule set to apply the preconfigured WAF exclusion. */ targetRuleSet: string; } interface GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. Possible values: ["CONTAINS", "ENDS_WITH", "EQUALS", "EQUALS_ANY", "STARTS_WITH"] */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader { /** * You can specify an exact match or a partial match by using a field operator and a field value. 
* Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. Possible values: ["CONTAINS", "ENDS_WITH", "EQUALS", "EQUALS_ANY", "STARTS_WITH"] */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. Possible values: ["CONTAINS", "ENDS_WITH", "EQUALS", "EQUALS_ANY", "STARTS_WITH"] */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetRegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri { /** * You can specify an exact match or a partial match by using a field operator and a field value. 
* Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. Possible values: ["CONTAINS", "ENDS_WITH", "EQUALS", "EQUALS_ANY", "STARTS_WITH"] */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetRegionSecurityPolicyRuleRateLimitOption { /** * Can only be specified if the action for the rule is "rateBasedBan". * If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. */ banDurationSec: number; /** * Can only be specified if the action for the rule is "rateBasedBan". * If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. */ banThresholds: outputs.compute.GetRegionSecurityPolicyRuleRateLimitOptionBanThreshold[]; /** * Action to take for requests that are under the configured rate limit threshold. * Valid option is "allow" only. */ conformAction: string; /** * Determines the key to enforce the rateLimitThreshold on. Possible values are: * * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. * * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. 
* * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. * * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. * * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. * * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. * * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. * * REGION_CODE: The country/region from which the request originates. * * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * TLS_JA4_FINGERPRINT: JA4 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
Possible values: ["ALL", "IP", "HTTP_HEADER", "XFF_IP", "HTTP_COOKIE", "HTTP_PATH", "SNI", "REGION_CODE", "TLS_JA3_FINGERPRINT", "TLS_JA4_FINGERPRINT", "USER_IP"] */ enforceOnKey: string; /** * If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. * You can specify up to 3 enforceOnKeyConfigs. * If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. */ enforceOnKeyConfigs: outputs.compute.GetRegionSecurityPolicyRuleRateLimitOptionEnforceOnKeyConfig[]; /** * Rate limit key name applicable only for the following key types: * HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. * HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName: string; /** * Action to take for requests that are above the configured rate limit threshold, to deny with a specified HTTP response code. * Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502. */ exceedAction: string; /** * Threshold at which to begin ratelimiting. */ rateLimitThresholds: outputs.compute.GetRegionSecurityPolicyRuleRateLimitOptionRateLimitThreshold[]; } interface GetRegionSecurityPolicyRuleRateLimitOptionBanThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count: number; /** * Interval over which the threshold is computed. */ intervalSec: number; } interface GetRegionSecurityPolicyRuleRateLimitOptionEnforceOnKeyConfig { /** * Rate limit key name applicable only for the following key types: * HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. * HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName: string; /** * Determines the key to enforce the rateLimitThreshold on. Possible values are: * * ALL: A single rate limit threshold is applied to all the requests matching this rule. 
This is the default value if "enforceOnKeyConfigs" is not configured. * * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. * * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. * * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. * * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. * * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. * * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. * * REGION_CODE: The country/region from which the request originates. * * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * TLS_JA4_FINGERPRINT: JA4 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
Possible values: ["ALL", "IP", "HTTP_HEADER", "XFF_IP", "HTTP_COOKIE", "HTTP_PATH", "SNI", "REGION_CODE", "TLS_JA3_FINGERPRINT", "TLS_JA4_FINGERPRINT", "USER_IP"] */ enforceOnKeyType: string; } interface GetRegionSecurityPolicyRuleRateLimitOptionRateLimitThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count: number; /** * Interval over which the threshold is computed. */ intervalSec: number; } interface GetRegionSecurityPolicyUserDefinedField { /** * The base relative to which 'offset' is measured. Possible values are: * - IPV4: Points to the beginning of the IPv4 header. * - IPV6: Points to the beginning of the IPv6 header. * - TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. * - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. Possible values: ["IPV4", "IPV6", "TCP", "UDP"] */ base: string; /** * If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. * Encoded as a hexadecimal number (starting with "0x"). * The last byte of the field (in network byte order) corresponds to the least significant byte of the mask. */ mask: string; /** * The name of this field. Must be unique within the security policy. */ name: string; /** * Offset of the first byte of the field (in network byte order) relative to 'base'. */ offset: number; /** * Size of the field in bytes. Valid values: 1-4. */ size: number; } interface GetReservationBlockHealthInfo { /** * The number of sub-blocks that are degraded. */ degradedSubBlockCount: number; /** * The health status of the reservation block. */ healthStatus: string; /** * The number of sub-blocks that are healthy. */ healthySubBlockCount: number; } interface GetReservationBlockPhysicalTopology { /** * The hash of the capacity block within the cluster. */ block: string; /** * The cluster name of the reservation block. 
*/ cluster: string; } interface GetReservationBlockReservationMaintenance { /** * Number of instances that have ongoing maintenance. */ instanceMaintenanceOngoingCount: number; /** * Number of instances that have pending maintenance. */ instanceMaintenancePendingCount: number; /** * Number of hosts in the block that have ongoing maintenance. */ maintenanceOngoingCount: number; /** * Number of hosts in the block that have pending maintenance. */ maintenancePendingCount: number; /** * The type of maintenance for the reservation. */ schedulingType: string; /** * Number of sub-block infrastructure that has ongoing maintenance. */ subblockInfraMaintenanceOngoingCount: number; /** * Number of sub-block infrastructure that has pending maintenance. */ subblockInfraMaintenancePendingCount: number; } interface GetReservationDeleteAfterDuration { /** * Number of nanoseconds for the auto-delete duration. */ nanos: number; /** * Number of seconds for the auto-delete duration. */ seconds: string; } interface GetReservationReservationSharingPolicy { /** * Sharing config for all Google Cloud services. Possible values: ["ALLOW_ALL", "DISALLOW_ALL"] */ serviceShareType: string; } interface GetReservationResourceStatus { /** * Health information for the reservation. */ healthInfos: outputs.compute.GetReservationResourceStatusHealthInfo[]; /** * The number of reservation blocks associated with this reservation. */ reservationBlockCount: number; /** * Maintenance information for this reservation */ reservationMaintenances: outputs.compute.GetReservationResourceStatusReservationMaintenance[]; /** * Allocation Properties of this reservation. */ specificSkuAllocations: outputs.compute.GetReservationResourceStatusSpecificSkuAllocation[]; } interface GetReservationResourceStatusHealthInfo { /** * The number of reservation blocks that are degraded. */ degradedBlockCount: number; /** * The health status of the reservation. 
*/ healthStatus: string; /** * The number of reservation blocks that are healthy. */ healthyBlockCount: number; } interface GetReservationResourceStatusReservationMaintenance { /** * Describes number of instances that have ongoing maintenance. */ instanceMaintenanceOngoingCount: number; /** * Describes number of instances that have pending maintenance. */ instanceMaintenancePendingCount: number; /** * Progress for ongoing maintenance for this group of VMs/hosts. Describes number of hosts in the block that have ongoing maintenance. */ maintenanceOngoingCount: number; /** * Progress for ongoing maintenance for this group of VMs/hosts. Describes number of hosts in the block that have pending maintenance. */ maintenancePendingCount: number; /** * The type of maintenance for the reservation. */ schedulingType: string; /** * Describes number of subblock Infrastructure that has ongoing maintenance. Here, Subblock Infrastructure Maintenance pertains to upstream hardware contained in the Subblock that is necessary for a VM Family(e.g. NVLink Domains). Not all VM Families will support this field. */ subblockInfraMaintenanceOngoingCount: number; /** * Describes number of subblock Infrastructure that has pending maintenance. Here, Subblock Infrastructure Maintenance pertains to upstream hardware contained in the Subblock that is necessary for a VM Family (e.g. NVLink Domains). Not all VM Families will support this field. */ subblockInfraMaintenancePendingCount: number; /** * Maintenance information on this group of VMs. */ upcomingGroupMaintenances: outputs.compute.GetReservationResourceStatusReservationMaintenanceUpcomingGroupMaintenance[]; } interface GetReservationResourceStatusReservationMaintenanceUpcomingGroupMaintenance { /** * Indicates if the maintenance can be customer triggered. */ canReschedule: boolean; /** * The latest time for the planned maintenance window to start. This timestamp value is in RFC3339 text format. 
*/ latestWindowStartTime: string; /** * Indicates whether the UpcomingMaintenance will be triggered on VM shutdown. */ maintenanceOnShutdown: boolean; /** * The reasons for the maintenance. Only valid for vms. */ maintenanceReasons: string[]; /** * Status of the maintenance. */ maintenanceStatus: string; /** * Defines the type of maintenance. */ type: string; /** * The time by which the maintenance disruption will be completed. This timestamp value is in RFC3339 text format. */ windowEndTime: string; /** * The current start time of the maintenance window. This timestamp value is in RFC3339 text format. */ windowStartTime: string; } interface GetReservationResourceStatusSpecificSkuAllocation { /** * ID of the instance template used to populate reservation properties. */ sourceInstanceTemplateId: string; /** * Per service utilization breakdown. The Key is the Google Cloud managed service name. */ utilizations: { [key: string]: string; }; } interface GetReservationShareSetting { /** * A map of project number and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. */ projectMaps: outputs.compute.GetReservationShareSettingProjectMap[]; /** * List of project IDs with which the reservation is shared. */ projects: string[]; /** * Type of sharing for this shared-reservation Possible values: ["LOCAL", "SPECIFIC_PROJECTS"] */ shareType: string; } interface GetReservationShareSettingProjectMap { id: string; /** * The project id/number, should be same as the key of this project config in the project map. */ projectId: string; } interface GetReservationSpecificReservation { /** * Indicates how many instances are actually usable currently. */ assuredCount: number; /** * The number of resources that are allocated. */ count: number; /** * How many instances are in use. */ inUseCount: number; /** * The instance properties for the reservation. 
*/ instanceProperties: outputs.compute.GetReservationSpecificReservationInstanceProperty[]; /** * Specifies the instance template to create the reservation. If you use this field, you must exclude the * instanceProperties field. */ sourceInstanceTemplate: string; } interface GetReservationSpecificReservationInstanceProperty { /** * Guest accelerator type and count. */ guestAccelerators: outputs.compute.GetReservationSpecificReservationInstancePropertyGuestAccelerator[]; /** * The amount of local ssd to reserve with each instance. This * reserves disks of type 'local-ssd'. */ localSsds: outputs.compute.GetReservationSpecificReservationInstancePropertyLocalSsd[]; /** * An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. */ locationHint: string; /** * The name of the machine type to reserve. */ machineType: string; /** * Specifies the frequency of planned maintenance events. Possible values: ["AS_NEEDED", "PERIODIC", "RECURRENT"] */ maintenanceInterval: string; /** * The minimum CPU platform for the reservation. For example, * '"Intel Skylake"'. See * [the CPU platform availability reference](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) * for information on available CPU platforms. */ minCpuPlatform: string; } interface GetReservationSpecificReservationInstancePropertyGuestAccelerator { /** * The number of the guest accelerator cards exposed to * this instance. */ acceleratorCount: number; /** * The full or partial URL of the accelerator type to * attach to this instance. For example: * 'projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100' * * If you are creating an instance template, specify only the accelerator name. */ acceleratorType: string; } interface GetReservationSpecificReservationInstancePropertyLocalSsd { /** * The size of the disk in base-2 GB. 
*/ diskSizeGb: number; /** * The disk interface to use for attaching this disk. Default value: "SCSI" Possible values: ["SCSI", "NVME"] */ interface: string; } interface GetReservationSubBlockHealthInfo { /** * The number of degraded hosts in the reservation sub-block. */ degradedHostCount: number; /** * The number of degraded infrastructure (e.g. NVLink domain) in the reservation sub-block. */ degradedInfraCount: number; /** * The health status of the reservation sub-block. */ healthStatus: string; /** * The number of healthy hosts in the reservation sub-block. */ healthyHostCount: number; /** * The number of healthy infrastructure (e.g. NVLink domain) in the reservation sub-block. */ healthyInfraCount: number; } interface GetReservationSubBlockPhysicalTopology { /** * The hash of the capacity block within the cluster. */ block: string; /** * The cluster name of the reservation sub-block. */ cluster: string; /** * The hash of the capacity sub-block within the capacity block. */ subBlock: string; } interface GetReservationSubBlockReservationSubBlockMaintenance { /** * Number of instances that have ongoing maintenance. */ instanceMaintenanceOngoingCount: number; /** * Number of instances that have pending maintenance. */ instanceMaintenancePendingCount: number; /** * Number of hosts in the sub-block that have ongoing maintenance. */ maintenanceOngoingCount: number; /** * Number of hosts in the sub-block that have pending maintenance. */ maintenancePendingCount: number; /** * The type of maintenance for the reservation. */ schedulingType: string; /** * Number of sub-block infrastructure that has ongoing maintenance. */ subblockInfraMaintenanceOngoingCount: number; /** * Number of sub-block infrastructure that has pending maintenance. */ subblockInfraMaintenancePendingCount: number; } interface GetResourcePolicyDiskConsistencyGroupPolicy { /** * Enable disk consistency on the resource policy. 
*/ enabled: boolean; } interface GetResourcePolicyGroupPlacementPolicy { /** * The number of availability domains instances will be spread across. If two instances are in different * availability domain, they will not be put in the same low latency network */ availabilityDomainCount: number; /** * Collocation specifies whether to place VMs inside the same availability domain on the same low-latency network. * Specify 'COLLOCATED' to enable collocation. Can only be specified with 'vm_count'. If compute instances are created * with a COLLOCATED policy, then exactly 'vm_count' instances must be created at the same time with the resource policy * attached. Possible values: ["COLLOCATED"] */ collocation: string; /** * Specifies the shape of the GPU slice, in slice based GPU families eg. A4X. */ gpuTopology: string; /** * Specifies the number of max logical switches. */ maxDistance: number; /** * Specifies the shape of the TPU slice. */ tpuTopology: string; /** * Number of VMs in this placement group. Google does not recommend that you use this field * unless you use a compact policy and you want your policy to work only if it contains this * exact number of VMs. */ vmCount: number; } interface GetResourcePolicyInstanceSchedulePolicy { /** * The expiration time of the schedule. The timestamp is an RFC3339 string. */ expirationTime: string; /** * The start time of the schedule. The timestamp is an RFC3339 string. */ startTime: string; /** * Specifies the time zone to be used in interpreting the schedule. The value of this field must be a time zone name * from the tz database: http://en.wikipedia.org/wiki/Tz_database. */ timeZone: string; /** * Specifies the schedule for starting instances. */ vmStartSchedules: outputs.compute.GetResourcePolicyInstanceSchedulePolicyVmStartSchedule[]; /** * Specifies the schedule for stopping instances. 
*/ vmStopSchedules: outputs.compute.GetResourcePolicyInstanceSchedulePolicyVmStopSchedule[]; } interface GetResourcePolicyInstanceSchedulePolicyVmStartSchedule { /** * Specifies the frequency for the operation, using the unix-cron format. */ schedule: string; } interface GetResourcePolicyInstanceSchedulePolicyVmStopSchedule { /** * Specifies the frequency for the operation, using the unix-cron format. */ schedule: string; } interface GetResourcePolicySnapshotSchedulePolicy { /** * Retention policy applied to snapshots created by this resource policy. */ retentionPolicies: outputs.compute.GetResourcePolicySnapshotSchedulePolicyRetentionPolicy[]; /** * Contains one of an 'hourlySchedule', 'dailySchedule', or 'weeklySchedule'. */ schedules: outputs.compute.GetResourcePolicySnapshotSchedulePolicySchedule[]; /** * Properties with which the snapshots are created, such as labels. */ snapshotProperties: outputs.compute.GetResourcePolicySnapshotSchedulePolicySnapshotProperty[]; } interface GetResourcePolicySnapshotSchedulePolicyRetentionPolicy { /** * Maximum age of the snapshot that is allowed to be kept. */ maxRetentionDays: number; /** * Specifies the behavior to apply to scheduled snapshots when * the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values: ["KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY"] */ onSourceDiskDelete: string; } interface GetResourcePolicySnapshotSchedulePolicySchedule { /** * The policy will execute every nth day at the specified time. */ dailySchedules: outputs.compute.GetResourcePolicySnapshotSchedulePolicyScheduleDailySchedule[]; /** * The policy will execute every nth hour starting at the specified time. */ hourlySchedules: outputs.compute.GetResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule[]; /** * Allows specifying a snapshot time for each day of the week. 
*/ weeklySchedules: outputs.compute.GetResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule[]; } interface GetResourcePolicySnapshotSchedulePolicyScheduleDailySchedule { /** * Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. Days in cycle for snapshot schedule policy must be 1. */ daysInCycle: number; /** * This must be in UTC format that resolves to one of * 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, * both 13:00-5 and 08:00 are valid. */ startTime: string; } interface GetResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule { /** * The number of hours between snapshots. */ hoursInCycle: number; /** * Time within the window to start the operations. * It must be in an hourly format "HH:MM", * where HH : [00-23] and MM : [00] GMT. eg: 21:00 */ startTime: string; } interface GetResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule { /** * May contain up to seven (one for each day of the week) snapshot times. */ dayOfWeeks: outputs.compute.GetResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeek[]; } interface GetResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeek { /** * The day of the week to create the snapshot. e.g. MONDAY Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ day: string; /** * Time within the window to start the operations. * It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. */ startTime: string; } interface GetResourcePolicySnapshotSchedulePolicySnapshotProperty { /** * Creates the new snapshot in the snapshot chain labeled with the * specified name. The chain name must be 1-63 characters long and comply * with RFC1035. */ chainName: string; /** * Whether to perform a 'guest aware' snapshot. */ guestFlush: boolean; /** * A set of key-value pairs. 
*/ labels: { [key: string]: string; }; /** * Cloud Storage bucket location to store the auto snapshot * (regional or multi-regional) */ storageLocations: string[]; } interface GetResourcePolicyWorkloadPolicy { /** * The accelerator topology. This field can be set only when the workload policy type is HIGH_THROUGHPUT * and cannot be set if max topology distance is set. */ acceleratorTopology: string; /** * The maximum topology distance. This field can be set only when the workload policy type is HIGH_THROUGHPUT * and cannot be set if accelerator topology is set. Possible values: ["BLOCK", "CLUSTER", "SUBBLOCK"] */ maxTopologyDistance: string; /** * The type of workload policy. Possible values: ["HIGH_AVAILABILITY", "HIGH_THROUGHPUT"] */ type: string; } interface GetRouterBgp { /** * User-specified flag to indicate which mode to use for advertisement. Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"] */ advertiseMode: string; /** * User-specified list of prefix groups to advertise in custom mode. * This field can only be populated if advertiseMode is CUSTOM and * is advertised to all peers of the router. These groups will be * advertised in addition to any specified prefixes. Leave this field * blank to advertise no custom groups. * * This enum field has the one valid value: ALL_SUBNETS */ advertisedGroups: string[]; /** * User-specified list of individual IP ranges to advertise in * custom mode. This field can only be populated if advertiseMode * is CUSTOM and is advertised to all peers of the router. These IP * ranges will be advertised in addition to any specified groups. * Leave this field blank to advertise no custom IP ranges. */ advertisedIpRanges: outputs.compute.GetRouterBgpAdvertisedIpRange[]; /** * Local BGP Autonomous System Number (ASN). Must be an RFC6996 * private ASN, either 16-bit or 32-bit. The value will be fixed for * this router resource. All VPN tunnels that link to this router * will have the same local ASN. 
*/ asn: number; /** * Explicitly specifies a range of valid BGP Identifiers for this Router. * It is provided as a link-local IPv4 range (from 169.254.0.0/16), of * size at least /30, even if the BGP sessions are over IPv6. It must * not overlap with any IPv4 BGP session ranges. Other vendors commonly * call this router ID. */ identifierRange: string; /** * The interval in seconds between BGP keepalive messages that are sent * to the peer. Hold time is three times the interval at which keepalive * messages are sent, and the hold time is the maximum number of seconds * allowed to elapse between successive keepalive messages that BGP * receives from a peer. * * BGP will use the smaller of either the local hold time value or the * peer's hold time value as the hold time for the BGP connection * between the two peers. If set, this value must be between 20 and 60. * The default is 20. */ keepaliveInterval: number; } interface GetRouterBgpAdvertisedIpRange { /** * User-specified description for the IP range. */ description: string; /** * The IP range to advertise. The value must be a * CIDR-formatted string. */ range: string; } interface GetRouterMd5AuthenticationKey { /** * Value of the key used for MD5 authentication. */ key: string; /** * The name of the router. */ name: string; } interface GetRouterNatLogConfig { /** * Indicates whether or not to export logs. */ enable: boolean; /** * Specifies the desired filtering of logs on this NAT. Possible values: ["ERRORS_ONLY", "TRANSLATIONS_ONLY", "ALL"] */ filter: string; } interface GetRouterNatNat64Subnetwork { /** * Name of the NAT service. The name must be 1-63 characters long and * comply with RFC1035. */ name: string; } interface GetRouterNatRule { /** * The action to be enforced for traffic that matches this rule. */ actions: outputs.compute.GetRouterNatRuleAction[]; /** * An optional description of this rule. 
*/ description: string; /** * CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. * If it evaluates to true, the corresponding action is enforced. * * The following examples are valid match expressions for public NAT: * * "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" * * "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" * * The following example is a valid match expression for private NAT: * * "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" */ match: string; /** * An integer uniquely identifying a rule in the list. * The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. */ ruleNumber: number; } interface GetRouterNatRuleAction { /** * A list of URLs of the IP resources used for this NAT rule. * These IP addresses must be valid static external IP addresses assigned to the project. * This field is used for public NAT. */ sourceNatActiveIps: string[]; /** * A list of URLs of the subnetworks used as source ranges for this NAT Rule. * These subnetworks must have purpose set to PRIVATE_NAT. * This field is used for private NAT. */ sourceNatActiveRanges: string[]; /** * A list of URLs of the IP resources to be drained. * These IPs must be valid static external IPs that have been assigned to the NAT. * These IPs should be used for updating/patching a NAT rule only. * This field is used for public NAT. */ sourceNatDrainIps: string[]; /** * A list of URLs of subnetworks representing source ranges to be drained. * This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. * This field is used for private NAT. */ sourceNatDrainRanges: string[]; } interface GetRouterNatSubnetwork { /** * Name of the NAT service. The name must be 1-63 characters long and * comply with RFC1035. 
*/ name: string; /** * List of the secondary ranges of the subnetwork that are allowed * to use NAT. This can be populated only if * 'LIST_OF_SECONDARY_IP_RANGES' is one of the values in * sourceIpRangesToNat */ secondaryIpRangeNames: string[]; /** * List of options for which source IPs in the subnetwork * should have NAT enabled. Supported values include: * 'ALL_IP_RANGES', 'LIST_OF_SECONDARY_IP_RANGES', * 'PRIMARY_IP_RANGE'. */ sourceIpRangesToNats: string[]; } interface GetRouterParam { /** * Resource manager tags to be bound to the router. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags: { [key: string]: string; }; } interface GetRouterStatusBestRoute { asPaths: outputs.compute.GetRouterStatusBestRouteAsPath[]; /** * Creation timestamp in RFC3339 text format. */ creationTimestamp: string; /** * An optional description of this resource. Provide this property * when you create the resource. */ description: string; /** * The destination range of outgoing packets that this route applies to. * Only IPv4 is supported. */ destRange: string; /** * The name of the router. */ name: string; /** * The network name or resource link to the parent * network of this subnetwork. */ network: string; /** * URL to a gateway that should handle matching packets. * Currently, you can only specify the internet gateway, using a full or * partial valid URL: * * 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' * * 'projects/project/global/gateways/default-internet-gateway' * * 'global/gateways/default-internet-gateway' * * The string 'default-internet-gateway'. */ nextHopGateway: string; /** * The hub network that should handle matching packets, which should conform to RFC1035. 
*/ nextHopHub: string; /** * The IP address or URL to a forwarding rule of type * loadBalancingScheme=INTERNAL that should handle matching * packets. * * With the GA provider you can only specify the forwarding * rule as a partial or full URL. For example, the following * are all valid values: * * 10.128.0.56 * * https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule * * regions/region/forwardingRules/forwardingRule * * When the beta provider, you can also specify the IP address * of a forwarding rule from the same VPC or any peered VPC. * * Note that this can only be used when the destinationRange is * a public (non-RFC 1918) IP CIDR range. */ nextHopIlb: string; /** * URL to an instance that should handle matching packets. * You can specify this as a full or partial URL. For example: * * 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' * * 'projects/project/zones/zone/instances/instance' * * 'zones/zone/instances/instance' * * Just the instance name, with the zone in 'next_hop_instance_zone'. */ nextHopInstance: string; /** * The zone of the instance specified in next_hop_instance. Omit if nextHopInstance is specified as a URL. */ nextHopInstanceZone: string; /** * Internal fixed region-to-region cost that Google Cloud calculates based on factors such as network performance, distance, and available bandwidth between regions. */ nextHopInterRegionCost: string; /** * Network IP address of an instance that should handle matching packets. */ nextHopIp: string; /** * Multi-Exit Discriminator, a BGP route metric that indicates the desirability of a particular route in a network. */ nextHopMed: string; /** * URL to a Network that should handle matching packets. */ nextHopNetwork: string; /** * Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. 
*/ nextHopOrigin: string; /** * The network peering name that should handle matching packets, which should conform to RFC1035. */ nextHopPeering: string; /** * URL to a VpnTunnel that should handle matching packets. */ nextHopVpnTunnel: string; /** * Additional params passed with the request, but not persisted as part of resource payload */ params: outputs.compute.GetRouterStatusBestRouteParam[]; /** * The priority of this route. Priority is used to break ties in cases * where there is more than one matching route of equal prefix length. * * In the case of two routes with equal prefix length, the one with the * lowest-numbered priority value wins. * * Default value is 1000. Valid range is 0 through 65535. */ priority: number; /** * The ID of the project in which the resource * belongs. If it is not provided, the provider project is used. */ project: string; /** * The status of the route, which can be one of the following values: * - 'ACTIVE' for an active route * - 'INACTIVE' for an inactive route */ routeStatus: string; /** * The type of this route, which can be one of the following values: * - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers * - 'SUBNET' for a route from a subnet of the VPC * - 'BGP' for a route learned from a BGP peer of this router * - 'STATIC' for a static route */ routeType: string; selfLink: string; /** * A list of instance tags to which this route applies. */ tags: string[]; /** * If potential misconfigurations are detected for this route, this field will be populated with warning messages. */ warnings: outputs.compute.GetRouterStatusBestRouteWarning[]; } interface GetRouterStatusBestRouteAsPath { /** * The AS numbers of the AS Path. 
*/ asLists: number[]; /** * The type of the AS Path, which can be one of the following values: * - 'AS_SET': unordered set of autonomous systems that the route in has traversed * - 'AS_SEQUENCE': ordered set of autonomous systems that the route has traversed * - 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the local confederation that the route has traversed * - 'AS_CONFED_SET': unordered set of Member Autonomous Systems in the local confederation that the route has traversed */ pathSegmentType: string; } interface GetRouterStatusBestRouteParam { /** * Resource manager tags to be bound to the route. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. The field is ignored when empty. * The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. */ resourceManagerTags: { [key: string]: string; }; } interface GetRouterStatusBestRouteWarning { /** * A warning code, if applicable. For example, Compute Engine returns * NO_RESULTS_ON_PAGE if there are no results in the response. */ code: string; /** * Metadata about this warning in key: value format. For example: * "data": [ { "key": "scope", "value": "zones/us-east1-d" } */ datas: outputs.compute.GetRouterStatusBestRouteWarningData[]; /** * A human-readable description of the warning code. */ message: string; } interface GetRouterStatusBestRouteWarningData { /** * A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). */ key: string; /** * A warning data value corresponding to the key. */ value: string; } interface GetRouterStatusBestRoutesForRouter { asPaths: outputs.compute.GetRouterStatusBestRoutesForRouterAsPath[]; /** * Creation timestamp in RFC3339 text format. */ creationTimestamp: string; /** * An optional description of this resource. Provide this property * when you create the resource. */ description: string; /** * The destination range of outgoing packets that this route applies to. * Only IPv4 is supported. */ destRange: string; /** * The name of the router. */ name: string; /** * The network name or resource link to the parent * network of this subnetwork. */ network: string; /** * URL to a gateway that should handle matching packets. * Currently, you can only specify the internet gateway, using a full or * partial valid URL: * * 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' * * 'projects/project/global/gateways/default-internet-gateway' * * 'global/gateways/default-internet-gateway' * * The string 'default-internet-gateway'. */ nextHopGateway: string; /** * The hub network that should handle matching packets, which should conform to RFC1035. */ nextHopHub: string; /** * The IP address or URL to a forwarding rule of type * loadBalancingScheme=INTERNAL that should handle matching * packets. * * With the GA provider you can only specify the forwarding * rule as a partial or full URL. 
For example, the following * are all valid values: * * 10.128.0.56 * * https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule * * regions/region/forwardingRules/forwardingRule * * When the beta provider, you can also specify the IP address * of a forwarding rule from the same VPC or any peered VPC. * * Note that this can only be used when the destinationRange is * a public (non-RFC 1918) IP CIDR range. */ nextHopIlb: string; /** * URL to an instance that should handle matching packets. * You can specify this as a full or partial URL. For example: * * 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' * * 'projects/project/zones/zone/instances/instance' * * 'zones/zone/instances/instance' * * Just the instance name, with the zone in 'next_hop_instance_zone'. */ nextHopInstance: string; /** * The zone of the instance specified in next_hop_instance. Omit if nextHopInstance is specified as a URL. */ nextHopInstanceZone: string; /** * Internal fixed region-to-region cost that Google Cloud calculates based on factors such as network performance, distance, and available bandwidth between regions. */ nextHopInterRegionCost: string; /** * Network IP address of an instance that should handle matching packets. */ nextHopIp: string; /** * Multi-Exit Discriminator, a BGP route metric that indicates the desirability of a particular route in a network. */ nextHopMed: string; /** * URL to a Network that should handle matching packets. */ nextHopNetwork: string; /** * Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. */ nextHopOrigin: string; /** * The network peering name that should handle matching packets, which should conform to RFC1035. */ nextHopPeering: string; /** * URL to a VpnTunnel that should handle matching packets. 
*/ nextHopVpnTunnel: string; /** * Additional params passed with the request, but not persisted as part of resource payload */ params: outputs.compute.GetRouterStatusBestRoutesForRouterParam[]; /** * The priority of this route. Priority is used to break ties in cases * where there is more than one matching route of equal prefix length. * * In the case of two routes with equal prefix length, the one with the * lowest-numbered priority value wins. * * Default value is 1000. Valid range is 0 through 65535. */ priority: number; /** * The ID of the project in which the resource * belongs. If it is not provided, the provider project is used. */ project: string; /** * The status of the route, which can be one of the following values: * - 'ACTIVE' for an active route * - 'INACTIVE' for an inactive route */ routeStatus: string; /** * The type of this route, which can be one of the following values: * - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers * - 'SUBNET' for a route from a subnet of the VPC * - 'BGP' for a route learned from a BGP peer of this router * - 'STATIC' for a static route */ routeType: string; selfLink: string; /** * A list of instance tags to which this route applies. */ tags: string[]; /** * If potential misconfigurations are detected for this route, this field will be populated with warning messages. */ warnings: outputs.compute.GetRouterStatusBestRoutesForRouterWarning[]; } interface GetRouterStatusBestRoutesForRouterAsPath { /** * The AS numbers of the AS Path. 
*/ asLists: number[]; /** * The type of the AS Path, which can be one of the following values: * - 'AS_SET': unordered set of autonomous systems that the route in has traversed * - 'AS_SEQUENCE': ordered set of autonomous systems that the route has traversed * - 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the local confederation that the route has traversed * - 'AS_CONFED_SET': unordered set of Member Autonomous Systems in the local confederation that the route has traversed */ pathSegmentType: string; } interface GetRouterStatusBestRoutesForRouterParam { /** * Resource manager tags to be bound to the route. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. The field is ignored when empty. * The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. */ resourceManagerTags: { [key: string]: string; }; } interface GetRouterStatusBestRoutesForRouterWarning { /** * A warning code, if applicable. For example, Compute Engine returns * NO_RESULTS_ON_PAGE if there are no results in the response. */ code: string; /** * Metadata about this warning in key: value format. For example: * "data": [ { "key": "scope", "value": "zones/us-east1-d" } */ datas: outputs.compute.GetRouterStatusBestRoutesForRouterWarningData[]; /** * A human-readable description of the warning code. */ message: string; } interface GetRouterStatusBestRoutesForRouterWarningData { /** * A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). */ key: string; /** * A warning data value corresponding to the key. */ value: string; } interface GetRoutersRouter { bgpPeers: outputs.compute.GetRoutersRouterBgpPeer[]; bgps: outputs.compute.GetRoutersRouterBgp[]; creationTimestamp: string; description: string; encryptedInterconnectRouter: boolean; interfaces: outputs.compute.GetRoutersRouterInterface[]; md5AuthenticationKeys: outputs.compute.GetRoutersRouterMd5AuthenticationKey[]; name: string; nats: outputs.compute.GetRoutersRouterNat[]; network: string; selfLink: string; } interface GetRoutersRouterBgp { advertiseMode: string; advertisedGroups: string[]; advertisedIpRanges: outputs.compute.GetRoutersRouterBgpAdvertisedIpRange[]; asn: number; keepaliveInterval: number; } interface GetRoutersRouterBgpAdvertisedIpRange { description: string; range: string; } interface GetRoutersRouterBgpPeer { advertiseMode: string; advertisedRoutePriority: number; enable: string; enableIpv6: boolean; interfaceName: string; ipAddress: string; managementType: string; name: string; peerAsn: number; peerIpAddress: string; } interface GetRoutersRouterInterface { ipRange: string; linkedInterconnectAttachment: string; linkedVpnTunnel: string; name: string; privateIpAddress: string; redundantInterface: string; subnetwork: string; } interface GetRoutersRouterMd5AuthenticationKey { key: string; name: string; } interface GetRoutersRouterNat { enableEndpointIndependentMapping: boolean; icmpIdleTimeoutSec: number; minPortsPerVm: number; name: string; natIpAllocateOption: string; natIps: string[]; sourceSubnetworkIpRangesToNat: string; tcpEstablishedIdleTimeoutSec: number; tcpTransitoryIdleTimeoutSec: number; udpIdleTimeoutSec: number; } interface GetSecurityPolicyAdaptiveProtectionConfig { /** * Auto Deploy 
Config of this security policy */ autoDeployConfigs: outputs.compute.GetSecurityPolicyAdaptiveProtectionConfigAutoDeployConfig[]; /** * Layer 7 DDoS Defense Config of this security policy */ layer7DdosDefenseConfigs: outputs.compute.GetSecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig[]; } interface GetSecurityPolicyAdaptiveProtectionConfigAutoDeployConfig { /** * Rules are only automatically deployed for alerts on potential attacks with confidence scores greater than this threshold. */ confidenceThreshold: number; /** * Google Cloud Armor stops applying the action in the automatically deployed rule to an identified attacker after this duration. The rule continues to operate against new requests. */ expirationSec: number; /** * Rules are only automatically deployed when the estimated impact to baseline traffic from the suggested mitigation is below this threshold. */ impactedBaselineThreshold: number; /** * Identifies new attackers only when the load to the backend service that is under attack exceeds this threshold. */ loadThreshold: number; } interface GetSecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig { /** * If set to true, enables CAAP for L7 DDoS detection. */ enable: boolean; /** * Rule visibility. Supported values include: "STANDARD", "PREMIUM". */ ruleVisibility: string; /** * Configuration options for layer7 adaptive protection for various customizable thresholds. */ thresholdConfigs: outputs.compute.GetSecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig[]; } interface GetSecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig { autoDeployConfidenceThreshold: number; autoDeployExpirationSec: number; autoDeployImpactedBaselineThreshold: number; autoDeployLoadThreshold: number; detectionAbsoluteQps: number; detectionLoadThreshold: number; detectionRelativeToBaselineQps: number; /** * The name of the security policy. Provide either this or a `selfLink`. 
*/ name: string; trafficGranularityConfigs: outputs.compute.GetSecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig[]; }
    interface GetSecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig {
        /**
         * If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty.
         */
        enableEachUniqueValue: boolean;
        /**
         * Type of this configuration.
         */
        type: string;
        /**
         * Requests that match this value constitute a granular traffic unit.
         */
        value: string;
    }
    interface GetSecurityPolicyAdvancedOptionsConfig {
        /**
         * Custom configuration to apply the JSON parsing. Only applicable when JSON parsing is set to STANDARD.
         */
        jsonCustomConfigs: outputs.compute.GetSecurityPolicyAdvancedOptionsConfigJsonCustomConfig[];
        /**
         * JSON body parsing. Supported values include: "DISABLED", "STANDARD".
         */
        jsonParsing: string;
        /**
         * Logging level. Supported values include: "NORMAL", "VERBOSE".
         */
        logLevel: string;
        /**
         * The maximum request size chosen by the customer with WAF enabled. Values supported are "8KB", "16KB", "32KB", "48KB" and "64KB". Values are case insensitive.
         */
        requestBodyInspectionSize: string;
        /**
         * An optional list of case-insensitive request header names to use for resolving the caller's client IP address.
         */
        userIpRequestHeaders: string[];
    }
    interface GetSecurityPolicyAdvancedOptionsConfigJsonCustomConfig {
        /**
         * A list of custom Content-Type header values to apply the JSON parsing.
         */
        contentTypes: string[];
    }
    interface GetSecurityPolicyRecaptchaOptionsConfig {
        /**
         * A field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.
*/ redirectSiteKey: string; }
    interface GetSecurityPolicyRule {
        /**
         * Action to take when match matches the request.
         */
        action: string;
        /**
         * An optional description of this rule. Max size is 64.
         */
        description: string;
        /**
         * Additional actions that are performed on headers.
         */
        headerActions: outputs.compute.GetSecurityPolicyRuleHeaderAction[];
        /**
         * A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding action is enforced.
         */
        matches: outputs.compute.GetSecurityPolicyRuleMatch[];
        /**
         * Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect.
         */
        preconfiguredWafConfigs: outputs.compute.GetSecurityPolicyRulePreconfiguredWafConfig[];
        /**
         * When set to true, the action specified above is not enforced. Stackdriver logs for requests that trigger a preview action are annotated as such.
         */
        preview: boolean;
        /**
         * A unique positive integer indicating the priority of evaluation for a rule. Rules are evaluated from highest priority (lowest numerically) to lowest priority (highest numerically) in order.
         */
        priority: number;
        /**
         * Rate limit threshold for this security policy. Must be specified if the action is "rateBasedBan" or "throttle". Cannot be specified for any other actions.
         */
        rateLimitOptions: outputs.compute.GetSecurityPolicyRuleRateLimitOption[];
        /**
         * Parameters defining the redirect action. Cannot be specified for any other actions.
         */
        redirectOptions: outputs.compute.GetSecurityPolicyRuleRedirectOption[];
    }
    interface GetSecurityPolicyRuleHeaderAction {
        /**
         * The list of request headers to add or overwrite if they're already present.
         */
        requestHeadersToAdds: outputs.compute.GetSecurityPolicyRuleHeaderActionRequestHeadersToAdd[];
    }
    interface GetSecurityPolicyRuleHeaderActionRequestHeadersToAdd {
        /**
         * The name of the header to set.
*/ headerName: string; /** * The value to set the named header to. */ headerValue: string; } interface GetSecurityPolicyRuleMatch { /** * The configuration options available when specifying versioned_expr. This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. */ configs: outputs.compute.GetSecurityPolicyRuleMatchConfig[]; /** * The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr'). */ exprOptions: outputs.compute.GetSecurityPolicyRuleMatchExprOption[]; /** * User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. */ exprs: outputs.compute.GetSecurityPolicyRuleMatchExpr[]; /** * Predefined rule expression. If this field is specified, config must also be specified. Available options: SRC_IPS_V1: Must specify the corresponding srcIpRanges field in config. */ versionedExpr: string; } interface GetSecurityPolicyRuleMatchConfig { /** * Set of IP addresses or ranges (IPV4 or IPV6) in CIDR notation to match against inbound traffic. There is a limit of 10 IP ranges per rule. A value of '*' matches all IPs (can be used to override the default behavior). */ srcIpRanges: string[]; } interface GetSecurityPolicyRuleMatchExpr { /** * Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. */ expression: string; } interface GetSecurityPolicyRuleMatchExprOption { /** * reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect. */ recaptchaOptions: outputs.compute.GetSecurityPolicyRuleMatchExprOptionRecaptchaOption[]; } interface GetSecurityPolicyRuleMatchExprOptionRecaptchaOption { /** * A list of site keys to be used during the validation of reCAPTCHA action-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created */ actionTokenSiteKeys: string[]; /** * A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. */ sessionTokenSiteKeys: string[]; } interface GetSecurityPolicyRulePreconfiguredWafConfig { /** * An exclusion to apply during preconfigured WAF evaluation. */ exclusions: outputs.compute.GetSecurityPolicyRulePreconfiguredWafConfigExclusion[]; } interface GetSecurityPolicyRulePreconfiguredWafConfigExclusion { /** * Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. */ requestCookies: outputs.compute.GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky[]; /** * Request header whose value will be excluded from inspection during preconfigured WAF evaluation. */ requestHeaders: outputs.compute.GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader[]; /** * Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body. */ requestQueryParams: outputs.compute.GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam[]; /** * Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded. */ requestUris: outputs.compute.GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri[]; /** * A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set. */ targetRuleIds: string[]; /** * Target WAF rule set to apply the preconfigured WAF exclusion. 
*/ targetRuleSet: string; } interface GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky { /** * You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader { /** * You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam { /** * You can specify an exact match or a partial match by using a field operator and a field value. 
Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetSecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri { /** * You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value: string; } interface GetSecurityPolicyRuleRateLimitOption { /** * Can only be specified if the action for the rule is "rateBasedBan". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. */ banDurationSec: number; /** * Can only be specified if the action for the rule is "rateBasedBan". 
If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. */ banThresholds: outputs.compute.GetSecurityPolicyRuleRateLimitOptionBanThreshold[]; /** * Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. */ conformAction: string; /** * Determines the key to enforce the rateLimitThreshold on */ enforceOnKey: string; /** * Enforce On Key Config of this security policy */ enforceOnKeyConfigs: outputs.compute.GetSecurityPolicyRuleRateLimitOptionEnforceOnKeyConfig[]; /** * Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName: string; /** * Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny()" where valid values for status are 403, 404, 429, and 502, and "redirect" where the redirect parameters come from exceedRedirectOptions below. */ exceedAction: string; /** * Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. */ exceedRedirectOptions: outputs.compute.GetSecurityPolicyRuleRateLimitOptionExceedRedirectOption[]; /** * Threshold at which to begin ratelimiting. */ rateLimitThresholds: outputs.compute.GetSecurityPolicyRuleRateLimitOptionRateLimitThreshold[]; } interface GetSecurityPolicyRuleRateLimitOptionBanThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count: number; /** * Interval over which the threshold is computed. 
*/ intervalSec: number; } interface GetSecurityPolicyRuleRateLimitOptionEnforceOnKeyConfig { /** * Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName: string; /** * Determines the key to enforce the rateLimitThreshold on */ enforceOnKeyType: string; } interface GetSecurityPolicyRuleRateLimitOptionExceedRedirectOption { /** * Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. */ target: string; /** * Type of the redirect action. */ type: string; } interface GetSecurityPolicyRuleRateLimitOptionRateLimitThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count: number; /** * Interval over which the threshold is computed. */ intervalSec: number; } interface GetSecurityPolicyRuleRedirectOption { /** * Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. */ target: string; /** * Type of the redirect action. Available options: EXTERNAL_302: Must specify the corresponding target field in config. GOOGLE_RECAPTCHA: Cannot specify target field in config. */ type: string; } interface GetSnapshotSnapshotEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey: string; /** * Specifies an encryption key stored in Google Cloud KMS, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. 
*/ rsaEncryptedKey: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface GetSnapshotSourceDiskEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey: string; /** * Specifies an encryption key stored in Google Cloud KMS, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rsaEncryptedKey: string; } interface GetStoragePoolResourceStatus { /** * Number of disks used. */ diskCount: string; /** * Timestamp of the last successful resize in RFC3339 text format. */ lastResizeTimestamp: string; /** * Maximum allowed aggregate disk size in gigabytes. */ maxTotalProvisionedDiskCapacityGb: string; /** * Space used by data stored in disks within the storage pool (in bytes). * This will reflect the total number of bytes written to the disks in the pool, * in contrast to the capacity of those disks. */ poolUsedCapacityBytes: string; /** * Sum of all the disks' provisioned IOPS, minus some amount * that is allowed per disk that is not counted towards pool's IOPS capacity. * For more information, see https://cloud.google.com/compute/docs/disks/storage-pools. */ poolUsedIops: string; /** * Sum of all the disks' provisioned throughput in MB/s. */ poolUsedThroughput: string; /** * Amount of data written into the pool, before it is compacted. */ poolUserWrittenBytes: string; /** * Sum of all the capacity provisioned in disks in this storage pool. * A disk's provisioned capacity is the same as its total capacity. 
*/ totalProvisionedDiskCapacityGb: string; /** * Sum of all the disks' provisioned IOPS. */ totalProvisionedDiskIops: string; /** * Sum of all the disks' provisioned throughput in MB/s, * minus some amount that is allowed per disk that is not counted towards pool's throughput capacity. */ totalProvisionedDiskThroughput: string; } interface GetStoragePoolStatus { /** * Number of disks used. */ diskCount: string; /** * Timestamp of the last successful resize in RFC3339 text format. */ lastResizeTimestamp: string; /** * Maximum allowed aggregate disk size in gigabytes. */ maxTotalProvisionedDiskCapacityGb: string; /** * Space used by data stored in disks within the storage pool (in bytes). * This will reflect the total number of bytes written to the disks in the pool, in contrast to the capacity of those disks. */ poolUsedCapacityBytes: string; /** * Sum of all the disks' provisioned IOPS, minus some amount that is allowed per disk that is not counted towards pool's IOPS capacity. For more information, see https://cloud.google.com/compute/docs/disks/storage-pools. */ poolUsedIops: string; /** * Sum of all the disks' provisioned throughput in MB/s. */ poolUsedThroughput: string; /** * Amount of data written into the pool, before it is compacted. */ poolUserWrittenBytes: string; /** * Sum of all the capacity provisioned in disks in this storage pool. * A disk's provisioned capacity is the same as its total capacity. */ totalProvisionedDiskCapacityGb: string; /** * Sum of all the disks' provisioned IOPS. */ totalProvisionedDiskIops: string; /** * Sum of all the disks' provisioned throughput in MB/s, * minus some amount that is allowed per disk that is not counted towards pool's throughput capacity. */ totalProvisionedDiskThroughput: string; } interface GetStoragePoolTypesDeprecated { /** * An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. 
* This is only informational and the status will not change unless the client explicitly changes it. */ deleted: string; /** * An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. * This is only informational and the status will not change unless the client explicitly changes it. */ deprecated: string; /** * An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. * This is only informational and the status will not change unless the client explicitly changes it. */ obsolete: string; /** * The URL of the suggested replacement for a deprecated resource. * The suggested replacement resource must be the same kind of resource as the deprecated resource. */ replacement: string; /** * The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. * Operations which communicate the end of life date for an image, can use ACTIVE. * Operations which create a new resource using a DEPRECATED resource will return successfully, * but with a warning indicating the deprecated resource and recommending its replacement. * Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. */ state: string; } interface GetSubnetworkSecondaryIpRange { /** * The range of IP addresses belonging to this subnetwork * secondary range. */ ipCidrRange: string; /** * The name associated with this subnetwork secondary range, used * when adding an alias IP range to a VM instance. */ rangeName: string; } interface GetSubnetworksSubnetwork { /** * Description of the subnetwork. */ description: string; /** * The IP address range represented as a CIDR block. */ ipCidrRange: string; /** * The name of the subnetwork. */ name: string; /** * The self link of the parent network. */ network: string; /** * The name of the parent network computed from `network` attribute. 
*/ networkName: string; /** * (Deprecated) The name of the parent network computed from `network` attribute. (deprecated and will be removed in a future major release. Use `networkName` instead.) * * @deprecated Use `networkName` instead. This field will be removed in a future major release. */ networkSelfLink: string; /** * Whether the VMs in the subnet can access Google services without assigned external IP addresses. */ privateIpGoogleAccess: boolean; /** * The self link of the subnetwork. */ selfLink: string; } interface GlobalForwardingRuleMetadataFilter { /** * The list of label value pairs that must match labels in the * provided metadata based on filterMatchCriteria * This list must not be empty and can have at the most 64 entries. * Structure is documented below. */ filterLabels: outputs.compute.GlobalForwardingRuleMetadataFilterFilterLabel[]; /** * Specifies how individual filterLabel matches within the list of * filterLabels contribute towards the overall metadataFilter match. * MATCH_ANY - At least one of the filterLabels must have a matching * label in the provided metadata. * MATCH_ALL - All filterLabels must have matching labels in the * provided metadata. * Possible values are: `MATCH_ANY`, `MATCH_ALL`. */ filterMatchCriteria: string; } interface GlobalForwardingRuleMetadataFilterFilterLabel { /** * Name of the metadata label. The length must be between * 1 and 1024 characters, inclusive. */ name: string; /** * The value that the label must match. The value has a maximum * length of 1024 characters. */ value: string; } interface GlobalForwardingRuleServiceDirectoryRegistrations { /** * Service Directory namespace to register the forwarding rule under. */ namespace: string; /** * [Optional] Service Directory region to register this global forwarding rule under. * Default to "us-central1". Only used for PSC for Google APIs. All PSC for * Google APIs Forwarding Rules on the same network should use the same Service * Directory region. 
*/ serviceDirectoryRegion?: string; } interface HaVpnGatewayParams { /** * Resource manager tags to be bound to the HaVpnGateway. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; } interface HaVpnGatewayVpnInterface { /** * The numeric ID of this VPN gateway interface. */ id?: number; /** * URL of the interconnect attachment resource. When the value * of this field is present, the VPN Gateway will be used for * IPsec-encrypted Cloud Interconnect; all Egress or Ingress * traffic for this VPN Gateway interface will go through the * specified interconnect attachment resource. * Not currently available publicly. */ interconnectAttachment?: string; /** * (Output) * The external IP address for this VPN gateway interface. */ ipAddress: string; } interface HealthCheckGrpcHealthCheck { /** * The gRPC service name for the health check. * The value of grpcServiceName has the following meanings by convention: * - Empty serviceName means the overall status of all services at the backend. * - Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. * The grpcServiceName can only be ASCII. */ grpcServiceName?: string; /** * The port number for the health check request. * Must be specified if portName and portSpecification are not set * or if portSpecification is USE_FIXED_PORT. Valid values are 1 through 65535. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. 
* * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, gRPC health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; } interface HealthCheckGrpcTlsHealthCheck { /** * The gRPC service name for the health check. * The value of grpcServiceName has the following meanings by convention: * - Empty serviceName means the overall status of all services at the backend. * - Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. * The grpcServiceName can only be ASCII. */ grpcServiceName?: string; /** * The port number for the health check request. * Must be specified if portSpecification is USE_FIXED_PORT. Valid values are 1 through 65535. */ port?: number; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: Not supported for GRPC with TLS health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, gRPC with TLS health check follows behavior specified in the `port` field. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; } interface HealthCheckHttp2HealthCheck { /** * The value of the host header in the HTTP2 health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. 
*/ host?: string; /** * The TCP port number for the HTTP2 health check request. * The default value is 443. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, HTTP2 health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader?: string; /** * The request path of the HTTP2 health check request. * The default value is /. */ requestPath?: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface HealthCheckHttpHealthCheck { /** * The value of the host header in the HTTP health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host?: string; /** * The TCP port number for the HTTP health check request. * The default value is 80. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. 
*/ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, HTTP health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. * Default value is `NONE`. * Possible values are: `NONE`, `PROXY_V1`. */ proxyHeader?: string; /** * The request path of the HTTP health check request. * The default value is /. */ requestPath?: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface HealthCheckHttpsHealthCheck { /** * The value of the host header in the HTTPS health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host?: string; /** * The TCP port number for the HTTPS health check request. * The default value is 443. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. 
* * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each
         * network endpoint is used for health checking. For other backends, the
         * port or named port specified in the Backend Service is used for health
         * checking.
         * If not specified, HTTPS health check follows behavior specified in `port` and
         * `portName` fields.
         * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`.
         */
        portSpecification?: string;
        /**
         * Specifies the type of proxy header to append before sending data to the
         * backend.
         * Default value is `NONE`.
         * Possible values are: `NONE`, `PROXY_V1`.
         */
        proxyHeader?: string;
        /**
         * The request path of the HTTPS health check request.
         * The default value is /.
         */
        requestPath?: string;
        /**
         * The bytes to match against the beginning of the response data. If left empty
         * (the default value), any response will indicate health. The response data
         * can only be ASCII.
         */
        response?: string;
    }
    interface HealthCheckLogConfig {
        /**
         * Indicates whether or not to export logs. This is false by default,
         * which means no health check logging will be done.
         */
        enable?: boolean;
    }
    interface HealthCheckSslHealthCheck {
        /**
         * The TCP port number for the SSL health check request.
         * The default value is 443.
         */
        port?: number;
        /**
         * Port name as defined in InstanceGroup#NamedPort#name. If both port and
         * portName are defined, port takes precedence.
         */
        portName?: string;
        /**
         * Specifies how port is selected for health checking, can be one of the
         * following values:
         * * `USE_FIXED_PORT`: The port number in `port` is used for health checking.
         * * `USE_NAMED_PORT`: The `portName` is used for health checking.
         * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each
         * network endpoint is used for health checking. For other backends, the
         * port or named port specified in the Backend Service is used for health
         * checking.
         * If not specified, SSL health check follows behavior specified in `port` and
         * `portName` fields.
* Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`.
         */
        portSpecification?: string;
        /**
         * Specifies the type of proxy header to append before sending data to the
         * backend.
         * Default value is `NONE`.
         * Possible values are: `NONE`, `PROXY_V1`.
         */
        proxyHeader?: string;
        /**
         * The application data to send once the SSL connection has been
         * established (default value is empty). If both request and response are
         * empty, the connection establishment alone will indicate health. The request
         * data can only be ASCII.
         */
        request?: string;
        /**
         * The bytes to match against the beginning of the response data. If left empty
         * (the default value), any response will indicate health. The response data
         * can only be ASCII.
         */
        response?: string;
    }
    interface HealthCheckTcpHealthCheck {
        /**
         * The TCP port number for the TCP health check request.
         * The default value is 443.
         */
        port?: number;
        /**
         * Port name as defined in InstanceGroup#NamedPort#name. If both port and
         * portName are defined, port takes precedence.
         */
        portName?: string;
        /**
         * Specifies how port is selected for health checking, can be one of the
         * following values:
         * * `USE_FIXED_PORT`: The port number in `port` is used for health checking.
         * * `USE_NAMED_PORT`: The `portName` is used for health checking.
         * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each
         * network endpoint is used for health checking. For other backends, the
         * port or named port specified in the Backend Service is used for health
         * checking.
         * If not specified, TCP health check follows behavior specified in `port` and
         * `portName` fields.
         * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`.
         */
        portSpecification?: string;
        /**
         * Specifies the type of proxy header to append before sending data to the
         * backend.
         * Default value is `NONE`.
         * Possible values are: `NONE`, `PROXY_V1`.
*/
        proxyHeader?: string;
        /**
         * The application data to send once the TCP connection has been
         * established (default value is empty). If both request and response are
         * empty, the connection establishment alone will indicate health. The request
         * data can only be ASCII.
         */
        request?: string;
        /**
         * The bytes to match against the beginning of the response data. If left empty
         * (the default value), any response will indicate health. The response data
         * can only be ASCII.
         */
        response?: string;
    }
    interface ImageGuestOsFeature {
        /**
         * The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options.
         * Possible values are: `MULTI_IP_SUBNET`, `SECURE_BOOT`, `SEV_CAPABLE`, `UEFI_COMPATIBLE`, `VIRTIO_SCSI_MULTIQUEUE`, `WINDOWS`, `GVNIC`, `IDPF`, `SEV_LIVE_MIGRATABLE`, `SEV_SNP_CAPABLE`, `SUSPEND_RESUME_COMPATIBLE`, `TDX_CAPABLE`, `SEV_LIVE_MIGRATABLE_V2`.
         */
        type: string;
    }
    interface ImageIamBindingCondition {
        /**
         * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
         *
         * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the
         * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will
         * consider it to be an entirely different resource and will treat it as such.
         */
        description?: string;
        /**
         * Textual representation of an expression in Common Expression Language syntax.
         */
        expression: string;
        /**
         * A title for the expression, i.e. a short string describing its purpose.
         */
        title: string;
    }
    interface ImageIamMemberCondition {
        /**
         * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
*
         * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the
         * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will
         * consider it to be an entirely different resource and will treat it as such.
         */
        description?: string;
        /**
         * Textual representation of an expression in Common Expression Language syntax.
         */
        expression: string;
        /**
         * A title for the expression, i.e. a short string describing its purpose.
         */
        title: string;
    }
    interface ImageImageEncryptionKey {
        /**
         * The self link of the encryption key that is stored in Google Cloud
         * KMS.
         */
        kmsKeySelfLink?: string;
        /**
         * The service account being used for the encryption request for the
         * given KMS key. If absent, the Compute Engine default service
         * account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rawKey?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rsaEncryptedKey?: string;
    }
    interface ImageRawDisk {
        /**
         * The format used to encode and transmit the block device, which
         * should be TAR. This is just a container and transmission format
         * and not a runtime format. Provided by the client when the disk
         * image is created.
         * Default value is `TAR`.
         * Possible values are: `TAR`.
         */
        containerType?: string;
        /**
         * An optional SHA1 checksum of the disk image before unpackaging.
         * This is provided by the client when the disk image is created.
         */
        sha1?: string;
        /**
         * The full Google Cloud Storage URL where disk storage is stored.
         * You must provide either this property or the sourceDisk property
         * but not both.
*/
        source: string;
    }
    interface ImageShieldedInstanceInitialState {
        /**
         * The Key Database (db).
         * Structure is documented below.
         */
        dbs?: outputs.compute.ImageShieldedInstanceInitialStateDb[];
        /**
         * The forbidden key database (dbx).
         * Structure is documented below.
         */
        dbxs?: outputs.compute.ImageShieldedInstanceInitialStateDbx[];
        /**
         * The Key Exchange Key (KEK).
         * Structure is documented below.
         */
        keks?: outputs.compute.ImageShieldedInstanceInitialStateKek[];
        /**
         * The Platform Key (PK).
         * Structure is documented below.
         */
        pk?: outputs.compute.ImageShieldedInstanceInitialStatePk;
    }
    interface ImageShieldedInstanceInitialStateDb {
        /**
         * The raw content in the secure keys file.
         * A base64-encoded string.
         */
        content: string;
        /**
         * The file type of the source file.
         */
        fileType?: string;
    }
    interface ImageShieldedInstanceInitialStateDbx {
        /**
         * The raw content in the secure keys file.
         * A base64-encoded string.
         */
        content: string;
        /**
         * The file type of the source file.
         */
        fileType?: string;
    }
    interface ImageShieldedInstanceInitialStateKek {
        /**
         * The raw content in the secure keys file.
         * A base64-encoded string.
         */
        content: string;
        /**
         * The file type of the source file.
         */
        fileType?: string;
    }
    interface ImageShieldedInstanceInitialStatePk {
        /**
         * The raw content in the secure keys file.
         * A base64-encoded string.
         */
        content: string;
        /**
         * The file type of the source file.
         */
        fileType?: string;
    }
    interface ImageSourceDiskEncryptionKey {
        /**
         * The self link of the encryption key used to decrypt this resource. Also called KmsKeyName
         * in the cloud console. Your project's Compute Engine System service account
         * (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have
         * `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
* See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys
         */
        kmsKeySelfLink?: string;
        /**
         * The service account being used for the encryption request for the
         * given KMS key. If absent, the Compute Engine default service
         * account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rawKey?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit
         * customer-supplied encryption key to either encrypt or decrypt
         * this resource. You can provide either the rawKey or the rsaEncryptedKey.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rsaEncryptedKey?: string;
    }
    interface ImageSourceImageEncryptionKey {
        /**
         * The self link of the encryption key used to decrypt this resource. Also called KmsKeyName
         * in the cloud console. Your project's Compute Engine System service account
         * (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have
         * `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
         * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys
         */
        kmsKeySelfLink?: string;
        /**
         * The service account being used for the encryption request for the
         * given KMS key. If absent, the Compute Engine default service
         * account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rawKey?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit
         * customer-supplied encryption key to either encrypt or decrypt
         * this resource.
You can provide either the rawKey or the rsaEncryptedKey.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rsaEncryptedKey?: string;
    }
    interface ImageSourceSnapshotEncryptionKey {
        /**
         * The self link of the encryption key used to decrypt this resource. Also called KmsKeyName
         * in the cloud console. Your project's Compute Engine System service account
         * (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have
         * `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
         * See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys
         */
        kmsKeySelfLink?: string;
        /**
         * The service account being used for the encryption request for the
         * given KMS key. If absent, the Compute Engine default service
         * account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in
         * RFC 4648 base64 to either encrypt or decrypt this resource.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rawKey?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit
         * customer-supplied encryption key to either encrypt or decrypt
         * this resource. You can provide either the rawKey or the rsaEncryptedKey.
         * **Note**: This property is sensitive and will not be displayed in the plan.
         */
        rsaEncryptedKey?: string;
    }
    interface InstanceAdvancedMachineFeatures {
        /**
         * Defines whether the instance should have nested virtualization enabled. Defaults to false.
         */
        enableNestedVirtualization?: boolean;
        /**
         * Whether to enable UEFI networking for instance creation.
         */
        enableUefiNetworking?: boolean;
        /**
         * [The PMU](https://cloud.google.com/compute/docs/pmu-overview) is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are `STANDARD`, `ENHANCED`, and `ARCHITECTURAL`.
*/
        performanceMonitoringUnit?: string;
        /**
         * The number of threads per physical core. To disable [simultaneous multithreading (SMT)](https://cloud.google.com/compute/docs/instances/disabling-smt) set this to 1.
         */
        threadsPerCore?: number;
        /**
         * Turbo frequency mode to use for the instance. Supported modes are currently either `ALL_CORE_MAX` or unset (default).
         */
        turboMode?: string;
        /**
         * The number of physical cores to expose to an instance. [visible cores info (VC)](https://cloud.google.com/compute/docs/instances/customize-visible-cores).
         */
        visibleCoreCount?: number;
    }
    interface InstanceAttachedDisk {
        /**
         * Name with which the attached disk will be accessible
         * under `/dev/disk/by-id/google-*`
         */
        deviceName: string;
        /**
         * A 256-bit [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
         * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * to encrypt this disk. Only one of `kmsKeySelfLink`, `diskEncryptionKeyRsa` and `diskEncryptionKeyRaw`
         * may be set.
         */
        diskEncryptionKeyRaw?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to encrypt this disk. Only one of `kmsKeySelfLink`, `diskEncryptionKeyRsa` and `diskEncryptionKeyRaw`
         * may be set.
         */
        diskEncryptionKeyRsa?: string;
        /**
         * The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * encoded SHA-256 hash of the [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
         */
        diskEncryptionKeySha256: string;
        /**
         * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used.
         */
        diskEncryptionServiceAccount?: string;
        /**
         * Whether to force attach the regional disk even if it's currently attached to another instance.
If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter causes VM recreation.
         */
        forceAttach?: boolean;
        /**
         * The selfLink of the encryption key that is
         * stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink`, `diskEncryptionKeyRsa` and `diskEncryptionKeyRaw`
         * may be set.
         */
        kmsKeySelfLink: string;
        /**
         * Either "READ_ONLY" or "READ_WRITE", defaults to "READ_WRITE".
         * If you have a persistent disk with data that you want to share
         * between multiple instances, detach it from any read-write instances and
         * attach it to one or more instances in read-only mode.
         */
        mode?: string;
        /**
         * The name or selfLink of the disk to attach to this instance.
         */
        source: string;
    }
    interface InstanceBootDisk {
        /**
         * Whether the disk will be auto-deleted when the instance
         * is deleted. Defaults to true.
         */
        autoDelete?: boolean;
        /**
         * Name with which attached disk will be accessible.
         * On the instance, this device will be `/dev/disk/by-id/google-{{device_name}}`.
         */
        deviceName: string;
        /**
         * A 256-bit [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
         * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * to encrypt this disk. Only one of `kmsKeySelfLink`, `diskEncryptionKeyRsa` and `diskEncryptionKeyRaw`
         * may be set.
         */
        diskEncryptionKeyRaw?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to encrypt this disk. Only one of `kmsKeySelfLink`, `diskEncryptionKeyRsa` and `diskEncryptionKeyRaw`
         * may be set.
         */
        diskEncryptionKeyRsa?: string;
        /**
         * The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * encoded SHA-256 hash of the [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
*/
        diskEncryptionKeySha256: string;
        /**
         * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used.
         */
        diskEncryptionServiceAccount?: string;
        /**
         * boolean field that determines whether to force attach the regional
         * disk even if it's currently attached to another instance. If you try to force attach a zonal
         * disk to an instance, you will receive an error. Setting this parameter causes VM recreation.
         */
        forceAttach?: boolean;
        /**
         * A list of features to enable on the guest operating system. Applicable only for bootable images. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options.
         */
        guestOsFeatures: string[];
        /**
         * Parameters for a new disk that will be created
         * alongside the new instance. Either `initializeParams` or `source` must be set.
         * Structure is documented below.
         */
        initializeParams: outputs.compute.InstanceBootDiskInitializeParams;
        /**
         * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.)
         */
        interface?: string;
        /**
         * The selfLink of the encryption key that is
         * stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink`,
         * `diskEncryptionKeyRsa` and `diskEncryptionKeyRaw`
         * may be set.
         */
        kmsKeySelfLink: string;
        /**
         * The mode in which to attach this disk, either `READ_WRITE`
         * or `READ_ONLY`. If not specified, the default is to attach the disk in `READ_WRITE` mode.
         */
        mode?: string;
        /**
         * The name or selfLink of the existing disk (such as those managed by
         * `gcp.compute.Disk`) or disk image. To create an instance from a snapshot, first create a
         * `gcp.compute.Disk` from a snapshot and reference it here.
*/
        source: string;
    }
    interface InstanceBootDiskInitializeParams {
        /**
         * The architecture of the attached disk. Valid values are `ARM64` or `x86_64`.
         */
        architecture: string;
        /**
         * Whether this disk is using confidential compute mode.
         * Note: Only supported on hyperdisk skus, diskEncryptionKey is required when setting to true.
         */
        enableConfidentialCompute?: boolean;
        /**
         * The image from which to initialize this disk. This can be
         * one of: the image's `selfLink`, `projects/{project}/global/images/{image}`,
         * `projects/{project}/global/images/family/{family}`, `global/images/{image}`,
         * `global/images/family/{family}`, `family/{family}`, `{project}/{family}`,
         * `{project}/{image}`, `{family}`, or `{image}`. If referred by family, the
         * image names must include the family name. If they don't, use the
         * [gcp.compute.Image data source](https://www.terraform.io/docs/providers/google/d/compute_image.html).
         * For instance, the image `centos-6-v20180104` includes its family name `centos-6`.
         * These images can be referred by family name here.
         */
        image: string;
        /**
         * A set of key/value label pairs assigned to the disk. This
         * field is only applicable for persistent disks.
         */
        labels: {
            [key: string]: string;
        };
        /**
         * Indicates how many IOPS to provision for the disk.
         * This sets the number of I/O operations per second that the disk can handle.
         * For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).
         * Note: Updating currently is only supported for hyperdisk skus via disk update
         * api/gcloud without the need to delete and recreate the disk, hyperdisk allows
         * for an update of IOPS every 4 hours. To update your hyperdisk more frequently,
         * you'll need to manually delete and recreate it.
         */
        provisionedIops: number;
        /**
         * Indicates how much throughput to provision for the disk.
         * This sets the number of throughput mb per second that the disk can handle.
* For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).
         * Note: Updating currently is only supported for hyperdisk skus via disk update
         * api/gcloud without the need to delete and recreate the disk, hyperdisk allows
         * for an update of throughput every 4 hours. To update your hyperdisk more
         * frequently, you'll need to manually delete and recreate it.
         */
        provisionedThroughput: number;
        /**
         * A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource.
         */
        resourceManagerTags?: {
            [key: string]: string;
        };
        /**
         * A list of selfLinks of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate, so any external values are not set until the user specifies this field. Currently a max of 1 resource policy is supported.
         */
        resourcePolicies: string;
        /**
         * The size of the image in gigabytes. If not specified, it
         * will inherit the size of its base image.
         */
        size: number;
        /**
         * The snapshot from which to initialize this disk. To create a disk with a snapshot that you created, specify the snapshot name in the following format: `global/snapshots/my-backup`
         */
        snapshot: string;
        /**
         * Encryption key used to decrypt the given image. Structure is documented below.
         */
        sourceImageEncryptionKey?: outputs.compute.InstanceBootDiskInitializeParamsSourceImageEncryptionKey;
        /**
         * Encryption key used to decrypt the given snapshot. Structure is documented below.
         */
        sourceSnapshotEncryptionKey?: outputs.compute.InstanceBootDiskInitializeParamsSourceSnapshotEncryptionKey;
        /**
         * The URL or the name of the storage pool in which the new disk is created.
* For example:
         * * https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool}
         * * /projects/{project}/zones/{zone}/storagePools/{storagePool}
         * * /zones/{zone}/storagePools/{storagePool}
         * * /{storagePool}
         */
        storagePool?: string;
        /**
         * The GCE disk type. Such as pd-standard, pd-balanced or pd-ssd.
         */
        type: string;
    }
    interface InstanceBootDiskInitializeParamsSourceImageEncryptionKey {
        /**
         * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set.
         */
        kmsKeySelfLink: string;
        /**
         * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used.
         */
        kmsKeyServiceAccount?: string;
        /**
         * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set.
         */
        rawKey?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set.
         */
        rsaEncryptedKey?: string;
        /**
         * The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * encoded SHA-256 hash of the [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
         */
        sha256: string;
    }
    interface InstanceBootDiskInitializeParamsSourceSnapshotEncryptionKey {
        /**
         * The selfLink of the encryption key that is
         * stored in Google Cloud KMS to decrypt the given image. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey`
         * may be set.
         */
        kmsKeySelfLink: string;
        /**
         * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used.
*/
        kmsKeyServiceAccount?: string;
        /**
         * A 256-bit [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
         * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * to decrypt the given snapshot. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey`
         * may be set.
         */
        rawKey?: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to decrypt the given snapshot. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey`
         * may be set.
         */
        rsaEncryptedKey?: string;
        /**
         * The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
         * encoded SHA-256 hash of the [customer-supplied encryption key]
         * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
         */
        sha256: string;
    }
    interface InstanceConfidentialInstanceConfig {
        /**
         * Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: `SEV`, `SEV_SNP`, `TDX`. `onHostMaintenance` can be set to MIGRATE if `confidentialInstanceType` is set to `SEV` and `minCpuPlatform` is set to `"AMD Milan"`. Otherwise, `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently `minCpuPlatform` has to be set to `"AMD Milan"` or this will fail to create the VM.
         */
        confidentialInstanceType?: string;
        /**
         * Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, `onHostMaintenance` can be set to MIGRATE if `minCpuPlatform` is set to `"AMD Milan"`. Otherwise, `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM.
         */
        enableConfidentialCompute?: boolean;
    }
    interface InstanceFromMachineImageAdvancedMachineFeatures {
        /**
         * Whether to enable nested virtualization or not.
*/
        enableNestedVirtualization: boolean;
        /**
         * Whether to enable UEFI networking for the instance.
         */
        enableUefiNetworking: boolean;
        /**
         * The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are "STANDARD", "ENHANCED", and "ARCHITECTURAL".
         */
        performanceMonitoringUnit: string;
        /**
         * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
         */
        threadsPerCore: number;
        /**
         * Turbo frequency mode to use for the instance. The currently supported mode is "ALL_CORE_MAX".
         */
        turboMode: string;
        /**
         * The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.
         */
        visibleCoreCount: number;
    }
    interface InstanceFromMachineImageAttachedDisk {
        /**
         * Name with which the attached disk is accessible under /dev/disk/by-id/
         */
        deviceName: string;
        /**
         * A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set.
         */
        diskEncryptionKeyRaw: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set.
         */
        diskEncryptionKeyRsa: string;
        /**
         * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.
         */
        diskEncryptionKeySha256: string;
        /**
         * The service account being used for the encryption request for the given KMS key.
If absent, the Compute Engine default service account is used.
         */
        diskEncryptionServiceAccount: string;
        /**
         * Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter causes VM recreation.
         */
        forceAttach: boolean;
        /**
         * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set.
         */
        kmsKeySelfLink: string;
        /**
         * Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE".
         */
        mode: string;
        /**
         * The name or selfLink of the disk attached to this instance.
         */
        source: string;
    }
    interface InstanceFromMachineImageBootDisk {
        /**
         * Whether the disk will be auto-deleted when the instance is deleted.
         */
        autoDelete: boolean;
        /**
         * Name with which attached disk will be accessible under /dev/disk/by-id/
         */
        deviceName: string;
        /**
         * A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set.
         */
        diskEncryptionKeyRaw: string;
        /**
         * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set.
         */
        diskEncryptionKeyRsa: string;
        /**
         * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.
         */
        diskEncryptionKeySha256: string;
        /**
         * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used.
         */
        diskEncryptionServiceAccount: string;
        /**
         * Whether to force attach the regional disk even if it's currently attached to another instance.
If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter causes VM recreation.
         */
        forceAttach: boolean;
        /**
         * A list of features to enable on the guest operating system. Applicable only for bootable images.
         */
        guestOsFeatures: string[];
        /**
         * Parameters with which a disk was created alongside the instance.
         */
        initializeParams: outputs.compute.InstanceFromMachineImageBootDiskInitializeParams;
        /**
         * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.)
         */
        interface: string;
        /**
         * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set.
         */
        kmsKeySelfLink: string;
        /**
         * Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE".
         */
        mode: string;
        /**
         * The name or selfLink of the disk attached to this instance.
         */
        source: string;
    }
    interface InstanceFromMachineImageBootDiskInitializeParams {
        /**
         * The architecture of the disk. One of "X86_64" or "ARM64".
         */
        architecture: string;
        /**
         * A flag to enable confidential compute mode on boot disk
         */
        enableConfidentialCompute: boolean;
        /**
         * The image from which this disk was initialised.
         */
        image: string;
        /**
         * A set of key/value label pairs assigned to the disk.
         */
        labels: {
            [key: string]: string;
        };
        /**
         * Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle.
         */
        provisionedIops: number;
        /**
         * Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle.
         */
        provisionedThroughput: number;
        /**
         * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags.
Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * A list of selfLinks of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported. */ resourcePolicies: string; /** * The size of the image in gigabytes. */ size: number; /** * The snapshot from which this disk was initialised. */ snapshot: string; /** * The encryption key used to decrypt the source image. */ sourceImageEncryptionKey: outputs.compute.InstanceFromMachineImageBootDiskInitializeParamsSourceImageEncryptionKey; /** * The encryption key used to decrypt the source snapshot. */ sourceSnapshotEncryptionKey: outputs.compute.InstanceFromMachineImageBootDiskInitializeParamsSourceSnapshotEncryptionKey; /** * The URL of the storage pool in which the new disk is created */ storagePool: string; /** * The Google Compute Engine disk type. Such as pd-standard, pd-ssd or pd-balanced. */ type: string; } interface InstanceFromMachineImageBootDiskInitializeParamsSourceImageEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. 
*/ rsaEncryptedKey: string; /** * The SHA256 hash of the encryption key used to encrypt this disk. */ sha256: string; } interface InstanceFromMachineImageBootDiskInitializeParamsSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; /** * The SHA256 hash of the encryption key used to encrypt this disk. */ sha256: string; } interface InstanceFromMachineImageConfidentialInstanceConfig { /** * The confidential computing technology the instance uses. * SEV is an AMD feature. TDX is an Intel feature. One of the following * values is required: SEV, SEV_SNP, TDX. If SEV_SNP, minCpuPlatform = * "AMD Milan" is currently required. */ confidentialInstanceType: string; /** * Defines whether the instance should have confidential compute enabled. Field will be deprecated in a future release */ enableConfidentialCompute: boolean; } interface InstanceFromMachineImageGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource exposed to this instance. E.g. nvidia-tesla-k80. 
*/ type: string; } interface InstanceFromMachineImageInstanceEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * The SHA256 hash of the customer's encryption key. */ sha256: string; } interface InstanceFromMachineImageNetworkInterface { /** * Access configurations, i.e. IPs via which this instance can be accessed via the Internet. */ accessConfigs: outputs.compute.InstanceFromMachineImageNetworkInterfaceAccessConfig[]; /** * An array of alias IP ranges for this network interface. */ aliasIpRanges: outputs.compute.InstanceFromMachineImageNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. */ ipv6AccessConfigs: outputs.compute.InstanceFromMachineImageNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * MAC address assigned to this network interface. */ macAddress: string; /** * A unique name for the resource, required by GCE. * Changing this forces a new resource to be created. 
*/ name: string; /** * The name or selfLink of the network attached to this interface. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. */ networkAttachment: string; /** * The private IP address assigned to the instance. */ networkIp: string; /** * The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET, IDPF, MRDMA, and IRDMA */ nicType: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount: number; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. */ stackType: string; /** * The name or selfLink of the subnetwork attached to this interface. */ subnetwork: string; /** * The project in which the subnetwork belongs. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan: number; } interface InstanceFromMachineImageNetworkInterfaceAccessConfig { /** * The IP address that is be 1:1 mapped to the instance's network ip. */ natIp: string; /** * The networking tier used for configuring this instance. One of PREMIUM or STANDARD. */ networkTier: string; /** * The DNS domain name for the public PTR record. */ publicPtrDomainName: string; /** * A full or partial URL to a security policy to add to this instance. 
If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface InstanceFromMachineImageNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. */ ipCidrRange: string; /** * The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. */ subnetworkRangeName: string; } interface InstanceFromMachineImageNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. */ externalIpv6PrefixLength: string; /** * A unique name for the resource, required by GCE. * Changing this forces a new resource to be created. */ name: string; /** * The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6 */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface InstanceFromMachineImageNetworkPerformanceConfig { /** * The egress bandwidth tier to enable. Possible values:TIER_1, DEFAULT */ totalEgressBandwidthTier: string; } interface InstanceFromMachineImageParams { /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. 
Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; } interface InstanceFromMachineImageReservationAffinity { /** * Specifies the label selector for the reservation to use. */ specificReservation: outputs.compute.InstanceFromMachineImageReservationAffinitySpecificReservation; /** * The type of reservation from which this instance can consume resources. */ type: string; } interface InstanceFromMachineImageReservationAffinitySpecificReservation { /** * Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface InstanceFromMachineImageScheduling { /** * Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user). */ automaticRestart: boolean; /** * Specifies the availability domain, which this instance should be scheduled on. */ availabilityDomain: number; /** * Settings for the instance to perform a graceful shutdown. */ gracefulShutdown: outputs.compute.InstanceFromMachineImageSchedulingGracefulShutdown; /** * Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. */ hostErrorTimeoutSeconds: number; /** * Specifies the action GCE should take when SPOT VM is preempted. */ instanceTerminationAction: string; /** * Specifies the maximum amount of time a Local Ssd Vm should wait while * recovery of the Local Ssd state is attempted. Its value should be in * between 0 and 168 hours with hour granularity and the default value being 1 * hour. 
*/ localSsdRecoveryTimeout: outputs.compute.InstanceFromMachineImageSchedulingLocalSsdRecoveryTimeout; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC */ maintenanceInterval: string; /** * The timeout for new network connections to hosts. */ maxRunDuration: outputs.compute.InstanceFromMachineImageSchedulingMaxRunDuration; minNodeCpus: number; /** * Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems. */ nodeAffinities: outputs.compute.InstanceFromMachineImageSchedulingNodeAffinity[]; /** * Describes maintenance behavior for the instance. One of MIGRATE or TERMINATE, */ onHostMaintenance: string; /** * Defines the behaviour for instances with the instance_termination_action. */ onInstanceStopAction: outputs.compute.InstanceFromMachineImageSchedulingOnInstanceStopAction; /** * Whether the instance is preemptible. */ preemptible: boolean; /** * Whether the instance is spot. If this is set as SPOT. */ provisioningModel: string; /** * Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown: boolean; /** * Specifies the timestamp, when the instance will be terminated, * in RFC3339 text format. If specified, the instance termination action * will be performed at the termination time. */ terminationTime: string; } interface InstanceFromMachineImageSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. 
*/ maxDuration: outputs.compute.InstanceFromMachineImageSchedulingGracefulShutdownMaxDuration; } interface InstanceFromMachineImageSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * The value must be between 1 and 3600, which is 3,600 seconds (one hour). */ seconds: number; } interface InstanceFromMachineImageSchedulingLocalSsdRecoveryTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface InstanceFromMachineImageSchedulingMaxRunDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface InstanceFromMachineImageSchedulingNodeAffinity { key: string; operator: string; values: string[]; } interface InstanceFromMachineImageSchedulingOnInstanceStopAction { /** * If true, the contents of any attached Local SSD disks will be discarded. */ discardLocalSsd: boolean; } interface InstanceFromMachineImageScratchDisk { /** * Name with which the attached disk is accessible under /dev/disk/by-id/ */ deviceName: string; /** * The disk interface used for attaching this disk. One of SCSI or NVME. */ interface: string; /** * The size of the disk in gigabytes. One of 375 or 3000. 
*/ size: number; } interface InstanceFromMachineImageServiceAccount { /** * The service account e-mail address. */ email: string; /** * A list of service scopes. */ scopes: string[]; } interface InstanceFromMachineImageShieldedInstanceConfig { /** * Whether integrity monitoring is enabled for the instance. */ enableIntegrityMonitoring: boolean; /** * Whether secure boot is enabled for the instance. */ enableSecureBoot: boolean; /** * Whether the instance uses vTPM. */ enableVtpm: boolean; } interface InstanceFromMachineImageSourceMachineImageEncryptionKey { kmsKeyName?: string; kmsKeyServiceAccount?: string; rawKey?: string; rsaEncryptedKey?: string; sha256: string; } interface InstanceFromTemplateAdvancedMachineFeatures { /** * Whether to enable nested virtualization or not. */ enableNestedVirtualization: boolean; /** * Whether to enable UEFI networking for the instance. */ enableUefiNetworking: boolean; /** * The PMU is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are "STANDARD", "ENHANCED", and "ARCHITECTURAL". */ performanceMonitoringUnit: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; /** * Turbo frequency mode to use for the instance. Currently supported modes is "ALL_CORE_MAX". */ turboMode: string; /** * The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width. 
*/ visibleCoreCount: number; } interface InstanceFromTemplateAttachedDisk { /** * Name with which the attached disk is accessible under /dev/disk/by-id/ */ deviceName: string; /** * A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set. */ diskEncryptionKeyRaw: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set. */ diskEncryptionKeyRsa: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. */ diskEncryptionKeySha256: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used */ diskEncryptionServiceAccount: string; /** * Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter cause VM recreation. */ forceAttach: boolean; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRsa and diskEncryptionKeyRaw may be set. */ kmsKeySelfLink: string; /** * Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". */ mode: string; /** * The name or selfLink of the disk attached to this instance. */ source: string; } interface InstanceFromTemplateBootDisk { /** * Whether the disk will be auto-deleted when the instance is deleted. */ autoDelete: boolean; /** * Name with which attached disk will be accessible under /dev/disk/by-id/ */ deviceName: string; /** * A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. 
Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set. */ diskEncryptionKeyRaw: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set. */ diskEncryptionKeyRsa: string; /** * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. */ diskEncryptionKeySha256: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used */ diskEncryptionServiceAccount: string; /** * Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. Setting this parameter cause VM recreation. */ forceAttach: boolean; /** * A list of features to enable on the guest operating system. Applicable only for bootable images. */ guestOsFeatures: string[]; /** * Parameters with which a disk was created alongside the instance. */ initializeParams: outputs.compute.InstanceFromTemplateBootDiskInitializeParams; /** * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) */ interface: string; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link, diskEncryptionKeyRaw and diskEncryptionKeyRsa may be set. */ kmsKeySelfLink: string; /** * Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". */ mode: string; /** * The name or selfLink of the disk attached to this instance. */ source: string; } interface InstanceFromTemplateBootDiskInitializeParams { /** * The architecture of the disk. 
One of "X86_64" or "ARM64". */ architecture: string; /** * A flag to enable confidential compute mode on boot disk */ enableConfidentialCompute: boolean; /** * The image from which this disk was initialised. */ image: string; /** * A set of key/value label pairs assigned to the disk. */ labels: { [key: string]: string; }; /** * Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. */ provisionedIops: number; /** * Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. */ provisionedThroughput: number; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * A list of selfLinks of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported. */ resourcePolicies: string; /** * The size of the image in gigabytes. */ size: number; /** * The snapshot from which this disk was initialised. */ snapshot: string; /** * The encryption key used to decrypt the source image. */ sourceImageEncryptionKey: outputs.compute.InstanceFromTemplateBootDiskInitializeParamsSourceImageEncryptionKey; /** * The encryption key used to decrypt the source snapshot. */ sourceSnapshotEncryptionKey: outputs.compute.InstanceFromTemplateBootDiskInitializeParamsSourceSnapshotEncryptionKey; /** * The URL of the storage pool in which the new disk is created */ storagePool: string; /** * The Google Compute Engine disk type. Such as pd-standard, pd-ssd or pd-balanced. 
*/ type: string; } interface InstanceFromTemplateBootDiskInitializeParamsSourceImageEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; /** * The SHA256 hash of the encryption key used to encrypt this disk. */ sha256: string; } interface InstanceFromTemplateBootDiskInitializeParamsSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey: string; /** * The SHA256 hash of the encryption key used to encrypt this disk. 
*/ sha256: string; } interface InstanceFromTemplateConfidentialInstanceConfig { /** * The confidential computing technology the instance uses. * SEV is an AMD feature. TDX is an Intel feature. One of the following * values is required: SEV, SEV_SNP, TDX. If SEV_SNP, minCpuPlatform = * "AMD Milan" is currently required. */ confidentialInstanceType: string; /** * Defines whether the instance should have confidential compute enabled. Field will be deprecated in a future release */ enableConfidentialCompute: boolean; } interface InstanceFromTemplateGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource exposed to this instance. E.g. nvidia-tesla-k80. */ type: string; } interface InstanceFromTemplateInstanceEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount: string; /** * The SHA256 hash of the customer's encryption key. */ sha256: string; } interface InstanceFromTemplateNetworkInterface { /** * Access configurations, i.e. IPs via which this instance can be accessed via the Internet. */ accessConfigs: outputs.compute.InstanceFromTemplateNetworkInterfaceAccessConfig[]; /** * An array of alias IP ranges for this network interface. */ aliasIpRanges: outputs.compute.InstanceFromTemplateNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. 
If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. */ ipv6AccessConfigs: outputs.compute.InstanceFromTemplateNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * MAC address assigned to this network interface. */ macAddress: string; /** * A unique name for the resource, required by GCE. * Changing this forces a new resource to be created. */ name: string; /** * The name or selfLink of the network attached to this interface. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. */ networkAttachment: string; /** * The private IP address assigned to the instance. */ networkIp: string; /** * The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET, IDPF, MRDMA, and IRDMA */ nicType: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount: number; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. 
*/ stackType: string; /** * The name or selfLink of the subnetwork attached to this interface. */ subnetwork: string; /** * The project in which the subnetwork belongs. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan: number; } interface InstanceFromTemplateNetworkInterfaceAccessConfig { /** * The IP address that is be 1:1 mapped to the instance's network ip. */ natIp: string; /** * The networking tier used for configuring this instance. One of PREMIUM or STANDARD. */ networkTier: string; /** * The DNS domain name for the public PTR record. */ publicPtrDomainName: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface InstanceFromTemplateNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. */ ipCidrRange: string; /** * The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. */ subnetworkRangeName: string; } interface InstanceFromTemplateNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. */ externalIpv6PrefixLength: string; /** * A unique name for the resource, required by GCE. * Changing this forces a new resource to be created. */ name: string; /** * The service-level to be provided for IPv6 traffic when the subnet has an external subnet. 
Only PREMIUM tier is valid for IPv6 */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface InstanceFromTemplateNetworkPerformanceConfig { /** * The egress bandwidth tier to enable. Possible values:TIER_1, DEFAULT */ totalEgressBandwidthTier: string; } interface InstanceFromTemplateParams { /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; } interface InstanceFromTemplateReservationAffinity { /** * Specifies the label selector for the reservation to use. */ specificReservation: outputs.compute.InstanceFromTemplateReservationAffinitySpecificReservation; /** * The type of reservation from which this instance can consume resources. */ type: string; } interface InstanceFromTemplateReservationAffinitySpecificReservation { /** * Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface InstanceFromTemplateScheduling { /** * Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user). */ automaticRestart: boolean; /** * Specifies the availability domain, which this instance should be scheduled on. */ availabilityDomain: number; /** * Settings for the instance to perform a graceful shutdown. 
*/ gracefulShutdown: outputs.compute.InstanceFromTemplateSchedulingGracefulShutdown; /** * Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. */ hostErrorTimeoutSeconds: number; /** * Specifies the action GCE should take when SPOT VM is preempted. */ instanceTerminationAction: string; /** * Specifies the maximum amount of time a Local Ssd Vm should wait while * recovery of the Local Ssd state is attempted. Its value should be in * between 0 and 168 hours with hour granularity and the default value being 1 * hour. */ localSsdRecoveryTimeout: outputs.compute.InstanceFromTemplateSchedulingLocalSsdRecoveryTimeout; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC */ maintenanceInterval: string; /** * The timeout for new network connections to hosts. */ maxRunDuration: outputs.compute.InstanceFromTemplateSchedulingMaxRunDuration; minNodeCpus: number; /** * Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems. */ nodeAffinities: outputs.compute.InstanceFromTemplateSchedulingNodeAffinity[]; /** * Describes maintenance behavior for the instance. One of MIGRATE or TERMINATE, */ onHostMaintenance: string; /** * Defines the behaviour for instances with the instance_termination_action. */ onInstanceStopAction: outputs.compute.InstanceFromTemplateSchedulingOnInstanceStopAction; /** * Whether the instance is preemptible. */ preemptible: boolean; /** * Whether the instance is spot. If this is set as SPOT. */ provisioningModel: string; /** * Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. 
*/ skipGuestOsShutdown: boolean; /** * Specifies the timestamp, when the instance will be terminated, * in RFC3339 text format. If specified, the instance termination action * will be performed at the termination time. */ terminationTime: string; } interface InstanceFromTemplateSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. */ maxDuration: outputs.compute.InstanceFromTemplateSchedulingGracefulShutdownMaxDuration; } interface InstanceFromTemplateSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * The value must be between 1 and 3600, which is 3,600 seconds (one hour). */ seconds: number; } interface InstanceFromTemplateSchedulingLocalSsdRecoveryTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface InstanceFromTemplateSchedulingMaxRunDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. 
*/ seconds: number; } interface InstanceFromTemplateSchedulingNodeAffinity { key: string; operator: string; values: string[]; } interface InstanceFromTemplateSchedulingOnInstanceStopAction { /** * If true, the contents of any attached Local SSD disks will be discarded. */ discardLocalSsd: boolean; } interface InstanceFromTemplateScratchDisk { /** * Name with which the attached disk is accessible under /dev/disk/by-id/ */ deviceName: string; /** * The disk interface used for attaching this disk. One of SCSI or NVME. */ interface: string; /** * The size of the disk in gigabytes. One of 375 or 3000. */ size: number; } interface InstanceFromTemplateServiceAccount { /** * The service account e-mail address. */ email: string; /** * A list of service scopes. */ scopes: string[]; } interface InstanceFromTemplateShieldedInstanceConfig { /** * Whether integrity monitoring is enabled for the instance. */ enableIntegrityMonitoring: boolean; /** * Whether secure boot is enabled for the instance. */ enableSecureBoot: boolean; /** * Whether the instance uses vTPM. */ enableVtpm: boolean; } interface InstanceGroupManagerAllInstancesConfig { /** * , The label key-value pairs that you want to patch onto the instance. * * - - - */ labels?: { [key: string]: string; }; /** * , The metadata key-value pairs that you want to patch onto the instance. For more information, see [Project and instance metadata](https://cloud.google.com/compute/docs/metadata#project_and_instance_metadata). */ metadata?: { [key: string]: string; }; } interface InstanceGroupManagerAutoHealingPolicies { /** * The health check resource that signals autohealing. */ healthCheck: string; /** * The number of seconds that the managed instance group waits before * it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600. */ initialDelaySec: number; } interface InstanceGroupManagerInstanceLifecyclePolicy { /** * , Specifies the action that a MIG performs on a failed VM. 
If the value of the `onFailedHealthCheck` field is `DEFAULT_ACTION`, then the same action also applies to the VMs on which your application fails a health check. Valid options are: `DO_NOTHING`, `REPAIR`. If `DO_NOTHING`, then MIG does not repair a failed VM. If `REPAIR` (default), then MIG automatically repairs a failed VM by recreating it. For more information, see about repairing VMs in a MIG. */ defaultActionOnFailure?: string; /** * , Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type. */ forceUpdateOnRepair?: string; /** * , Specifies the action that a MIG performs on an unhealthy VM. A VM is marked as unhealthy when the application running on that VM fails a health check. Valid options are: `DEFAULT_ACTION`, `DO_NOTHING`, `REPAIR`. If `DEFAULT_ACTION` (default), then MIG uses the same action configured for the `defaultActionOnFailure` field. If `DO_NOTHING`, then MIG does not repair unhealthy VM. If `REPAIR`, then MIG automatically repairs an unhealthy VM by recreating it. For more information, see about repairing VMs in a MIG. */ onFailedHealthCheck?: string; /** * ), Configuration for VM repairs in the MIG. Structure is documented below. * - - - */ onRepair: outputs.compute.InstanceGroupManagerInstanceLifecyclePolicyOnRepair; } interface InstanceGroupManagerInstanceLifecyclePolicyOnRepair { /** * , Specifies whether the MIG can change a VM's zone during a repair. If "YES", MIG can select a different zone for the VM during a repair. Else if "NO", MIG cannot change a VM's zone during a repair. The default value of allowChangingZone is "NO". 
* * - - - */ allowChangingZone?: string; } interface InstanceGroupManagerNamedPort { /** * The name of the port. */ name: string; /** * The port number. * - - - */ port: number; } interface InstanceGroupManagerParams { /** * Resource manager tags to bind to the managed instance group. The tags are key-value pairs. Keys must be in the format tagKeys/123 and values in the format tagValues/456. For more information, see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources) * * - - - */ resourceManagerTags?: { [key: string]: string; }; } interface InstanceGroupManagerResourcePolicies { /** * The URL of the workload policy that is specified for this managed instance group. It can be a full or partial URL. */ workloadPolicy?: string; } interface InstanceGroupManagerStandbyPolicy { /** * Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. */ initialDelaySec: number; /** * Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: `MANUAL`, `SCALE_OUT_POOL`. If `MANUAL`(default), you have full control over which VMs are stopped and suspended in the MIG. If `SCALE_OUT_POOL`, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. * - - - */ mode: string; } interface InstanceGroupManagerStatefulDisk { /** * , A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. 
`ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently deleted from the instance group. The default is `NEVER`. */ deleteRule?: string; /** * , The device name of the disk to be attached. */ deviceName: string; } interface InstanceGroupManagerStatefulExternalIp { /** * , A value that prescribes what should happen to the external ip when the VM instance is deleted. The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. `NEVER` - detach the ip when the VM is deleted, but do not delete the ip. `ON_PERMANENT_INSTANCE_DELETION` will delete the external ip when the VM is permanently deleted from the instance group. */ deleteRule?: string; /** * , The network interface name of the external Ip. Possible value: `nic0` */ interfaceName?: string; } interface InstanceGroupManagerStatefulInternalIp { /** * , A value that prescribes what should happen to the internal ip when the VM instance is deleted. The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. `NEVER` - detach the ip when the VM is deleted, but do not delete the ip. `ON_PERMANENT_INSTANCE_DELETION` will delete the internal ip when the VM is permanently deleted from the instance group. */ deleteRule?: string; /** * , The network interface name of the internal Ip. Possible value: `nic0` */ interfaceName?: string; } interface InstanceGroupManagerStatus { /** * Properties to set on all instances in the group. After setting * allInstancesConfig on the group, you must update the group's instances to * apply the configuration. */ allInstancesConfigs: outputs.compute.InstanceGroupManagerStatusAllInstancesConfig[]; /** * A bit indicating whether the managed instance group is in a stable state. 
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. */ isStable: boolean; /** * Stateful status of the given Instance Group Manager. */ statefuls: outputs.compute.InstanceGroupManagerStatusStateful[]; /** * A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager. */ versionTargets: outputs.compute.InstanceGroupManagerStatusVersionTarget[]; } interface InstanceGroupManagerStatusAllInstancesConfig { /** * Current all-instances configuration revision. This value is in RFC3339 text format. */ currentRevision: string; /** * A bit indicating whether this configuration has been applied to all managed instances in the group. */ effective: boolean; } interface InstanceGroupManagerStatusStateful { /** * A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. */ hasStatefulConfig: boolean; /** * Status of per-instance configs on the instances. */ perInstanceConfigs: outputs.compute.InstanceGroupManagerStatusStatefulPerInstanceConfig[]; } interface InstanceGroupManagerStatusStatefulPerInstanceConfig { /** * A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status `EFFECTIVE` or there are no per-instance-configs. 
*/ allEffective: boolean; } interface InstanceGroupManagerStatusVersionTarget { /** * A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager. */ isReached: boolean; } interface InstanceGroupManagerUpdatePolicy { /** * , Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with `maxSurgePercent`. Both cannot be 0. */ maxSurgeFixed: number; /** * , Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. Conflicts with `maxSurgeFixed`. */ maxSurgePercent?: number; /** * , Specifies a fixed number of VM instances. This must be a positive integer. */ maxUnavailableFixed: number; /** * , Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. */ maxUnavailablePercent?: number; /** * , Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600] */ minReadySec?: number; /** * Minimal action to be taken on an instance. You can specify either `NONE` to forbid any actions, `REFRESH` to update without stopping instances, `RESTART` to restart existing instances or `REPLACE` to delete and create new instances from the target template. If you specify a `REFRESH`, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. */ minimalAction: string; /** * Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. 
If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. */ mostDisruptiveAllowedAction?: string; /** * , The instance replacement method for managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set maxUnavailableFixed or maxUnavailablePercent to be greater than 0. * - - - */ replacementMethod?: string; /** * The type of update process. You can specify either `PROACTIVE` so that the instance group manager proactively executes actions in order to bring instances to their target versions or `OPPORTUNISTIC` so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). */ type: string; } interface InstanceGroupManagerVersion { /** * The full URL to an instance template from which all new instances of this version will be created. It is recommended to reference instance templates through their unique id (`selfLinkUnique` attribute). */ instanceTemplate: string; /** * Version name. */ name?: string; /** * The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below. * * > Exactly one `version` you specify must not have a `targetSize` specified. During a rolling update, the instance group manager will fulfill the `targetSize` * constraints of every other `version`, and any remaining instances will be provisioned with the version where `targetSize` is unset. */ targetSize?: outputs.compute.InstanceGroupManagerVersionTargetSize; } interface InstanceGroupManagerVersionTargetSize { /** * , The number of instances which are managed for this version. Conflicts with `percent`. 
*/ fixed?: number; /** * , The number of instances (calculated as percentage) which are managed for this version. Conflicts with `fixed`. * Note that when using `percent`, rounding will be in favor of explicitly set `targetSize` values; a managed instance group with 2 instances and 2 `version`s, * one of which has a `target_size.percent` of `60` will create 2 instances of that `version`. */ percent?: number; } interface InstanceGroupNamedPort { /** * The name which the port will be mapped to. */ name: string; /** * The port number to map the name to. */ port: number; } interface InstanceGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface InstanceIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InstanceIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InstanceInstanceEncryptionKey { /** * The selfLink of the encryption key that is * stored in Google Cloud KMS to encrypt the data on this instance. */ kmsKeySelfLink: string; /** * The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount?: string; /** * The SHA256 hash of the customer's encryption key. */ sha256: string; } interface InstanceNetworkInterface { /** * Access configurations, i.e. IPs via which this * instance can be accessed via the Internet. Omit to ensure that the instance * is not accessible from the Internet. If omitted, ssh provisioners will not * work unless Terraform can send traffic to the instance's network (e.g. via * tunnel or because it is running on another cloud instance on that network). * This block can be specified once per `networkInterface`. Structure documented below. */ accessConfigs?: outputs.compute.InstanceNetworkInterfaceAccessConfig[]; /** * An * array of alias IP ranges for this network interface. Can only be specified for network * interfaces on subnet-mode networks. Structure documented below. */ aliasIpRanges?: outputs.compute.InstanceNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. 
* Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig * specified, then this instance will have no external IPv6 Internet access. Structure documented below. */ ipv6AccessConfigs?: outputs.compute.InstanceNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. * This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * [Beta] MAC address assigned to this network interface */ macAddress: string; /** * A unique name for the resource, required by GCE. * Changing this forces a new resource to be created. */ name: string; /** * The name or selfLink of the network to attach this interface to. * Either `network` or `subnetwork` must be provided. If network isn't provided it will * be inferred from the subnetwork. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: `projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}`. */ networkAttachment: string; /** * The private IP address to assign to the instance. If * empty, the address will be automatically assigned. */ networkIp: string; /** * The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, IDPF, MRDMA, IRDMA. */ nicType?: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount?: number; /** * Beta A full or partial URL to a security policy to add to this instance. 
If this field is set to an empty string it will remove the associated security policy. */ securityPolicy?: string; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6, IPV6_ONLY or IPV4_ONLY. If not specified, IPV4_ONLY will be used. */ stackType: string; /** * The name or selfLink of the subnetwork to attach this * interface to. Either `network` or `subnetwork` must be provided. If network isn't provided * it will be inferred from the subnetwork. The subnetwork must exist in the same region this * instance will be created in. If the network resource is in * [legacy](https://cloud.google.com/vpc/docs/legacy) mode, do not specify this field. If the * network is in auto subnet mode, specifying the subnetwork is optional. If the network is * in custom subnet mode, specifying the subnetwork is required. */ subnetwork: string; /** * The project in which the subnetwork belongs. * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan?: number; } interface InstanceNetworkInterfaceAccessConfig { /** * The IP address that will be 1:1 mapped to the instance's * network ip. If not given, one will be generated. */ natIp: string; /** * The service-level to be provided for IPv6 traffic when the * subnet has an external subnet. Only PREMIUM or STANDARD tier is valid for IPv6. */ networkTier: string; /** * The domain name to be used when creating DNSv6 * records for the external IPv6 ranges.. */ publicPtrDomainName?: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. 
*/ securityPolicy: string; } interface InstanceNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. This IP CIDR range * must belong to the specified subnetwork and cannot contain IP addresses reserved by * system or used by other network interfaces. This range may be a single IP address * (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). */ ipCidrRange: string; /** * The subnetwork secondary range name specifying * the secondary range from which to allocate the IP CIDR range for this alias IP * range. If left unspecified, the primary range of the subnetwork will be used. */ subnetworkRangeName?: string; } interface InstanceNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. */ externalIpv6PrefixLength: string; /** * A unique name for the resource, required by GCE. * Changing this forces a new resource to be created. */ name: string; /** * The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6 */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName?: string; /** * A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. */ securityPolicy: string; } interface InstanceNetworkPerformanceConfig { /** * The egress bandwidth tier to enable. 
* Possible values: TIER_1, DEFAULT */ totalEgressBandwidthTier: string; } interface InstanceParams { /** * A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource. */ resourceManagerTags?: { [key: string]: string; }; } interface InstanceReservationAffinity { /** * Specifies the label selector for the reservation to use.. * Structure is documented below. */ specificReservation?: outputs.compute.InstanceReservationAffinitySpecificReservation; /** * The type of reservation from which this instance can consume resources. */ type: string; } interface InstanceReservationAffinitySpecificReservation { /** * Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface InstanceScheduling { /** * Specifies if the instance should be * restarted if it was terminated by Compute Engine (not a user). * Defaults to true. */ automaticRestart?: boolean; /** * Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. */ availabilityDomain?: number; /** * Beta Settings for the instance to perform a graceful shutdown. Structure is documented below. */ gracefulShutdown?: outputs.compute.InstanceSchedulingGracefulShutdown; /** * Beta Specifies the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. 
*/ hostErrorTimeoutSeconds?: number; /** * Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) */ instanceTerminationAction?: string; /** * Beta Specifies the maximum amount of time a Local Ssd Vm should wait while recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour. Structure is documented below. */ localSsdRecoveryTimeout?: outputs.compute.InstanceSchedulingLocalSsdRecoveryTimeout; /** * Beta Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. */ maintenanceInterval?: string; /** * The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instanceTerminationAction`. Structure is documented below. */ maxRunDuration?: outputs.compute.InstanceSchedulingMaxRunDuration; /** * The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. */ minNodeCpus?: number; /** * Specifies node affinities or anti-affinities * to determine which sole-tenant nodes your instances and managed instance * groups will use as host systems. Read more on sole-tenant node creation * [here](https://cloud.google.com/compute/docs/nodes/create-nodes). * Structure documented below. */ nodeAffinities?: outputs.compute.InstanceSchedulingNodeAffinity[]; /** * Describes maintenance behavior for the * instance. Can be MIGRATE or TERMINATE, for more info, read * [here](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options). */ onHostMaintenance: string; /** * Specifies the action to be performed when the instance is terminated using `maxRunDuration` and `STOP` `instanceTerminationAction`. Only supports `true` `discardLocalSsd` at this point. Structure is documented below. 
*/ onInstanceStopAction?: outputs.compute.InstanceSchedulingOnInstanceStopAction; /** * Specifies if the instance is preemptible. * If this field is set to true, then `automaticRestart` must be * set to false. Defaults to false. */ preemptible?: boolean; /** * Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, * `preemptible` should be `true` and `automaticRestart` should be * `false`. For more info about * `SPOT`, read [here](https://cloud.google.com/compute/docs/instances/spot) */ provisioningModel: string; /** * Beta Boolean parameter. Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown?: boolean; /** * Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. */ terminationTime?: string; } interface InstanceSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. Structure is documented below: */ maxDuration?: outputs.compute.InstanceSchedulingGracefulShutdownMaxDuration; } interface InstanceSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented with a 0 * `seconds` field and a positive `nanos` field. Must be from 0 to * 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. 
 * The value must be between 1 and 3600, which is 3,600 seconds (one hour).
 */
seconds: number;
}
interface InstanceSchedulingLocalSsdRecoveryTimeout {
    /**
     * Span of time that's a fraction of a second at nanosecond
     * resolution. Durations less than one second are represented with a 0
     * `seconds` field and a positive `nanos` field. Must be from 0 to
     * 999,999,999 inclusive.
     */
    nanos?: number;
    /**
     * Span of time at a resolution of a second. Must be from 0 to
     * 315,576,000,000 inclusive. Note: these bounds are computed from: 60
     * sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years.
     */
    seconds: number;
}
interface InstanceSchedulingMaxRunDuration {
    /**
     * Span of time that's a fraction of a second at nanosecond
     * resolution. Durations less than one second are represented with a 0
     * `seconds` field and a positive `nanos` field. Must be from 0 to
     * 999,999,999 inclusive.
     */
    nanos?: number;
    /**
     * Span of time at a resolution of a second. Must be from 0 to
     * 315,576,000,000 inclusive. Note: these bounds are computed from: 60
     * sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years.
     */
    seconds: number;
}
interface InstanceSchedulingNodeAffinity {
    /**
     * The key for the node affinity label.
     */
    key: string;
    /**
     * The operator. Can be `IN` for node-affinities
     * or `NOT_IN` for anti-affinities.
     */
    operator: string;
    /**
     * The values for the node affinity label.
     */
    values: string[];
}
interface InstanceSchedulingOnInstanceStopAction {
    /**
     * Whether to discard local SSDs attached to the VM while terminating using `maxRunDuration`. Only supports `true` at this point.
     */
    discardLocalSsd?: boolean;
}
interface InstanceScratchDisk {
    /**
     * Name with which the attached disk is accessible under /dev/disk/by-id/
     */
    deviceName: string;
    /**
     * The disk interface to use for attaching this disk; either SCSI or NVME.
     */
    interface: string;
    /**
     * The size of the disk in gigabytes. One of 375 or 3000.
*/ size?: number; } interface InstanceServiceAccount { /** * The service account e-mail address. * **Note**: `allowStoppingForUpdate` must be set to true or your instance must have a `desiredStatus` of `TERMINATED` in order to update this field. */ email: string; /** * A list of service scopes. Both OAuth2 URLs and gcloud * short names are supported. To allow full access to all Cloud APIs, use the * `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes). * **Note**: `allowStoppingForUpdate` must be set to true or your instance must have a `desiredStatus` of `TERMINATED` in order to update this field. */ scopes: string[]; } interface InstanceSettingsMetadata { /** * A metadata key/value items map. The total size of all keys and values must be less than 512KB */ items?: { [key: string]: string; }; } interface InstanceShieldedInstanceConfig { /** * - Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true. * **Note**: `allowStoppingForUpdate` must be set to true or your instance must have a `desiredStatus` of `TERMINATED` in order to update this field. */ enableIntegrityMonitoring?: boolean; /** * - Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false. * **Note**: `allowStoppingForUpdate` must be set to true or your instance must have a `desiredStatus` of `TERMINATED` in order to update this field. */ enableSecureBoot?: boolean; /** * - Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true. * **Note**: `allowStoppingForUpdate` must be set to true or your instance must have a `desiredStatus` of `TERMINATED` in order to update this field. 
*/ enableVtpm?: boolean; } interface InstanceTemplateAdvancedMachineFeatures { /** * Defines whether the instance should have nested virtualization enabled. Defaults to false. */ enableNestedVirtualization?: boolean; /** * Whether to enable UEFI networking for instance creation. */ enableUefiNetworking?: boolean; /** * [The PMU](https://cloud.google.com/compute/docs/pmu-overview) is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are `STANDARD`, `ENHANCED`, and `ARCHITECTURAL`. */ performanceMonitoringUnit?: string; /** * The number of threads per physical core. To disable [simultaneous multithreading (SMT)](https://cloud.google.com/compute/docs/instances/disabling-smt) set this to 1. */ threadsPerCore?: number; /** * Turbo frequency mode to use for the instance. Supported modes are currently either `ALL_CORE_MAX` or unset (default). */ turboMode?: string; /** * The number of physical cores to expose to an instance. [visible cores info (VC)](https://cloud.google.com/compute/docs/instances/customize-visible-cores). */ visibleCoreCount?: number; } interface InstanceTemplateConfidentialInstanceConfig { /** * Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: `SEV`, `SEV_SNP`, `TDX`. `onHostMaintenance` can be set to MIGRATE if `confidentialInstanceType` is set to `SEV` and `minCpuPlatform` is set to `"AMD Milan"`. Otherwise, `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently `minCpuPlatform` has to be set to `"AMD Milan"` or this will fail to create the VM. */ confidentialInstanceType?: string; /** * Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, `onHostMaintenance` can be set to MIGRATE if `minCpuPlatform` is set to `"AMD Milan"`. 
Otherwise, `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM.
 */
enableConfidentialCompute?: boolean;
}
interface InstanceTemplateDisk {
    /**
     * The architecture of the attached disk. Valid values are `ARM64` or `X86_64`.
     */
    architecture: string;
    /**
     * Whether or not the disk should be auto-deleted.
     * This defaults to true.
     */
    autoDelete?: boolean;
    /**
     * Indicates that this is a boot disk.
     */
    boot: boolean;
    /**
     * A unique device name that is reflected into the
     * /dev/ tree of a Linux operating system running within the instance. If not
     * specified, the server chooses a default device name to apply to this disk.
     */
    deviceName: string;
    /**
     * Encrypts or decrypts a disk using a customer-supplied encryption key.
     *
     * If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.
     *
     * If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.
     *
     * If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.
     *
     * Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. Structure documented below.
     */
    diskEncryptionKey?: outputs.compute.InstanceTemplateDiskDiskEncryptionKey;
    /**
     * Name of the disk. When not provided, this defaults
     * to the name of the instance.
     */
    diskName?: string;
    /**
     * The size of the image in gigabytes. If not
     * specified, it will inherit the size of its base image. For SCRATCH disks,
     * the size must be exactly 375GB.
 */
diskSizeGb: number;
/**
 * The GCE disk type. Such as `"pd-ssd"`, `"local-ssd"`,
 * `"pd-balanced"` or `"pd-standard"`, `"hyperdisk-balanced"`, `"hyperdisk-throughput"` or `"hyperdisk-extreme"`.
 */
diskType: string;
/**
 * A list of features to enable on the guest operating system. Applicable only for bootable images. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options.
 */
guestOsFeatures?: string[];
/**
 * Specifies the disk interface to use for attaching this disk,
 * which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI
 * and the request will fail if you attempt to attach a persistent disk in any other format
 * than SCSI. Local SSDs can use either NVME or SCSI.
 */
interface: string;
/**
 * A set of key/value label pairs to assign to disk created from
 * this template
 */
labels?: {
    [key: string]: string;
};
/**
 * The mode in which to attach this disk, either READ_WRITE
 * or READ_ONLY. If you are attaching or creating a boot disk, this must
 * be in read-write mode.
 */
mode: string;
/**
 * Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type.
 */
provisionedIops: number;
/**
 * Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must be greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).
 */
provisionedThroughput: number;
/**
 * A set of key/value resource manager tag pairs to bind to this disk.
Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; /** * - A list (short name or id) of resource policies to attach to this disk for automatic snapshot creations. Currently a max of 1 resource policy is supported. */ resourcePolicies?: string; /** * The name (**not self_link**) * of the disk (such as those managed by `gcp.compute.Disk`) to attach. * > **Note:** Either `source`, `sourceImage`, or `sourceSnapshot` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ source?: string; /** * The image from which to * initialize this disk. This can be one of: the image's `selfLink`, * `projects/{project}/global/images/{image}`, * `projects/{project}/global/images/family/{family}`, `global/images/{image}`, * `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, * `{project}/{image}`, `{family}`, or `{image}`. * > **Note:** Either `source`, `sourceImage`, or `sourceSnapshot` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ sourceImage: string; /** * The customer-supplied encryption * key of the source image. Required if the source image is protected by a * customer-supplied encryption key. * * Instance templates do not store customer-supplied encryption keys, so you * cannot create disks for instances in a managed instance group if the source * images are encrypted with your own keys. Structure * documented below. */ sourceImageEncryptionKey?: outputs.compute.InstanceTemplateDiskSourceImageEncryptionKey; /** * The source snapshot to create this disk. * > **Note:** Either `source`, `sourceImage`, or `sourceSnapshot` is **required** in a disk block unless the disk type is `local-ssd`. 
Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ sourceSnapshot?: string; /** * The customer-supplied encryption * key of the source snapshot. Structure * documented below. */ sourceSnapshotEncryptionKey?: outputs.compute.InstanceTemplateDiskSourceSnapshotEncryptionKey; /** * The type of GCE disk, can be either `"SCRATCH"` or * `"PERSISTENT"`. */ type: string; } interface InstanceTemplateDiskDiskEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink?: string; /** * The service account being used for the * encryption request for the given KMS key. If absent, the Compute Engine * default service account is used. */ kmsKeyServiceAccount?: string; } interface InstanceTemplateDiskSourceImageEncryptionKey { /** * The self link of the encryption key that is * stored in Google Cloud KMS. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ kmsKeySelfLink?: string; /** * The service account being used for the * encryption request for the given KMS key. If absent, the Compute Engine * default service account is used. */ kmsKeyServiceAccount?: string; /** * A 256-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) * to decrypt the given image. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rawKey?: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to decrypt the given image. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rsaEncryptedKey?: string; } interface InstanceTemplateDiskSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is * stored in Google Cloud KMS. 
Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ kmsKeySelfLink?: string; /** * The service account being used for the * encryption request for the given KMS key. If absent, the Compute Engine * default service account is used. */ kmsKeyServiceAccount?: string; /** * A 256-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) * to decrypt this snapshot. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rawKey?: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to decrypt this snapshot. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rsaEncryptedKey?: string; } interface InstanceTemplateGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface InstanceTemplateIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InstanceTemplateIamMemberCondition { /** * An optional description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InstanceTemplateNetworkInterface { /** * Access configurations, i.e. IPs via which this * instance can be accessed via the Internet. Omit to ensure that the instance * is not accessible from the Internet (this means that ssh provisioners will * not work unless you can send traffic to the instance's * network (e.g. via tunnel or because it is running on another cloud instance * on that network). This block can be specified once per `networkInterface`. Structure documented below. */ accessConfigs?: outputs.compute.InstanceTemplateNetworkInterfaceAccessConfig[]; /** * An * array of alias IP ranges for this network interface. Can only be specified for network * interfaces on subnet-mode networks. Structure documented below. */ aliasIpRanges?: outputs.compute.InstanceTemplateNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. * Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig * specified, then this instance will have no external IPv6 Internet access. Structure documented below. 
*/ ipv6AccessConfigs?: outputs.compute.InstanceTemplateNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * The name of the instance template. If you leave * this blank, the provider will auto-generate a unique name. */ name: string; /** * The name or selfLink of the network to attach this interface to. * Use `network` attribute for Legacy or Auto subnetted networks and * `subnetwork` for custom subnetted networks. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. */ networkAttachment: string; /** * The private IP address to assign to the instance. If * empty, the address will be automatically assigned. */ networkIp?: string; /** * The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, MRDMA, IRDMA. */ nicType?: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount?: number; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6, IPV6_ONLY or IPV4_ONLY. If not specified, IPV4_ONLY will be used. */ stackType: string; /** * the name of the subnetwork to attach this interface * to. The subnetwork must exist in the same `region` this instance will be * created in. Either `network` or `subnetwork` must be provided. 
 */
subnetwork: string;
/**
 * The ID of the project in which the subnetwork belongs.
 * If it is not provided, the provider project is used.
 */
subnetworkProject: string;
/**
 * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively.
 */
vlan?: number;
}
interface InstanceTemplateNetworkInterfaceAccessConfig {
    /**
     * The IP address that will be 1:1 mapped to the instance's
     * network ip. If not given, one will be generated.
     */
    natIp: string;
    /**
     * The service-level to be provided for IPv6 traffic when the
     * subnet has an external subnet. Only PREMIUM and STANDARD tier is valid for IPv6.
     */
    networkTier: string;
    /**
     * The DNS domain name for the public PTR record.
     */
    publicPtrDomainName: string;
}
interface InstanceTemplateNetworkInterfaceAliasIpRange {
    /**
     * The IP CIDR range represented by this alias IP range. This IP CIDR range
     * must belong to the specified subnetwork and cannot contain IP addresses reserved by
     * system or used by other network interfaces. At the time of writing only a
     * netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API
     * error.
     */
    ipCidrRange: string;
    /**
     * The subnetwork secondary range name specifying
     * the secondary range from which to allocate the IP CIDR range for this alias IP
     * range. If left unspecified, the primary range of the subnetwork will be used.
     */
    subnetworkRangeName?: string;
}
interface InstanceTemplateNetworkInterfaceIpv6AccessConfig {
    /**
     * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.
     */
    externalIpv6: string;
    /**
     * The prefix length of the external IPv6 range.
     */
    externalIpv6PrefixLength: string;
    /**
     * The name of the instance template.
If you leave * this blank, the provider will auto-generate a unique name. */ name: string; /** * The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6 */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName: string; } interface InstanceTemplateNetworkPerformanceConfig { /** * The egress bandwidth tier to enable. Possible values: TIER_1, DEFAULT */ totalEgressBandwidthTier: string; } interface InstanceTemplateReservationAffinity { /** * Specifies the label selector for the reservation to use.. * Structure is documented below. */ specificReservation?: outputs.compute.InstanceTemplateReservationAffinitySpecificReservation; /** * The type of reservation from which this instance can consume resources. */ type: string; } interface InstanceTemplateReservationAffinitySpecificReservation { /** * Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface InstanceTemplateScheduling { /** * Specifies whether the instance should be * automatically restarted if it is terminated by Compute Engine (not * terminated by a user). This defaults to true. */ automaticRestart?: boolean; /** * Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. */ availabilityDomain?: number; /** * Beta Settings for the instance to perform a graceful shutdown. Structure is documented below. 
*/ gracefulShutdown?: outputs.compute.InstanceTemplateSchedulingGracefulShutdown; /** * Beta Specifies the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. */ hostErrorTimeoutSeconds?: number; /** * Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) */ instanceTerminationAction?: string; /** * (../guides/provider_versions.html.markdown) Specifies the maximum amount of time a Local Ssd Vm should wait while recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour. Structure is documented below. */ localSsdRecoveryTimeouts?: outputs.compute.InstanceTemplateSchedulingLocalSsdRecoveryTimeout[]; /** * Beta Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. */ maintenanceInterval?: string; /** * The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instanceTerminationAction`. Structure is documented below. */ maxRunDuration?: outputs.compute.InstanceTemplateSchedulingMaxRunDuration; /** * Minimum number of cpus for the instance. */ minNodeCpus?: number; /** * Specifies node affinities or anti-affinities * to determine which sole-tenant nodes your instances and managed instance * groups will use as host systems. Read more on sole-tenant node creation * [here](https://cloud.google.com/compute/docs/nodes/create-nodes). * Structure documented below. */ nodeAffinities?: outputs.compute.InstanceTemplateSchedulingNodeAffinity[]; /** * Defines the maintenance behavior for this * instance. 
*/ onHostMaintenance: string; /** * Specifies the action to be performed when the instance is terminated using `maxRunDuration` and `STOP` `instanceTerminationAction`. Only support `true` `discardLocalSsd` at this point. Structure is documented below. */ onInstanceStopAction?: outputs.compute.InstanceTemplateSchedulingOnInstanceStopAction; /** * Allows instance to be preempted. This defaults to * false. Read more on this * [here](https://cloud.google.com/compute/docs/instances/preemptible). */ preemptible?: boolean; /** * Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, * `preemptible` should be `true` and `automaticRestart` should be * `false`. For more info about * `SPOT`, read [here](https://cloud.google.com/compute/docs/instances/spot) */ provisioningModel: string; /** * Beta Boolean parameter. Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown?: boolean; /** * Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. */ terminationTime?: string; } interface InstanceTemplateSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. Structure is documented below: */ maxDuration?: outputs.compute.InstanceTemplateSchedulingGracefulShutdownMaxDuration; } interface InstanceTemplateSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented with a 0 * `seconds` field and a positive `nanos` field. 
Must be from 0 to
 * 999,999,999 inclusive.
 */
nanos?: number;
/**
 * Span of time at a resolution of a second.
 * The value must be between 1 and 3600, which is 3,600 seconds (one hour).
 */
seconds: number;
}
interface InstanceTemplateSchedulingLocalSsdRecoveryTimeout {
    /**
     * Span of time that's a fraction of a second at nanosecond
     * resolution. Durations less than one second are represented with a 0
     * `seconds` field and a positive `nanos` field. Must be from 0 to
     * 999,999,999 inclusive.
     */
    nanos?: number;
    /**
     * Span of time at a resolution of a second. Must be from 0 to
     * 315,576,000,000 inclusive. Note: these bounds are computed from: 60
     * sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years.
     */
    seconds: number;
}
interface InstanceTemplateSchedulingMaxRunDuration {
    /**
     * Span of time that's a fraction of a second at nanosecond
     * resolution. Durations less than one second are represented with a 0
     * `seconds` field and a positive `nanos` field. Must be from 0 to
     * 999,999,999 inclusive.
     */
    nanos?: number;
    /**
     * Span of time at a resolution of a second. Must be from 0 to
     * 315,576,000,000 inclusive. Note: these bounds are computed from: 60
     * sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years.
     */
    seconds: number;
}
interface InstanceTemplateSchedulingNodeAffinity {
    /**
     * The key for the node affinity label.
     */
    key: string;
    /**
     * The operator. Can be `IN` for node-affinities
     * or `NOT_IN` for anti-affinities.
     */
    operator: string;
    /**
     * The values for the node affinity label.
     */
    values: string[];
}
interface InstanceTemplateSchedulingOnInstanceStopAction {
    /**
     * Whether to discard local SSDs attached to the VM while terminating using `maxRunDuration`. Only supports `true` at this point.
     */
    discardLocalSsd?: boolean;
}
interface InstanceTemplateServiceAccount {
    /**
     * The service account e-mail address. If not given, the
     * default Google Compute Engine service account is used.
*/ email: string; /** * A list of service scopes. Both OAuth2 URLs and gcloud * short names are supported. To allow full access to all Cloud APIs, use the * `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes). * * The [service accounts documentation](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) * explains that access scopes are the legacy method of specifying permissions for your instance. * To follow best practices you should create a dedicated service account with the minimum permissions the VM requires. * To use a dedicated service account this field should be configured as a list containing the `cloud-platform` scope. * See [Authenticate workloads using service accounts best practices](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#best_practices) * and [Best practices for using service accounts](https://cloud.google.com/iam/docs/best-practices-service-accounts#single-purpose). */ scopes: string[]; } interface InstanceTemplateShieldedInstanceConfig { /** * - Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true. */ enableIntegrityMonitoring?: boolean; /** * - Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false. */ enableSecureBoot?: boolean; /** * - Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true. */ enableVtpm?: boolean; } interface InstantSnapshotIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
* * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InstantSnapshotIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface InterconnectApplicationAwareInterconnect { /** * (Optional, Beta) * Bandwidth Percentage policy allows you to have granular control over how your Interconnect * bandwidth is utilized among your workloads mapping to different traffic classes. * Structure is documented below. */ bandwidthPercentagePolicy?: outputs.compute.InterconnectApplicationAwareInterconnectBandwidthPercentagePolicy; /** * (Optional, Beta) * A description for the AAI profile on this interconnect. */ profileDescription?: string; /** * (Optional, Beta) * Optional field to specify a list of shape average percentages to be * applied in conjunction with StrictPriorityPolicy or BandwidthPercentagePolicy * Structure is documented below. 
*/ shapeAveragePercentages?: outputs.compute.InterconnectApplicationAwareInterconnectShapeAveragePercentage[]; /** * (Optional, Beta) * Specify configuration for StrictPriorityPolicy. */ strictPriorityPolicy?: outputs.compute.InterconnectApplicationAwareInterconnectStrictPriorityPolicy; } interface InterconnectApplicationAwareInterconnectBandwidthPercentagePolicy { /** * (Optional, Beta) * Specify bandwidth percentages for various traffic classes for queuing * type Bandwidth Percent. * Structure is documented below. */ bandwidthPercentages?: outputs.compute.InterconnectApplicationAwareInterconnectBandwidthPercentagePolicyBandwidthPercentage[]; } interface InterconnectApplicationAwareInterconnectBandwidthPercentagePolicyBandwidthPercentage { /** * (Optional, Beta) * Bandwidth percentage for a specific traffic class. */ percentage?: number; /** * (Optional, Beta) * Enum representing the various traffic classes offered by AAI. * Default value is `TC_UNSPECIFIED`. * Possible values are: `TC_UNSPECIFIED`, `TC1`, `TC2`, `TC3`, `TC4`, `TC5`, `TC6`. */ trafficClass?: string; } interface InterconnectApplicationAwareInterconnectShapeAveragePercentage { /** * (Optional, Beta) * Bandwidth percentage for a specific traffic class. */ percentage?: number; /** * (Optional, Beta) * Enum representing the various traffic classes offered by AAI. * Default value is `TC_UNSPECIFIED`. * Possible values are: `TC_UNSPECIFIED`, `TC1`, `TC2`, `TC3`, `TC4`, `TC5`, `TC6`. */ trafficClass?: string; } interface InterconnectApplicationAwareInterconnectStrictPriorityPolicy { } interface InterconnectAttachmentGroupAttachment { /** * (Optional) */ attachment?: string; /** * The identifier for this object. Format specified above. */ name: string; } interface InterconnectAttachmentGroupConfigured { /** * (Output) * Which SLA this group is configured to support, and why this * group does or does not meet that SLA's requirements. * Structure is documented below. 
*/ availabilitySlas: outputs.compute.InterconnectAttachmentGroupConfiguredAvailabilitySla[]; } interface InterconnectAttachmentGroupConfiguredAvailabilitySla { /** * (Output) * Which SLA this group supports. Options are the same as the * intent. */ effectiveSla: string; /** * (Output) * Reasons why configuration.availabilitySLA.sla differs from * intent.availabilitySLA. This list is empty if and only if those are the * same. * Structure is documented below. */ intendedSlaBlockers: outputs.compute.InterconnectAttachmentGroupConfiguredAvailabilitySlaIntendedSlaBlocker[]; } interface InterconnectAttachmentGroupConfiguredAvailabilitySlaIntendedSlaBlocker { /** * (Output) * URLs of any particular Attachments to explain this blocker in * more detail. */ attachments: string[]; /** * (Output) * The category of an unmet SLA requirement. */ blockerType: string; /** * (Output) * The url of Google Cloud public documentation explaining * this requirement. This is set for every type of requirement. */ documentationLink: string; /** * (Output) * A human-readable explanation of this requirement and * why it's not met. This is set for every type of requirement. */ explanation: string; /** * (Output) * Metros used to explain this blocker in more detail. * These are three-letter lowercase strings like "iad". This will be set * for some blockers (like NO_ATTACHMENTS_IN_METRO_AND_ZONE) but does * not apply to others. */ metros: string[]; /** * (Output) * Regions used to explain this blocker in more * detail. These are region names formatted like "us-central1". This * will be set for some blockers (like INCOMPATIBLE_REGIONS) but does * not apply to others. 
*/ regions: string[]; /** * (Output) * Zones used to explain this blocker in more detail. * Format is "zone1" and/or "zone2". This will be set for some blockers * (like MISSING_ZONE) but does not apply to others. */ zones: string[]; } interface InterconnectAttachmentGroupIntent { /** * Which SLA the user intends this group to support. * Possible values are: `PRODUCTION_NON_CRITICAL`, `PRODUCTION_CRITICAL`, `NO_SLA`, `AVAILABILITY_SLA_UNSPECIFIED`. */ availabilitySla?: string; } interface InterconnectAttachmentGroupLogicalStructure { /** * (Output) * The regions that Attachments in this group are in, with the * metros, facilities and zones they use listed underneath each region. * Structure is documented below. */ regions: outputs.compute.InterconnectAttachmentGroupLogicalStructureRegion[]; } interface InterconnectAttachmentGroupLogicalStructureRegion { /** * (Output) * The metros of Attachments in this group in this region. * Structure is documented below. */ metros: outputs.compute.InterconnectAttachmentGroupLogicalStructureRegionMetro[]; /** * (Output) * The name of a region, like "us-central1". */ region: string; } interface InterconnectAttachmentGroupLogicalStructureRegionMetro { /** * (Output) * The facilities used for this group's Attachments' * Interconnects. * Structure is documented below. */ facilities: outputs.compute.InterconnectAttachmentGroupLogicalStructureRegionMetroFacility[]; /** * (Output) * The name of the metro, as a three-letter lowercase * string like "iad". This is the first component of the location of an * Interconnect. */ metro: string; } interface InterconnectAttachmentGroupLogicalStructureRegionMetroFacility { /** * (Output) * The name of a facility, like "iad-1234". */ facility: string; /** * (Output) * Zones used to explain this blocker in more detail. 
* Format is "zone1" and/or "zone2". This will be set for some blockers * (like MISSING_ZONE) but does not apply to others. */ zones: outputs.compute.InterconnectAttachmentGroupLogicalStructureRegionMetroFacilityZone[]; } interface InterconnectAttachmentGroupLogicalStructureRegionMetroFacilityZone { /** * (Output, Deprecated) * URLs of Attachments in the given zone, to the given * region, on Interconnects in the given facility and metro. Every * Attachment in the AG has such an entry. * * @deprecated `attachment` is deprecated and will be removed in a future major release. Use `attachments` instead. */ attachment: string[]; /** * (Output) * URLs of Attachments in the given zone, to the given region, on * Interconnects in the given facility and metro. Every Attachment in * the AG has such an entry. */ attachments: string[]; /** * (Output) * The zone that Attachments in this group are present * in, in the given facilities. This is inherited from their * Interconnects. */ zone: string; } interface InterconnectAttachmentL2Forwarding { /** * A map of VLAN tags to appliances and optional inner mapping rules. */ applianceMappings?: outputs.compute.InterconnectAttachmentL2ForwardingApplianceMapping[]; /** * The default appliance IP address. */ defaultApplianceIpAddress?: string; /** * GeneveHeader related configurations. */ geneveHeader?: outputs.compute.InterconnectAttachmentL2ForwardingGeneveHeader; /** * URL of the network to which this attachment belongs. */ network?: string; /** * The tunnel endpoint IP address. */ tunnelEndpointIpAddress?: string; } interface InterconnectAttachmentL2ForwardingApplianceMapping { /** * The appliance IP address. */ applianceIpAddress?: string; /** * Structure is documented below. 
*/ innerVlanToApplianceMappings?: outputs.compute.InterconnectAttachmentL2ForwardingApplianceMappingInnerVlanToApplianceMapping[]; /** * The name of this appliance mapping rule. */ name?: string; /** * The VLAN tag. */ vlanId?: string; } interface InterconnectAttachmentL2ForwardingApplianceMappingInnerVlanToApplianceMapping { /** * The inner appliance IP address. */ innerApplianceIpAddress?: string; /** * List of inner VLAN tags. */ innerVlanTags?: string[]; } interface InterconnectAttachmentL2ForwardingGeneveHeader { /** * VNI is a 24-bit unique virtual network identifier. */ vni?: number; } interface InterconnectAttachmentParams { /** * Resource manager tags to be bound to the interconnect attachment. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. * * The `l2Forwarding` block supports: */ resourceManagerTags?: { [key: string]: string; }; } interface InterconnectAttachmentPrivateInterconnectInfo { /** * (Output) * 802.1q encapsulation tag to be used for traffic between * Google and the customer, going to and from this network and region. */ tag8021q: number; } interface InterconnectCircuitInfo { /** * (Output) * Customer-side demarc ID for this circuit. */ customerDemarcId: string; /** * (Output) * Google-assigned unique ID for this circuit. Assigned at circuit turn-up. */ googleCircuitId: string; /** * (Output) * Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by * Google to the customer in the LOA. */ googleDemarcId: string; } interface InterconnectExpectedOutage { /** * (Output) * If issueType is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be * affected. */ affectedCircuits: string[]; /** * An optional description of this resource. Provide this property when you create the resource. 
*/ description: string; /** * (Output) * Scheduled end time for the outage (milliseconds since Unix epoch). */ endTime: string; /** * (Output) * Form this outage is expected to take. Note that the versions of this enum prefixed with * "IT_" have been deprecated in favor of the unprefixed values. Can take one of the * following values: * - OUTAGE: The Interconnect may be completely out of service for some or all of the * specified window. * - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain * up, but with reduced bandwidth. */ issueType: string; /** * Name of the resource. Provided by the client when the resource is created. The name must be * 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters * long and match the regular expression `a-z?` which means the first * character must be a lowercase letter, and all following characters must be a dash, * lowercase letter, or digit, except the last character, which cannot be a dash. */ name: string; /** * (Output) * The party that generated this notification. Note that the value of NSRC_GOOGLE has been * deprecated in favor of GOOGLE. Can take the following value: * - GOOGLE: this notification as generated by Google. */ source: string; /** * (Output) * Scheduled start time for the outage (milliseconds since Unix epoch). */ startTime: string; /** * (Output) * State of this notification. Note that the versions of this enum prefixed with "NS_" have * been deprecated in favor of the unprefixed values. Can take one of the following values: * - ACTIVE: This outage notification is active. The event could be in the past, present, * or future. See startTime and endTime for scheduling. * - CANCELLED: The outage associated with this notification was cancelled before the * outage was due to start. * - COMPLETED: The outage associated with this notification is complete. 
*/ state: string; } interface InterconnectGroupConfigured { /** * (Output) * How reliable this topology is configured to be, and why * this group does or does not meet the requirements for the intended * capability. * Structure is documented below. */ topologyCapabilities: outputs.compute.InterconnectGroupConfiguredTopologyCapability[]; } interface InterconnectGroupConfiguredTopologyCapability { /** * (Output) * Reasons why configuration.topologyCapability.sla differs * from intent.topologyCapability. This list is empty if and only if those * are the same. * Structure is documented below. */ intendedCapabilityBlockers: outputs.compute.InterconnectGroupConfiguredTopologyCapabilityIntendedCapabilityBlocker[]; /** * (Output) * Which level of reliability this group is configured to * support. */ supportedSla: string; } interface InterconnectGroupConfiguredTopologyCapabilityIntendedCapabilityBlocker { /** * (Output) * The category of an unmet SLA requirement. The Intended * SLA Blockers section below explains this field and how it relates to * other fields in intendedCapabilityBlockers. */ blockerType: string; /** * (Output) * The url of Google Cloud public documentation explaining * this requirement. This is set for every type of requirement. */ documentationLink: string; /** * (Output) * A human-readable explanation of this requirement and * why it's not met. This is set for every type of requirement. */ explanation: string; /** * (Output) * Facilities used to explain this blocker in more detail. * Like physicalStructure.metros.facilities.facility, this is a numeric * string like "5467". */ facilities: string[]; /** * Interconnects in the InterconnectGroup. Keys are arbitrary user-specified * strings. Users are encouraged, but not required, to use their preferred * format for resource links as keys. * Note that there are add-members and remove-members methods in gcloud. * The size of this map is limited by an "Interconnects per group" quota. 
* Structure is documented below. */ interconnects: string[]; /** * (Output) * Metros used to explain this blocker in more detail. * These are three-letter lowercase strings like "iad". A blocker like * INCOMPATIBLE_METROS will specify the problematic metros in this * field. */ metros: string[]; /** * (Output) * Zones used to explain this blocker in more detail. * Zone names are "zone1" and/or "zone2". */ zones: string[]; } interface InterconnectGroupIntent { /** * The reliability the user intends this group to be capable of, in terms * of the Interconnect product SLAs. * Possible values are: `PRODUCTION_NON_CRITICAL`, `PRODUCTION_CRITICAL`, `NO_SLA`, `AVAILABILITY_SLA_UNSPECIFIED`. */ topologyCapability?: string; } interface InterconnectGroupInterconnect { /** * The URL of an Interconnect in this group. All Interconnects in the group are unique. */ interconnect?: string; /** * The identifier for this object. Format specified above. */ name: string; } interface InterconnectGroupPhysicalStructure { /** * (Output) * Metros used to explain this blocker in more detail. * These are three-letter lowercase strings like "iad". A blocker like * INCOMPATIBLE_METROS will specify the problematic metros in this * field. */ metros: outputs.compute.InterconnectGroupPhysicalStructureMetro[]; } interface InterconnectGroupPhysicalStructureMetro { /** * (Output) * Facilities used to explain this blocker in more detail. * Like physicalStructure.metros.facilities.facility, this is a numeric * string like "5467". */ facilities: outputs.compute.InterconnectGroupPhysicalStructureMetroFacility[]; /** * (Output) * The name of the metro, as a three-letter lowercase string * like "iad". This is the first component of the location of * Interconnects underneath this. */ metro: string; } interface InterconnectGroupPhysicalStructureMetroFacility { /** * (Output) * The ID of this facility, as a numeric string like * "5467". 
This is the third component of the location of Interconnects * in this facility. */ facility: string; /** * (Output) * Zones used to explain this blocker in more detail. * Zone names are "zone1" and/or "zone2". */ zones: outputs.compute.InterconnectGroupPhysicalStructureMetroFacilityZone[]; } interface InterconnectGroupPhysicalStructureMetroFacilityZone { /** * Interconnects in the InterconnectGroup. Keys are arbitrary user-specified * strings. Users are encouraged, but not required, to use their preferred * format for resource links as keys. * Note that there are add-members and remove-members methods in gcloud. * The size of this map is limited by an "Interconnects per group" quota. * Structure is documented below. */ interconnects: string[]; /** * (Output) * The name of the zone, either "zone1" or "zone2". * This is the second component of the location of Interconnects in * this facility. */ zone: string; } interface InterconnectMacsec { /** * If set to true, the Interconnect connection is configured with a should-secure * MACsec security policy, that allows the Google router to fallback to cleartext * traffic if the MKA session cannot be established. By default, the Interconnect * connection is configured with a must-secure security policy that drops all traffic * if the MKA session cannot be established with your router. */ failOpen?: boolean; /** * A keychain placeholder describing a set of named key objects along with their * start times. A MACsec CKN/CAK is generated for each key in the key chain. * Google router automatically picks the key with the most recent startTime when establishing * or re-establishing a MACsec secure link. * Structure is documented below. 
*/ preSharedKeys: outputs.compute.InterconnectMacsecPreSharedKey[]; } interface InterconnectMacsecPreSharedKey { /** * (Optional, Deprecated) * If set to true, the Interconnect connection is configured with a should-secure * MACsec security policy, that allows the Google router to fallback to cleartext * traffic if the MKA session cannot be established. By default, the Interconnect * connection is configured with a must-secure security policy that drops all traffic * if the MKA session cannot be established with your router. * * > **Warning:** `failOpen` is deprecated and will be removed in a future major release. Use other `failOpen` instead. * * @deprecated `failOpen` is deprecated and will be removed in a future major release. Use other `failOpen` instead. */ failOpen?: boolean; /** * A name for this pre-shared key. The name must be 1-63 characters long, and * comply with RFC1035. Specifically, the name must be 1-63 characters long and match * the regular expression `a-z?` which means the first character * must be a lowercase letter, and all following characters must be a dash, lowercase * letter, or digit, except the last character, which cannot be a dash. */ name: string; /** * A RFC3339 timestamp on or after which the key is valid. startTime can be in the * future. If the keychain has a single key, startTime can be omitted. If the keychain * has multiple keys, startTime is mandatory for each key. The start times of keys must * be in increasing order. The start times of two consecutive keys must be at least 6 * hours apart. */ startTime?: string; } interface InterconnectParams { /** * Resource manager tags to be bound to the interconnect. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; } interface MachineImageIamBindingCondition { /** * An optional description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface MachineImageIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface MachineImageMachineImageEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. */ kmsKeyName?: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount?: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. */ rawKey?: string; /** * (Output) * The RFC 4648 base64 encoded SHA-256 hash of the * customer-supplied encryption key that protects this resource. */ sha256: string; } interface ManagedSslCertificateManaged { /** * Domains for which a managed SSL certificate will be valid. Currently, * there can be up to 100 domains in this list. */ domains: string[]; } interface MangedSslCertificateManaged { /** * Domains for which a managed SSL certificate will be valid. Currently, * there can be up to 100 domains in this list. */ domains: string[]; } interface NetworkAttachmentConnectionEndpoint { /** * (Output) * The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless. 
*/ ipAddress: string; /** * (Output) * The project id or number of the interface to which the IP was assigned. */ projectIdOrNum: string; /** * (Output) * Alias IP ranges from the same subnetwork. */ secondaryIpCidrRanges: string; /** * (Output) * The status of a connected endpoint to this network attachment. */ status: string; /** * (Output) * The subnetwork used to assign the IP to the producer instance network interface. */ subnetwork: string; } interface NetworkEndpointListNetworkEndpoint { /** * The name for a specific VM instance that the IP address belongs to. * This is required for network endpoints of type GCE_VM_IP_PORT. * The instance must be in the same zone as the network endpoint group. */ instance: string; /** * IPv4 address of network endpoint. The IP address must belong * to a VM in GCE (either the primary IP or as part of an aliased IP * range). */ ipAddress: string; /** * Port number of network endpoint. * **Note** `port` is required unless the Network Endpoint Group is created * with the type of `GCE_VM_IP` */ port?: number; } interface NetworkFirewallPolicyPacketMirroringRuleMatch { /** * CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. */ destIpRanges?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. * * * The `layer4Configs` block supports: */ layer4Configs: outputs.compute.NetworkFirewallPolicyPacketMirroringRuleMatchLayer4Config[]; /** * CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. */ srcIpRanges?: string[]; } interface NetworkFirewallPolicyPacketMirroringRuleMatchLayer4Config { /** * The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. * This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. */ ipProtocol: string; /** * An optional list of ports to which this rule applies. 
This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. */ ports?: string[]; } interface NetworkFirewallPolicyPacketMirroringRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' */ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface NetworkFirewallPolicyRuleMatch { /** * Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. */ destAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns?: string[]; /** * CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. */ destIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic destination. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ destNetworkScope?: string; /** * Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. */ destRegionCodes?: string[]; /** * Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. 
*/ layer4Configs: outputs.compute.NetworkFirewallPolicyRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. */ srcAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns?: string[]; /** * CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. */ srcIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic source. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ srcNetworkScope?: string; /** * (Optional, Beta) * Networks of the traffic source. It can be either a full or partial url. */ srcNetworks?: string[]; /** * Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. */ srcRegionCodes?: string[]; /** * List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. * Structure is documented below. */ srcSecureTags?: outputs.compute.NetworkFirewallPolicyRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. * * * The `layer4Configs` block supports: */ srcThreatIntelligences?: string[]; } interface NetworkFirewallPolicyRuleMatchLayer4Config { /** * The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. * This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. 
*/ ipProtocol: string; /** * An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. */ ports?: string[]; } interface NetworkFirewallPolicyRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. */ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface NetworkFirewallPolicyRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. */ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface NetworkFirewallPolicyWithRulesPredefinedRule { /** * (Output) * The Action to perform when the client connection triggers the rule. Can currently be either * "allow", "deny", "applySecurityProfileGroup" or "gotoNext". */ action: string; /** * An optional description of this resource. */ description: string; /** * (Output) * The direction in which this rule applies. If unspecified an INGRESS rule is created. */ direction: string; /** * (Output) * Denotes whether the firewall policy rule is disabled. When set to true, * the firewall policy rule is not enforced and traffic behaves as if it did * not exist. If this is unspecified, the firewall policy rule will be * enabled. */ disabled: boolean; /** * (Output) * Denotes whether to enable logging for a particular rule. * If logging is enabled, logs will be exported to the * configured export destination in Stackdriver. */ enableLogging: boolean; /** * (Output) * A match condition that incoming traffic is evaluated against. 
If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ matches: outputs.compute.NetworkFirewallPolicyWithRulesPredefinedRuleMatch[]; /** * (Output) * An integer indicating the priority of a rule in the list. The priority must be a value * between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the * highest priority and 2147483647 is the lowest priority. */ priority: number; /** * (Output) * An optional name for the rule. This field is not a unique identifier * and can be updated. */ ruleName: string; /** * (Output) * A fully-qualified URL of a SecurityProfile resource instance. * Example: * https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group * Must be specified if action is 'apply_security_profile_group'. */ securityProfileGroup: string; /** * (Output) * A list of secure tags that controls which instances the firewall rule * applies to. If targetSecureTag are specified, then the * firewall rule applies only to instances in the VPC network that have one * of those EFFECTIVE secure tags, if all the targetSecureTag are in * INEFFECTIVE state, then this rule will be ignored. * targetSecureTag may not be set at the same time as * targetServiceAccounts. * If neither targetServiceAccounts nor * targetSecureTag are specified, the firewall rule applies * to all instances on the specified network. * Maximum number of target label tags allowed is 256. * Structure is documented below. */ targetSecureTags: outputs.compute.NetworkFirewallPolicyWithRulesPredefinedRuleTargetSecureTag[]; /** * (Output) * A list of service accounts indicating the sets of * instances that are applied with this rule. */ targetServiceAccounts: string[]; /** * (Output) * Boolean flag indicating if the traffic should be TLS decrypted. * It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. 
*/ tlsInspect: boolean; } interface NetworkFirewallPolicyWithRulesPredefinedRuleMatch { /** * Address groups which should be matched against the traffic destination. * Maximum number of destination address groups is 10. */ destAddressGroups: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns: string[]; /** * Destination IP address range in CIDR format. Required for * EGRESS rules. */ destIpRanges: string[]; /** * Region codes whose IP addresses will be used to match for destination * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of destination region codes allowed is 5000. */ destRegionCodes: string[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.NetworkFirewallPolicyWithRulesPredefinedRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. * Maximum number of source address groups is 10. */ srcAddressGroups: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns: string[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. */ srcIpRanges: string[]; /** * Region codes whose IP addresses will be used to match for source * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of source region codes allowed is 5000. */ srcRegionCodes: string[]; /** * List of secure tag values, which should be matched at the source * of the traffic. 
* For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, * and there is no srcIpRange, this rule will be ignored. * Maximum number of source tag values allowed is 256. * Structure is documented below. * * * The nested `layer4Config` block is documented by the * `NetworkFirewallPolicyWithRulesPredefinedRuleMatchLayer4Config` interface below. */ srcSecureTags: outputs.compute.NetworkFirewallPolicyWithRulesPredefinedRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic source. */ srcThreatIntelligences: string[]; } interface NetworkFirewallPolicyWithRulesPredefinedRuleMatchLayer4Config { /** * (Output) * The IP protocol to which this rule applies. The protocol * type is required when creating a firewall rule. * This value can either be one of the following well * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), * or the IP protocol number. */ ipProtocol: string; /** * (Output) * An optional list of ports to which this rule applies. This field * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and * ["12345-12349"]. */ ports: string[]; } interface NetworkFirewallPolicyWithRulesPredefinedRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface NetworkFirewallPolicyWithRulesPredefinedRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. 
*/ state: string; } interface NetworkFirewallPolicyWithRulesRule { /** * The Action to perform when the client connection triggers the rule. Can currently be either * "allow", "deny", "applySecurityProfileGroup" or "gotoNext". */ action: string; /** * A description of the rule. */ description?: string; /** * The direction in which this rule applies. If unspecified an INGRESS rule is created. * Possible values are: `INGRESS`, `EGRESS`. */ direction?: string; /** * Denotes whether the firewall policy rule is disabled. When set to true, * the firewall policy rule is not enforced and traffic behaves as if it did * not exist. If this is unspecified, the firewall policy rule will be * enabled. */ disabled?: boolean; /** * Denotes whether to enable logging for a particular rule. * If logging is enabled, logs will be exported to the * configured export destination in Stackdriver. */ enableLogging?: boolean; /** * A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ match: outputs.compute.NetworkFirewallPolicyWithRulesRuleMatch; /** * An integer indicating the priority of a rule in the list. The priority must be a value * between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the * highest priority and 2147483647 is the lowest priority. */ priority: number; /** * An optional name for the rule. This field is not a unique identifier * and can be updated. */ ruleName?: string; /** * A fully-qualified URL of a SecurityProfile resource instance. * Example: * https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group * Must be specified if action is 'apply_security_profile_group'. */ securityProfileGroup?: string; /** * A list of secure tags that controls which instances the firewall rule * applies to. 
If targetSecureTag are specified, then the * firewall rule applies only to instances in the VPC network that have one * of those EFFECTIVE secure tags, if all the targetSecureTag are in * INEFFECTIVE state, then this rule will be ignored. * targetSecureTag may not be set at the same time as * targetServiceAccounts. * If neither targetServiceAccounts nor * targetSecureTag are specified, the firewall rule applies * to all instances on the specified network. * Maximum number of target label tags allowed is 256. * Structure is documented below. */ targetSecureTags?: outputs.compute.NetworkFirewallPolicyWithRulesRuleTargetSecureTag[]; /** * A list of service accounts indicating the sets of * instances that are applied with this rule. */ targetServiceAccounts?: string[]; /** * Boolean flag indicating if the traffic should be TLS decrypted. * It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. */ tlsInspect?: boolean; } interface NetworkFirewallPolicyWithRulesRuleMatch { /** * Address groups which should be matched against the traffic destination. * Maximum number of destination address groups is 10. */ destAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns?: string[]; /** * Destination IP address range in CIDR format. Required for * EGRESS rules. */ destIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic destination. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ destNetworkScope?: string; /** * Region codes whose IP addresses will be used to match for destination * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of destination region codes allowed is 5000. */ destRegionCodes?: string[]; /** * Names of Network Threat Intelligence lists. 
* The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.NetworkFirewallPolicyWithRulesRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. * Maximum number of source address groups is 10. */ srcAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns?: string[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. */ srcIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic source. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ srcNetworkScope?: string; /** * (Optional, Beta) * Networks of the traffic source. It can be either a full or partial url. */ srcNetworks?: string[]; /** * Region codes whose IP addresses will be used to match for source * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of source region codes allowed is 5000. */ srcRegionCodes?: string[]; /** * List of secure tag values, which should be matched at the source * of the traffic. * For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, * and there is no srcIpRange, this rule will be ignored. * Maximum number of source tag values allowed is 256. * Structure is documented below. * * * The `layer4Config` block supports: */ srcSecureTags?: outputs.compute.NetworkFirewallPolicyWithRulesRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic source. */ srcThreatIntelligences?: string[]; } interface NetworkFirewallPolicyWithRulesRuleMatchLayer4Config { /** * (Output) * The IP protocol to which this rule applies. 
The protocol * type is required when creating a firewall rule. * This value can either be one of the following well * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), * or the IP protocol number. */ ipProtocol: string; /** * (Output) * An optional list of ports to which this rule applies. This field * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and * ["12345-12349"]. */ ports?: string[]; } interface NetworkFirewallPolicyWithRulesRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name?: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface NetworkFirewallPolicyWithRulesRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name?: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface NetworkParams { /** * Resource manager tags to be bound to the network. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; } interface NodeGroupAutoscalingPolicy { /** * Maximum size of the node group. Set to a value less than or equal * to 100 and greater than or equal to min-nodes. */ maxNodes: number; /** * Minimum size of the node group. Must be less * than or equal to max-nodes. The default value is 0. */ minNodes: number; /** * The autoscaling mode. 
Set to one of the following: * - OFF: Disables the autoscaler. * - ON: Enables scaling in and scaling out. * - ONLY_SCALE_OUT: Enables only scaling out. * You must use this mode if your node groups are configured to * restart their hosted VMs on minimal servers. * Possible values are: `OFF`, `ON`, `ONLY_SCALE_OUT`. */ mode: string; } interface NodeGroupMaintenanceWindow { /** * Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. */ startTime: string; } interface NodeGroupShareSettings { /** * A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. * Structure is documented below. */ projectMaps?: outputs.compute.NodeGroupShareSettingsProjectMap[]; /** * Node group sharing type. * Possible values are: `ORGANIZATION`, `SPECIFIC_PROJECTS`, `LOCAL`. */ shareType: string; } interface NodeGroupShareSettingsProjectMap { /** * The identifier for this object. Format specified above. */ id: string; /** * The project id/number should be the same as the key of this project config in the project map. */ projectId: string; } interface NodeTemplateAccelerator { /** * The number of the guest accelerator cards exposed to this * node template. */ acceleratorCount?: number; /** * Full or partial URL of the accelerator type resource to expose * to this node template. */ acceleratorType?: string; } interface NodeTemplateDisk { /** * Specifies the number of such disks. */ diskCount?: number; /** * Specifies the size of the disk in base-2 GB. */ diskSizeGb?: number; /** * Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. */ diskType?: string; } interface NodeTemplateNodeTypeFlexibility { /** * Number of virtual CPUs to use. 
*/ cpus?: string; /** * (Output) * Use local SSD */ localSsd: string; /** * Physical memory available to the node, defined in MB. */ memory?: string; } interface NodeTemplateServerBinding { /** * Type of server binding policy. If `RESTART_NODE_ON_ANY_SERVER`, * nodes using this template will restart on any physical server * following a maintenance event. * If `RESTART_NODE_ON_MINIMAL_SERVER`, nodes using this template * will restart on the same physical server following a maintenance * event, instead of being live migrated to or restarted on a new * physical server. This option may be useful if you are using * software licenses tied to the underlying server characteristics * such as physical sockets or cores, to avoid the need for * additional licenses when maintenance occurs. However, VMs on such * nodes will experience outages while maintenance is applied. * Possible values are: `RESTART_NODE_ON_ANY_SERVER`, `RESTART_NODE_ON_MINIMAL_SERVERS`. */ type: string; } interface OrganizationSecurityPolicyRuleMatch { /** * The configuration options for matching the rule. * Structure is documented below. */ config: outputs.compute.OrganizationSecurityPolicyRuleMatchConfig; /** * A description of the rule. */ description?: string; /** * Preconfigured versioned expression. For organization security policy rules, * the only supported type is "FIREWALL". * Default value is `FIREWALL`. * Possible values are: `FIREWALL`. */ versionedExpr?: string; } interface OrganizationSecurityPolicyRuleMatchConfig { /** * Destination IP address range in CIDR format. Required for * EGRESS rules. */ destIpRanges?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. * * * The `layer4Config` block supports: */ layer4Configs: outputs.compute.OrganizationSecurityPolicyRuleMatchConfigLayer4Config[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. 
*/ srcIpRanges?: string[]; } interface OrganizationSecurityPolicyRuleMatchConfigLayer4Config { /** * The IP protocol to which this rule applies. The protocol * type is required when creating a firewall rule. * This value can either be one of the following well * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), * or the IP protocol number. */ ipProtocol: string; /** * An optional list of ports to which this rule applies. This field * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. * * Example inputs include: ["22"], ["80","443"], and * ["12345-12349"]. */ ports?: string[]; } interface PacketMirroringCollectorIlb { /** * The URL of the forwarding rule. */ url: string; } interface PacketMirroringFilter { /** * IP CIDR ranges that apply as a filter on the source (ingress) or * destination (egress) IP in the IP header. Only IPv4 is supported. */ cidrRanges?: string[]; /** * Direction of traffic to mirror. * Default value is `BOTH`. * Possible values are: `INGRESS`, `EGRESS`, `BOTH`. */ direction?: string; /** * Possible IP protocols including tcp, udp, icmp and esp */ ipProtocols?: string[]; } interface PacketMirroringMirroredResources { /** * All the listed instances will be mirrored. Specify at most 50. * Structure is documented below. */ instances?: outputs.compute.PacketMirroringMirroredResourcesInstance[]; /** * All instances in one of these subnetworks will be mirrored. * Structure is documented below. */ subnetworks?: outputs.compute.PacketMirroringMirroredResourcesSubnetwork[]; /** * All instances with these tags will be mirrored. */ tags?: string[]; } interface PacketMirroringMirroredResourcesInstance { /** * The URL of the instances where this rule should be active. */ url: string; } interface PacketMirroringMirroredResourcesSubnetwork { /** * The URL of the subnetwork where this rule should be active. 
*/ url: string; } interface PacketMirroringNetwork { /** * The full selfLink URL of the network where this rule is active. */ url: string; } interface PerInstanceConfigPreservedState { /** * Stateful disks for the instance. * Structure is documented below. */ disks?: outputs.compute.PerInstanceConfigPreservedStateDisk[]; /** * Preserved external IPs defined for this instance. This map is keyed with the name of the network interface. * Structure is documented below. */ externalIps?: outputs.compute.PerInstanceConfigPreservedStateExternalIp[]; /** * Preserved internal IPs defined for this instance. This map is keyed with the name of the network interface. * Structure is documented below. */ internalIps?: outputs.compute.PerInstanceConfigPreservedStateInternalIp[]; /** * Preserved metadata defined for this instance. This is a list of key->value pairs. */ metadata?: { [key: string]: string; }; } interface PerInstanceConfigPreservedStateDisk { /** * A value that prescribes what should happen to the stateful disk when the VM instance is deleted. * The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. * `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. * `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently * deleted from the instance group. * Default value is `NEVER`. * Possible values are: `NEVER`, `ON_PERMANENT_INSTANCE_DELETION`. */ deleteRule?: string; /** * A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. */ deviceName: string; /** * The mode of the disk. * Default value is `READ_WRITE`. * Possible values are: `READ_ONLY`, `READ_WRITE`. */ mode?: string; /** * The URI of an existing persistent disk to attach under the specified device-name in the format * `projects/project-id/zones/zone/disks/disk-name`. 
*/ source: string; } interface PerInstanceConfigPreservedStateExternalIp { /** * These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. * Default value is `NEVER`. * Possible values are: `NEVER`, `ON_PERMANENT_INSTANCE_DELETION`. */ autoDelete?: string; /** * The identifier for this object. Format specified above. */ interfaceName: string; /** * Ip address representation * Structure is documented below. */ ipAddress?: outputs.compute.PerInstanceConfigPreservedStateExternalIpIpAddress; } interface PerInstanceConfigPreservedStateExternalIpIpAddress { /** * The URL of the reservation for this IP address. */ address?: string; } interface PerInstanceConfigPreservedStateInternalIp { /** * These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. * Default value is `NEVER`. * Possible values are: `NEVER`, `ON_PERMANENT_INSTANCE_DELETION`. */ autoDelete?: string; /** * The identifier for this object. Format specified above. */ interfaceName: string; /** * Ip address representation * Structure is documented below. */ ipAddress?: outputs.compute.PerInstanceConfigPreservedStateInternalIpIpAddress; } interface PerInstanceConfigPreservedStateInternalIpIpAddress { /** * The URL of the reservation for this IP address. */ address?: string; } interface PreviewFeatureRolloutOperation { /** * The input for the rollout operation. * Structure is documented below. */ rolloutInput?: outputs.compute.PreviewFeatureRolloutOperationRolloutInput; } interface PreviewFeatureRolloutOperationRolloutInput { /** * Predefined rollout plans. 
* Possible values are: `ROLLOUT_PLAN_FAST_ROLLOUT`. */ predefinedRolloutPlan: string; } interface PublicDelegatedPrefixPublicDelegatedSubPrefix { /** * The allocatable prefix length supported by this public delegated prefix. This field is optional and cannot be set for prefixes in DELEGATION mode. It cannot be set for IPv4 prefixes either, and it always defaults to 32. */ allocatablePrefixLength?: number; /** * Name of the project scoping this PublicDelegatedSubPrefix. */ delegateeProject?: string; /** * An optional description of this resource. */ description?: string; /** * (Output) * Whether this PublicDelegatedSubPrefix supports enhanced IPv4 allocations. * Applicable for IPv4 sub-PDPs only. */ enableEnhancedIpv4Allocation: boolean; /** * The IP address range, in CIDR format, represented by this public delegated prefix. */ ipCidrRange?: string; /** * (Output) * The internet access type for IPv6 Public Delegated Prefixes. Inherited * from parent prefix and can be one of following: * * EXTERNAL: The prefix will be announced to the internet. All children * PDPs will have access type as EXTERNAL. * * INTERNAL: The prefix won’t be announced to the internet. Prefix will * be used privately within Google Cloud. All children PDPs will have * access type as INTERNAL. */ ipv6AccessType: string; /** * Whether the sub prefix is delegated for address creation. */ isAddress?: boolean; /** * Specifies the mode of this IPv6 PDP. MODE must be one of: * * DELEGATION * * EXTERNAL_IPV6_FORWARDING_RULE_CREATION * * EXTERNAL_IPV6_SUBNETWORK_CREATION * * INTERNAL_IPV6_SUBNETWORK_CREATION * Possible values are: `DELEGATION`, `EXTERNAL_IPV6_FORWARDING_RULE_CREATION`, `EXTERNAL_IPV6_SUBNETWORK_CREATION`, `INTERNAL_IPV6_SUBNETWORK_CREATION`. */ mode?: string; /** * Name of the resource. The name must be 1-63 characters long, and * comply with RFC1035. 
Specifically, the name must be 1-63 characters * long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` * which means the first character must be a lowercase letter, and all * following characters must be a dash, lowercase letter, or digit, * except the last character, which cannot be a dash. */ name?: string; /** * A region where the prefix will reside. */ region?: string; /** * The status of the sub public delegated prefix. * Possible values are: `INITIALIZING`, `READY_TO_ANNOUNCE`, `ANNOUNCED`, `DELETING`. */ status?: string; } interface RegionAutoscalerAutoscalingPolicy { /** * The number of seconds that the autoscaler should wait before it * starts collecting information from a new instance. This prevents * the autoscaler from collecting information when the instance is * initializing, during which the collected usage would not be * reliable. The default time autoscaler waits is 60 seconds. * Virtual machine initialization times might vary because of * numerous factors. We recommend that you test how long an * instance may take to initialize. To do this, create an instance * and time the startup process. */ cooldownPeriod?: number; /** * Defines the CPU utilization policy that allows the autoscaler to * scale based on the average CPU utilization of a managed instance * group. * Structure is documented below. */ cpuUtilization: outputs.compute.RegionAutoscalerAutoscalingPolicyCpuUtilization; /** * Configuration parameters of autoscaling based on a load balancer. * Structure is documented below. */ loadBalancingUtilization?: outputs.compute.RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization; /** * The maximum number of instances that the autoscaler can scale up * to. This is required when creating or updating an autoscaler. The * maximum number of replicas should not be lower than minimal number * of replicas. */ maxReplicas: number; /** * Configuration parameters of autoscaling based on a custom metric. * Structure is documented below. 
*/ metrics?: outputs.compute.RegionAutoscalerAutoscalingPolicyMetric[]; /** * The minimum number of replicas that the autoscaler can scale down * to. This cannot be less than 0. If not provided, autoscaler will * choose a default value depending on maximum number of instances * allowed. */ minReplicas: number; /** * Defines operating mode for this policy. */ mode?: string; /** * (Optional, Beta) * Defines scale down controls to reduce the risk of response latency * and outages due to abrupt scale-in events * Structure is documented below. */ scaleDownControl?: outputs.compute.RegionAutoscalerAutoscalingPolicyScaleDownControl; /** * Defines scale in controls to reduce the risk of response latency * and outages due to abrupt scale-in events * Structure is documented below. */ scaleInControl?: outputs.compute.RegionAutoscalerAutoscalingPolicyScaleInControl; /** * Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. * Structure is documented below. */ scalingSchedules?: outputs.compute.RegionAutoscalerAutoscalingPolicyScalingSchedule[]; } interface RegionAutoscalerAutoscalingPolicyCpuUtilization { /** * Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * - NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * - OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. */ predictiveMethod?: string; /** * The target CPU utilization that the autoscaler should maintain. * Must be a float value in the range (0, 1]. If not specified, the * default is 0.6. * If the CPU level is below the target utilization, the autoscaler * scales down the number of instances until it reaches the minimum * number of instances you specified or until the average CPU of * your instances reaches the target utilization. 
* If the average CPU is above the target utilization, the autoscaler * scales up until it reaches the maximum number of instances you * specified or until the average utilization reaches the target * utilization. */ target: number; } interface RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization { /** * Fraction of backend capacity utilization (set in HTTP(s) load * balancing configuration) that autoscaler should maintain. Must * be a positive float value. If not defined, the default is 0.8. */ target: number; } interface RegionAutoscalerAutoscalingPolicyMetric { /** * A filter string to be used as the filter string for * a Stackdriver Monitoring TimeSeries.list API call. * This filter is used to select a specific TimeSeries for * the purpose of autoscaling and to determine whether the metric * is exporting per-instance or per-group data. * You can only use the AND operator for joining selectors. * You can only use direct equality comparison operator (=) without * any functions for each selector. * You can specify the metric in both the filter string and in the * metric field. However, if specified in both places, the metric must * be identical. * The monitored resource type determines what kind of values are * expected for the metric. If it is a gce_instance, the autoscaler * expects the metric to include a separate TimeSeries for each * instance in a group. In such a case, you cannot filter on resource * labels. * If the resource type is any other value, the autoscaler expects * this metric to contain values that apply to the entire autoscaled * instance group and resource label filtering can be performed to * point autoscaler at the correct TimeSeries to scale upon. * This is called a per-group metric for the purpose of autoscaling. * If not specified, the type defaults to gce_instance. 
* You should provide a filter that is selective enough to pick just * one TimeSeries for the autoscaled group or for each of the instances * (if you are using gceInstance resource type). If multiple * TimeSeries are returned upon the query execution, the autoscaler * will sum their respective values to obtain its scaling value. */ filter?: string; /** * The identifier (type) of the Stackdriver Monitoring metric. * The metric cannot have negative values. * The metric must have a value type of INT64 or DOUBLE. */ name: string; /** * If scaling is based on a per-group metric value that represents the * total amount of work to be done or resource usage, set this value to * an amount assigned for a single instance of the scaled group. * The autoscaler will keep the number of instances proportional to the * value of this metric, the metric itself should not change value due * to group resizing. * For example, a good metric to use with the target is * `pubsub.googleapis.com/subscription/num_undelivered_messages` * or a custom metric exporting the total number of requests coming to * your instances. * A bad example would be a metric exporting an average or median * latency, since this value can't include a chunk assignable to a * single instance, it could be better used with utilizationTarget * instead. */ singleInstanceAssignment?: number; /** * The target value of the metric that autoscaler should * maintain. This must be a positive value. A utilization * metric scales number of virtual machines handling requests * to increase or decrease proportionally to the metric. * For example, a good metric to use as a utilizationTarget is * www.googleapis.com/compute/instance/network/received_bytes_count. * The autoscaler will work to keep this value constant for each * of the instances. */ target?: number; /** * Defines how target utilization value is expressed for a * Stackdriver Monitoring metric. * Possible values are: `GAUGE`, `DELTA_PER_SECOND`, `DELTA_PER_MINUTE`. 
*/ type?: string; } interface RegionAutoscalerAutoscalingPolicyScaleDownControl { /** * A nested object resource. * Structure is documented below. */ maxScaledDownReplicas?: outputs.compute.RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas; /** * How long back autoscaling should look when computing recommendations * to include directives regarding slower scale down, as described above. */ timeWindowSec?: number; } interface RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas { /** * Specifies a fixed number of VM instances. This must be a positive * integer. */ fixed?: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. * For example, specify 80 for 80%. */ percent?: number; } interface RegionAutoscalerAutoscalingPolicyScaleInControl { /** * A nested object resource. * Structure is documented below. */ maxScaledInReplicas?: outputs.compute.RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas; /** * How long back autoscaling should look when computing recommendations * to include directives regarding slower scale down, as described above. */ timeWindowSec?: number; } interface RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas { /** * Specifies a fixed number of VM instances. This must be a positive * integer. */ fixed?: number; /** * Specifies a percentage of instances between 0 to 100%, inclusive. * For example, specify 80 for 80%. */ percent?: number; } interface RegionAutoscalerAutoscalingPolicyScalingSchedule { /** * A description of a scaling schedule. */ description?: string; /** * A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, the scaling schedule is disabled and has no effect. */ disabled?: boolean; /** * The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300. 
*/ durationSec: number; /** * Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule. */ minRequiredReplicas: number; /** * The identifier for this object. Format specified above. */ name: string; /** * The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field). */ schedule: string; /** * The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. */ timeZone?: string; } interface RegionBackendServiceBackend { /** * Specifies the balancing mode for this backend. * See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) * for an explanation of load balancing modes. * Default value is `UTILIZATION`. * Possible values are: `UTILIZATION`, `RATE`, `CONNECTION`, `CUSTOM_METRICS`. */ balancingMode?: string; /** * A multiplier applied to the group's maximum servicing capacity * (based on UTILIZATION, RATE or CONNECTION). * ~>**NOTE**: This field cannot be set for * INTERNAL region backend services (default loadBalancingScheme), * but is required for non-INTERNAL backend service. The total * capacityScaler for all backends must be non-zero. * A setting of 0 means the group is completely drained, offering * 0% of its available Capacity. Valid range is [0.0,1.0]. */ capacityScaler?: number; /** * The set of custom metrics that are used for CUSTOM_METRICS BalancingMode. * Structure is documented below. */ customMetrics?: outputs.compute.RegionBackendServiceBackendCustomMetric[]; /** * An optional description of this resource. * Provide this property when you create the resource. */ description?: string; /** * This field designates whether this is a failover backend. More * than one failover backend can be configured for a given RegionBackendService. 
*/ failover: boolean; /** * The fully-qualified URL of an Instance Group or Network Endpoint * Group resource. In case of instance group this defines the list * of instances that serve traffic. Member virtual machine * instances from each instance group must live in the same zone as * the instance group itself. No two backends in a backend service * are allowed to use same Instance Group resource. * For Network Endpoint Groups this defines list of endpoints. All * endpoints of Network Endpoint Group must be hosted on instances * located in the same zone as the Network Endpoint Group. * Backend services cannot mix Instance Group and * Network Endpoint Group backends. * When the `loadBalancingScheme` is INTERNAL, only instance groups * are supported. * Note that you must specify an Instance Group or Network Endpoint * Group resource using the fully-qualified URL, rather than a * partial URL. */ group: string; /** * The max number of simultaneous connections for the group. Can * be used with either CONNECTION or UTILIZATION balancing modes. * Cannot be set for INTERNAL backend services. * For CONNECTION mode, either maxConnections or one * of maxConnectionsPerInstance or maxConnectionsPerEndpoint, * as appropriate for group type, must be set. */ maxConnections?: number; /** * The max number of simultaneous connections that a single backend * network endpoint can handle. Cannot be set * for INTERNAL backend services. * This is used to calculate the capacity of the group. Can be * used in either CONNECTION or UTILIZATION balancing modes. For * CONNECTION mode, either maxConnections or * maxConnectionsPerEndpoint must be set. */ maxConnectionsPerEndpoint?: number; /** * The max number of simultaneous connections that a single * backend instance can handle. Cannot be set for INTERNAL backend * services. * This is used to calculate the capacity of the group. * Can be used in either CONNECTION or UTILIZATION balancing modes. 
* For CONNECTION mode, either maxConnections or * maxConnectionsPerInstance must be set. */ maxConnectionsPerInstance?: number; /** * (Optional, Beta) * Defines a maximum number of in-flight requests for the whole NEG * or instance group. Not available if backend's balancingMode is RATE * or CONNECTION. */ maxInFlightRequests: number; /** * (Optional, Beta) * Defines a maximum number of in-flight requests for a single endpoint. * Not available if backend's balancingMode is RATE or CONNECTION. */ maxInFlightRequestsPerEndpoint: number; /** * (Optional, Beta) * Defines a maximum number of in-flight requests for a single VM. * Not available if backend's balancingMode is RATE or CONNECTION. */ maxInFlightRequestsPerInstance: number; /** * The max requests per second (RPS) of the group. Cannot be set * for INTERNAL backend services. * Can be used with either RATE or UTILIZATION balancing modes, * but required if RATE mode. Either maxRate or one * of maxRatePerInstance or maxRatePerEndpoint, as appropriate for * group type, must be set. */ maxRate?: number; /** * The max requests per second (RPS) that a single backend network * endpoint can handle. This is used to calculate the capacity of * the group. Can be used in either balancing mode. For RATE mode, * either maxRate or maxRatePerEndpoint must be set. Cannot be set * for INTERNAL backend services. */ maxRatePerEndpoint?: number; /** * The max requests per second (RPS) that a single backend * instance can handle. This is used to calculate the capacity of * the group. Can be used in either balancing mode. For RATE mode, * either maxRate or maxRatePerInstance must be set. Cannot be set * for INTERNAL backend services. */ maxRatePerInstance?: number; /** * Used when balancingMode is UTILIZATION. This ratio defines the * CPU utilization target for the group. Valid range is [0.0, 1.0]. * Cannot be set for INTERNAL backend services. 
*/
    maxUtilization?: number;
    /**
     * (Optional, Beta)
     * This field specifies how long a connection should be kept alive for:
     * - LONG: Most of the requests are expected to take more than multiple
     * seconds to finish.
     * - SHORT: Most requests are expected to finish with a sub-second latency.
     * Possible values are: `LONG`, `SHORT`.
     */
    trafficDuration?: string;
}
interface RegionBackendServiceBackendCustomMetric {
    /**
     * If true, the metric data is not used for load balancing.
     */
    dryRun: boolean;
    /**
     * Optional parameter to define a target utilization for the Custom Metrics
     * balancing mode. The valid range is [0.0, 1.0].
     */
    maxUtilization?: number;
    /**
     * Name of a custom utilization signal. The name must be 1-64 characters
     * long and match the regular expression `[a-z]([-_.a-z0-9]*[a-z0-9])?` which
     * means the first character must be a lowercase letter, and all following
     * characters must be a dash, period, underscore, lowercase letter, or
     * digit, except the last character, which cannot be a dash, period, or
     * underscore. For usage guidelines, see Custom Metrics balancing mode. This
     * field can only be used for a global or regional backend service with the
     * loadBalancingScheme set to EXTERNAL_MANAGED,
     * INTERNAL_MANAGED or INTERNAL_SELF_MANAGED.
     */
    name: string;
}
interface RegionBackendServiceCdnPolicy {
    /**
     * The CacheKeyPolicy for this CdnPolicy.
     * Structure is documented below.
     */
    cacheKeyPolicy?: outputs.compute.RegionBackendServiceCdnPolicyCacheKeyPolicy;
    /**
     * Specifies the cache setting for all responses from this backend.
     * The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC
     * Possible values are: `USE_ORIGIN_HEADERS`, `FORCE_CACHE_ALL`, `CACHE_ALL_STATIC`.
     */
    cacheMode: string;
    /**
     * Specifies the maximum allowed TTL for cached content served by this origin.
     */
    clientTtl: number;
    /**
     * Specifies the default TTL for cached content served by this origin for responses
     * that do not have an existing valid TTL (max-age or s-max-age).
*/ defaultTtl: number; /** * Specifies the maximum allowed TTL for cached content served by this origin. */ maxTtl: number; /** * Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. */ negativeCaching: boolean; /** * Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. * Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs. * Structure is documented below. */ negativeCachingPolicies?: outputs.compute.RegionBackendServiceCdnPolicyNegativeCachingPolicy[]; /** * Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. */ serveWhileStale: number; /** * Maximum number of seconds the response to a signed URL request * will be considered fresh, defaults to 1hr (3600s). After this * time period, the response will be revalidated before * being served. * When serving responses to signed URL requests, Cloud CDN will * internally behave as though all responses from this backend had a * "Cache-Control: public, max-age=[TTL]" header, regardless of any * existing Cache-Control header. The actual headers served in * responses will not be altered. */ signedUrlCacheMaxAgeSec?: number; } interface RegionBackendServiceCdnPolicyCacheKeyPolicy { /** * If true requests to different hosts will be cached separately. */ includeHost?: boolean; /** * Names of cookies to include in cache keys. */ includeNamedCookies?: string[]; /** * If true, http and https requests will be cached separately. */ includeProtocol?: boolean; /** * If true, include query string parameters in the cache key * according to queryStringWhitelist and * query_string_blacklist. If neither is set, the entire query * string will be included. * If false, the query string will be excluded from the cache * key entirely. 
*/ includeQueryString?: boolean; /** * Names of query string parameters to exclude in cache keys. * All other parameters will be included. Either specify * queryStringWhitelist or query_string_blacklist, not both. * '&' and '=' will be percent encoded and not treated as * delimiters. */ queryStringBlacklists?: string[]; /** * Names of query string parameters to include in cache keys. * All other parameters will be excluded. Either specify * queryStringWhitelist or query_string_blacklist, not both. * '&' and '=' will be percent encoded and not treated as * delimiters. */ queryStringWhitelists?: string[]; } interface RegionBackendServiceCdnPolicyNegativeCachingPolicy { /** * The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 * can be specified as values, and you cannot specify a status code more than once. */ code?: number; /** * (Optional, Beta) * The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s * (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. */ ttl?: number; } interface RegionBackendServiceCircuitBreakers { /** * (Optional, Beta) * The timeout for new network connections to hosts. * Structure is documented below. */ connectTimeout?: outputs.compute.RegionBackendServiceCircuitBreakersConnectTimeout; /** * The maximum number of connections to the backend cluster. * Defaults to 1024. */ maxConnections?: number; /** * The maximum number of pending requests to the backend cluster. * Defaults to 1024. */ maxPendingRequests?: number; /** * The maximum number of parallel requests to the backend cluster. * Defaults to 1024. */ maxRequests?: number; /** * Maximum requests for a single backend connection. This parameter * is respected by both the HTTP/1.1 and HTTP/2 implementations. If * not specified, there is no limit. 
Setting this parameter to 1 * will effectively disable keep alive. */ maxRequestsPerConnection?: number; /** * The maximum number of parallel retries to the backend cluster. * Defaults to 3. */ maxRetries?: number; } interface RegionBackendServiceCircuitBreakersConnectTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface RegionBackendServiceConnectionTrackingPolicy { /** * Specifies connection persistence when backends are unhealthy. * If set to `DEFAULT_FOR_PROTOCOL`, the existing connections persist on * unhealthy backends only for connection-oriented protocols (TCP and SCTP) * and only if the Tracking Mode is PER_CONNECTION (default tracking mode) * or the Session Affinity is configured for 5-tuple. They do not persist * for UDP. * If set to `NEVER_PERSIST`, after a backend becomes unhealthy, the existing * connections on the unhealthy backend are never persisted on the unhealthy * backend. They are always diverted to newly selected healthy backends * (unless all backends are unhealthy). * If set to `ALWAYS_PERSIST`, existing connections always persist on * unhealthy backends regardless of protocol and session affinity. It is * generally not recommended to use this mode overriding the default. * Default value is `DEFAULT_FOR_PROTOCOL`. * Possible values are: `DEFAULT_FOR_PROTOCOL`, `NEVER_PERSIST`, `ALWAYS_PERSIST`. */ connectionPersistenceOnUnhealthyBackends?: string; /** * Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. */ enableStrongAffinity?: boolean; /** * Specifies how long to keep a Connection Tracking entry while there is * no matching traffic (in seconds). 
* For L4 ILB the minimum(default) is 10 minutes and maximum is 16 hours. * For NLB the minimum(default) is 60 seconds and the maximum is 16 hours. */ idleTimeoutSec: number; /** * Specifies the key used for connection tracking. There are two options: * `PER_CONNECTION`: The Connection Tracking is performed as per the * Connection Key (default Hash Method) for the specific protocol. * `PER_SESSION`: The Connection Tracking is performed as per the * configured Session Affinity. It matches the configured Session Affinity. * Default value is `PER_CONNECTION`. * Possible values are: `PER_CONNECTION`, `PER_SESSION`. */ trackingMode?: string; } interface RegionBackendServiceConsistentHash { /** * Hash is based on HTTP Cookie. This field describes a HTTP cookie * that will be used as the hash key for the consistent hash load * balancer. If the cookie is not present, it will be generated. * This field is applicable if the sessionAffinity is set to HTTP_COOKIE. * Structure is documented below. */ httpCookie?: outputs.compute.RegionBackendServiceConsistentHashHttpCookie; /** * The hash based on the value of the specified header field. * This field is applicable if the sessionAffinity is set to HEADER_FIELD. */ httpHeaderName?: string; /** * The minimum number of virtual nodes to use for the hash ring. * Larger ring sizes result in more granular load * distributions. If the number of hosts in the load balancing pool * is larger than the ring size, each host will be assigned a single * virtual node. * Defaults to 1024. */ minimumRingSize?: number; } interface RegionBackendServiceConsistentHashHttpCookie { /** * Name of the cookie. */ name?: string; /** * Path to set for the cookie. */ path?: string; /** * Lifetime of the cookie. * Structure is documented below. */ ttl?: outputs.compute.RegionBackendServiceConsistentHashHttpCookieTtl; } interface RegionBackendServiceConsistentHashHttpCookieTtl { /** * Span of time that's a fraction of a second at nanosecond * resolution. 
* Durations less than one second are represented
     * with a 0 seconds field and a positive nanos field. Must
     * be from 0 to 999,999,999 inclusive.
     */
    nanos?: number;
    /**
     * Span of time at a resolution of a second.
     * Must be from 0 to 315,576,000,000 inclusive.
     */
    seconds: number;
}
interface RegionBackendServiceCustomMetric {
    /**
     * If true, the metric data is not used for load balancing.
     */
    dryRun: boolean;
    /**
     * Name of a custom utilization signal. The name must be 1-64 characters
     * long and match the regular expression `[a-z]([-_.a-z0-9]*[a-z0-9])?` which
     * means the first character must be a lowercase letter, and all following
     * characters must be a dash, period, underscore, lowercase letter, or
     * digit, except the last character, which cannot be a dash, period, or
     * underscore. For usage guidelines, see Custom Metrics balancing mode. This
     * field can only be used for a global or regional backend service with the
     * loadBalancingScheme set to EXTERNAL_MANAGED,
     * INTERNAL_MANAGED or INTERNAL_SELF_MANAGED.
     */
    name: string;
}
interface RegionBackendServiceDynamicForwarding {
    /**
     * (Optional, Beta)
     * IP:PORT based dynamic forwarding configuration.
     * Structure is documented below.
     */
    ipPortSelection?: outputs.compute.RegionBackendServiceDynamicForwardingIpPortSelection;
}
interface RegionBackendServiceDynamicForwardingIpPortSelection {
    /**
     * (Optional, Beta)
     * A boolean flag enabling IP:PORT based dynamic forwarding.
     */
    enabled?: boolean;
}
interface RegionBackendServiceFailoverPolicy {
    /**
     * On failover or failback, this field indicates whether connection drain
     * will be honored. Setting this to true has the following effect: connections
     * to the old active pool are not drained. Connections to the new active pool
     * use the timeout of 10 min (currently fixed). Setting to false has the
     * following effect: both old and new connections will have a drain timeout
     * of 10 min.
     * This can be set to true only if the protocol is TCP.
     * The default is false.
*/ disableConnectionDrainOnFailover: boolean; /** * This option is used only when no healthy VMs are detected in the primary * and backup instance groups. When set to true, traffic is dropped. When * set to false, new connections are sent across all VMs in the primary group. * The default is false. */ dropTrafficIfUnhealthy: boolean; /** * The value of the field must be in [0, 1]. If the ratio of the healthy * VMs in the primary backend is at or below this number, traffic arriving * at the load-balanced IP will be directed to the failover backend. * In case where 'failoverRatio' is not set or all the VMs in the backup * backend are unhealthy, the traffic will be directed back to the primary * backend in the "force" mode, where traffic will be spread to the healthy * VMs with the best effort, or to all VMs when no VM is healthy. * This field is only used with l4 load balancing. */ failoverRatio?: number; } interface RegionBackendServiceHaPolicy { /** * Specifies whether fast IP move is enabled, and if so, the mechanism to achieve it. * Supported values are: * * `DISABLED`: Fast IP Move is disabled. You can only use the haPolicy.leader API to * update the leader. * * `GARP_RA`: Provides a method to very quickly define a new network endpoint as the * leader. This method is faster than updating the leader using the * haPolicy.leader API. Fast IP move works as follows: The VM hosting the * network endpoint that should become the new leader sends either a * Gratuitous ARP (GARP) packet (IPv4) or an ICMPv6 Router Advertisement(RA) * packet (IPv6). Google Cloud immediately but temporarily associates the * forwarding rule IP address with that VM, and both new and in-flight packets * are quickly delivered to that VM. * Possible values are: `DISABLED`, `GARP_RA`. */ fastIpMove?: string; /** * Selects one of the network endpoints attached to the backend NEGs of this service as the * active endpoint (the leader) that receives all traffic. * Structure is documented below. 
*/ leader?: outputs.compute.RegionBackendServiceHaPolicyLeader; } interface RegionBackendServiceHaPolicyLeader { /** * A fully-qualified URL of the zonal Network Endpoint Group (NEG) that the leader is * attached to. */ backendGroup?: string; /** * The network endpoint within the leader.backendGroup that is designated as the leader. * Structure is documented below. */ networkEndpoint?: outputs.compute.RegionBackendServiceHaPolicyLeaderNetworkEndpoint; } interface RegionBackendServiceHaPolicyLeaderNetworkEndpoint { /** * The name of the VM instance of the leader network endpoint. The instance must * already be attached to the NEG specified in the haPolicy.leader.backendGroup. */ instance?: string; } interface RegionBackendServiceIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface RegionBackendServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. 
*/ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface RegionBackendServiceIap { /** * Whether the serving infrastructure will authenticate and authorize all incoming requests. */ enabled: boolean; /** * OAuth2 Client ID for IAP */ oauth2ClientId?: string; /** * OAuth2 Client Secret for IAP * **Note**: This property is sensitive and will not be displayed in the plan. */ oauth2ClientSecret?: string; /** * (Output) * OAuth2 Client Secret SHA-256 for IAP * **Note**: This property is sensitive and will not be displayed in the plan. */ oauth2ClientSecretSha256: string; } interface RegionBackendServiceLogConfig { /** * Whether to enable logging for the load balancer traffic served by this backend service. */ enable?: boolean; /** * Specifies the fields to include in logging. This field can only be specified if logging is enabled for this backend service. */ optionalFields: string[]; /** * Specifies the optional logging mode for the load balancer traffic. * Supported values: INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM. * Possible values are: `INCLUDE_ALL_OPTIONAL`, `EXCLUDE_ALL_OPTIONAL`, `CUSTOM`. */ optionalMode: string; /** * This field can only be specified if logging is enabled for this backend service. The value of * the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer * where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. * The default value is 1.0. */ sampleRate?: number; } interface RegionBackendServiceNetworkPassThroughLbTrafficPolicy { /** * When configured, new connections are load balanced across healthy backend endpoints in the local zone. * Structure is documented below. 
*/ zonalAffinity?: outputs.compute.RegionBackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity; } interface RegionBackendServiceNetworkPassThroughLbTrafficPolicyZonalAffinity { /** * This field indicates whether zonal affinity is enabled or not. * Default value is `ZONAL_AFFINITY_DISABLED`. * Possible values are: `ZONAL_AFFINITY_DISABLED`, `ZONAL_AFFINITY_SPILL_CROSS_ZONE`, `ZONAL_AFFINITY_STAY_WITHIN_ZONE`. */ spillover?: string; /** * The value of the field must be in [0, 1]. When the ratio of the count of healthy backend endpoints in a zone * to the count of backend endpoints in that same zone is equal to or above this threshold, the load balancer * distributes new connections to all healthy endpoints in the local zone only. When the ratio of the count * of healthy backend endpoints in a zone to the count of backend endpoints in that same zone is below this * threshold, the load balancer distributes all new connections to all healthy endpoints across all zones. */ spilloverRatio?: number; } interface RegionBackendServiceOutlierDetection { /** * The base time that a host is ejected for. The real time is equal to the base * time multiplied by the number of times the host has been ejected. Defaults to * 30000ms or 30s. * Structure is documented below. */ baseEjectionTime?: outputs.compute.RegionBackendServiceOutlierDetectionBaseEjectionTime; /** * Number of errors before a host is ejected from the connection pool. When the * backend host is accessed over HTTP, a 5xx return code qualifies as an error. * Defaults to 5. */ consecutiveErrors?: number; /** * The number of consecutive gateway failures (502, 503, 504 status or connection * errors that are mapped to one of those status codes) before a consecutive * gateway failure ejection occurs. Defaults to 5. */ consecutiveGatewayFailure?: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through consecutive 5xx. 
This setting can be used to disable * ejection or to ramp it up slowly. Defaults to 100. */ enforcingConsecutiveErrors?: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through consecutive gateway failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 0. */ enforcingConsecutiveGatewayFailure?: number; /** * The percentage chance that a host will be actually ejected when an outlier * status is detected through success rate statistics. This setting can be used to * disable ejection or to ramp it up slowly. Defaults to 100. */ enforcingSuccessRate?: number; /** * Time interval between ejection sweep analysis. This can result in both new * ejections as well as hosts being returned to service. Defaults to 10 seconds. * Structure is documented below. */ interval?: outputs.compute.RegionBackendServiceOutlierDetectionInterval; /** * Maximum percentage of hosts in the load balancing pool for the backend service * that can be ejected. Defaults to 10%. */ maxEjectionPercent?: number; /** * The number of hosts in a cluster that must have enough request volume to detect * success rate outliers. If the number of hosts is less than this setting, outlier * detection via success rate statistics is not performed for any host in the * cluster. Defaults to 5. */ successRateMinimumHosts?: number; /** * The minimum number of total requests that must be collected in one interval (as * defined by the interval duration above) to include this host in success rate * based outlier detection. If the volume is lower than this setting, outlier * detection via success rate statistics is not performed for that host. Defaults * to 100. */ successRateRequestVolume?: number; /** * This factor is used to determine the ejection threshold for success rate outlier * ejection. 
The ejection threshold is the difference between the mean success * rate, and the product of this factor and the standard deviation of the mean * success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided * by a thousand to get a double. That is, if the desired factor is 1.9, the * runtime value should be 1900. Defaults to 1900. */ successRateStdevFactor?: number; } interface RegionBackendServiceOutlierDetectionBaseEjectionTime { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations * less than one second are represented with a 0 `seconds` field and a positive * `nanos` field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 * inclusive. */ seconds: number; } interface RegionBackendServiceOutlierDetectionInterval { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations * less than one second are represented with a 0 `seconds` field and a positive * `nanos` field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 * inclusive. */ seconds: number; } interface RegionBackendServiceParams { /** * Resource manager tags to be bound to the region backend service. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; } interface RegionBackendServiceStrongSessionAffinityCookie { /** * Name of the cookie. */ name?: string; /** * Path to set for the cookie. */ path?: string; /** * Lifetime of the cookie. * Structure is documented below. */ ttl?: outputs.compute.RegionBackendServiceStrongSessionAffinityCookieTtl; } interface RegionBackendServiceStrongSessionAffinityCookieTtl { /** * Span of time that's a fraction of a second at nanosecond * resolution. 
Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface RegionBackendServiceSubsetting { /** * The algorithm used for subsetting. * Possible values are: `CONSISTENT_HASH_SUBSETTING`. */ policy: string; /** * The number of backends per backend group assigned to each proxy instance or each service mesh client. * An input parameter to the CONSISTENT_HASH_SUBSETTING algorithm. Can only be set if policy is set to * CONSISTENT_HASH_SUBSETTING. Can only be set if load balancing scheme is INTERNAL_MANAGED or INTERNAL_SELF_MANAGED. * subsetSize is optional for Internal HTTP(S) load balancing and required for Traffic Director. * If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number * of proxies/clients visible to each backend and vice versa. * Must be greater than 0. If subsetSize is larger than the number of backends/endpoints, then subsetting is disabled. */ subsetSize?: number; } interface RegionBackendServiceTlsSettings { /** * Reference to the BackendAuthenticationConfig resource from the networksecurity.googleapis.com namespace. * Can be used in authenticating TLS connections to the backend, as specified by the authenticationMode field. * Can only be specified if authenticationMode is not NONE. */ authenticationConfig?: string; /** * Server Name Indication - see RFC3546 section 3.1. If set, the load balancer sends this string as the SNI hostname in the * TLS connection to the backend, and requires that this string match a Subject Alternative Name (SAN) in the backend's * server certificate. With a Regional Internet NEG backend, if the SNI is specified here, the load balancer uses it * regardless of whether the Regional Internet NEG is specified with FQDN or IP address and port. 
*/ sni?: string; /** * A list of Subject Alternative Names (SANs) that the Load Balancer verifies during a TLS handshake with the backend. * When the server presents its X.509 certificate to the Load Balancer, the Load Balancer inspects the certificate's SAN field, * and requires that at least one SAN match one of the subjectAltNames in the list. This field is limited to 5 entries. * When both sni and subjectAltNames are specified, the load balancer matches the backend certificate's SAN only to * subjectAltNames. * Structure is documented below. */ subjectAltNames?: outputs.compute.RegionBackendServiceTlsSettingsSubjectAltName[]; } interface RegionBackendServiceTlsSettingsSubjectAltName { /** * The SAN specified as a DNS Name. */ dnsName?: string; /** * The SAN specified as a URI. */ uniformResourceIdentifier?: string; } interface RegionCommitmentLicenseResource { /** * The number of licenses purchased. */ amount?: string; /** * Specifies the core range of the instance for which this license applies. */ coresPerLicense?: string; /** * Any applicable license URI. */ license: string; } interface RegionCommitmentResource { /** * Name of the accelerator type resource. Applicable only when the type is ACCELERATOR. */ acceleratorType?: string; /** * The amount of the resource purchased (in a type-dependent unit, * such as bytes). For vCPUs, this can just be an integer. For memory, * this must be provided in MB. Memory must be a multiple of 256 MB, * with up to 6.5GB of memory per every vCPU. */ amount?: string; /** * Type of resource for which this commitment applies. * Possible values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR. */ type?: string; } interface RegionDiskAsyncPrimaryDisk { /** * Primary disk for asynchronous disk replication. */ disk: string; } interface RegionDiskDiskEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. 
*/
    kmsKeyName?: string;
    /**
     * Specifies a 256-bit customer-supplied encryption key, encoded in
     * RFC 4648 base64 to either encrypt or decrypt this resource.
     * **Note**: This property is sensitive and will not be displayed in the plan.
     */
    rawKey?: string;
    /**
     * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit
     * customer-supplied encryption key to either encrypt or decrypt
     * this resource. You can provide either the rawKey or the rsaEncryptedKey.
     * **Note**: This property is sensitive and will not be displayed in the plan.
     */
    rsaEncryptedKey?: string;
    /**
     * (Output)
     * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
     * encryption key that protects this resource.
     */
    sha256: string;
}
interface RegionDiskGuestOsFeature {
    /**
     * The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options.
     * Possible values are: `MULTI_IP_SUBNET`, `SECURE_BOOT`, `SEV_CAPABLE`, `UEFI_COMPATIBLE`, `VIRTIO_SCSI_MULTIQUEUE`, `WINDOWS`, `GVNIC`, `SEV_LIVE_MIGRATABLE`, `SEV_SNP_CAPABLE`, `SUSPEND_RESUME_COMPATIBLE`, `TDX_CAPABLE`.
     */
    type: string;
}
interface RegionDiskIamBindingCondition {
    /**
     * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
     */
    description?: string;
    /**
     * Textual representation of an expression in Common Expression Language syntax.
     */
    expression: string;
    /**
     * A title for the expression, i.e. a short string describing its purpose.
     */
    title: string;
}
interface RegionDiskIamMemberCondition {
    /**
     * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
     */
    description?: string;
    /**
     * Textual representation of an expression in Common Expression Language syntax.
     */
    expression: string;
    /**
     * A title for the expression, i.e. a short string describing its purpose.
     */
    title: string;
}
interface RegionDiskSourceSnapshotEncryptionKey {
    /**
     * (Optional, Beta)
     * The name of the encryption key that is stored in Google Cloud KMS.
     */
    kmsKeyName?: string;
    /**
     * Specifies a 256-bit customer-supplied encryption key, encoded in
     * RFC 4648 base64 to either encrypt or decrypt this resource.
     */
    rawKey?: string;
    /**
     * (Output)
     * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
     * encryption key that protects this resource.
     */
    sha256: string;
}
interface RegionHealthCheckGrpcHealthCheck {
    /**
     * The gRPC service name for the health check.
* The value of grpcServiceName has the following meanings by convention: * * Empty serviceName means the overall status of all services at the backend. * * Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. * The grpcServiceName can only be ASCII. */ grpcServiceName?: string; /** * The port number for the health check request. * Must be specified if portName and portSpecification are not set * or if portSpecification is USE_FIXED_PORT. Valid values are 1 through 65535. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, gRPC health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; } interface RegionHealthCheckGrpcTlsHealthCheck { /** * The gRPC service name for the health check. * The value of grpcServiceName has the following meanings by convention: * * Empty serviceName means the overall status of all services at the backend. * * Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. * The grpcServiceName can only be ASCII. */ grpcServiceName?: string; /** * The port number for the health check request. * Must be specified if portSpecification is USE_FIXED_PORT. Valid values are 1 through 65535. 
*/ port?: number; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: Not supported for GRPC with TLS health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, gRPC health check follows behavior specified in the `port` field. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; } interface RegionHealthCheckHttp2HealthCheck { /** * The value of the host header in the HTTP2 health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host?: string; /** * The TCP port number for the HTTP2 health check request. * The default value is 443. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. * * * 'USE_NAMED_PORT': The 'portName' is used for health checking. * * * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * * If not specified, HTTP2 health check follows behavior specified in 'port' and * 'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"] */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"] */ proxyHeader?: string; /** * The request path of the HTTP2 health check request. * The default value is /. */ requestPath?: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface RegionHealthCheckHttpHealthCheck { /** * The value of the host header in the HTTP health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host?: string; /** * The TCP port number for the HTTP health check request. * The default value is 80. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, HTTP health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. * Default value is `NONE`. * Possible values are: `NONE`, `PROXY_V1`. */ proxyHeader?: string; /** * The request path of the HTTP health check request. * The default value is /. */ requestPath?: string; /** * The bytes to match against the beginning of the response data. 
If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface RegionHealthCheckHttpsHealthCheck { /** * The value of the host header in the HTTPS health check request. * If left empty (default value), the public IP on behalf of which this health * check is performed will be used. */ host?: string; /** * The TCP port number for the HTTPS health check request. * The default value is 443. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, HTTPS health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. * Default value is `NONE`. * Possible values are: `NONE`, `PROXY_V1`. */ proxyHeader?: string; /** * The request path of the HTTPS health check request. * The default value is /. */ requestPath?: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface RegionHealthCheckLogConfig { /** * Indicates whether or not to export logs. This is false by default, * which means no health check logging will be done. 
 */ enable?: boolean; } interface RegionHealthCheckSslHealthCheck { /** * The TCP port number for the SSL health check request. * The default value is 443. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. */ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, SSL health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. * Default value is `NONE`. * Possible values are: `NONE`, `PROXY_V1`. */ proxyHeader?: string; /** * The application data to send once the SSL connection has been * established (default value is empty). If both request and response are * empty, the connection establishment alone will indicate health. The request * data can only be ASCII. */ request?: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface RegionHealthCheckTcpHealthCheck { /** * The TCP port number for the TCP health check request. * The default value is 80. */ port?: number; /** * Port name as defined in InstanceGroup#NamedPort#name. If both port and * portName are defined, port takes precedence. 
*/ portName?: string; /** * Specifies how port is selected for health checking, can be one of the * following values: * * `USE_FIXED_PORT`: The port number in `port` is used for health checking. * * `USE_NAMED_PORT`: The `portName` is used for health checking. * * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each * network endpoint is used for health checking. For other backends, the * port or named port specified in the Backend Service is used for health * checking. * If not specified, TCP health check follows behavior specified in `port` and * `portName` fields. * Possible values are: `USE_FIXED_PORT`, `USE_NAMED_PORT`, `USE_SERVING_PORT`. */ portSpecification?: string; /** * Specifies the type of proxy header to append before sending data to the * backend. * Default value is `NONE`. * Possible values are: `NONE`, `PROXY_V1`. */ proxyHeader?: string; /** * The application data to send once the TCP connection has been * established (default value is empty). If both request and response are * empty, the connection establishment alone will indicate health. The request * data can only be ASCII. */ request?: string; /** * The bytes to match against the beginning of the response data. If left empty * (the default value), any response will indicate health. The response data * can only be ASCII. */ response?: string; } interface RegionInstanceGroupManagerAllInstancesConfig { /** * , The label key-value pairs that you want to patch onto the instance. * * - - - */ labels?: { [key: string]: string; }; /** * , The metadata key-value pairs that you want to patch onto the instance. For more information, see [Project and instance metadata](https://cloud.google.com/compute/docs/metadata#project_and_instance_metadata). */ metadata?: { [key: string]: string; }; } interface RegionInstanceGroupManagerAutoHealingPolicies { /** * The health check resource that signals autohealing. 
*/ healthCheck: string; /** * The number of seconds that the managed instance group waits before * it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600. */ initialDelaySec: number; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicy { /** * Named instance selections configuring properties that the group will use when creating new VMs. */ instanceSelections?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection[]; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection { /** * List of disks to be attached to the instances created from this selection. */ disks?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDisk[]; /** * Full machine-type names, e.g. "n1-standard-16" */ machineTypes: string[]; /** * Name of the minimum CPU platform to be used by this instance selection. e.g. 'Intel Ice Lake' */ minCpuPlatform?: string; /** * The name of the instance group manager. Must be 1-63 * characters long and comply with * [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters * include lowercase letters, numbers, and hyphens. */ name: string; /** * Preference of this instance selection. Lower number means higher preference. MIG will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference. */ rank?: number; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDisk { /** * , The architecture of the image. Allowed values are ARM64 or X86_64. */ architecture?: string; /** * , Whether or not the disk should be auto-deleted. This defaults to true. */ autoDelete?: boolean; /** * , Indicates that this is a boot disk. This defaults to false. 
 */ boot?: boolean; /** * , A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk. */ deviceName?: string; /** * , Encrypts or decrypts a disk using a customer-supplied encryption key. Structure is documented below. */ diskEncryptionKey?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskDiskEncryptionKey; /** * , Name of the disk. When not provided, this defaults to the name of the instance. */ diskName?: string; /** * , The size of the image in gigabytes. If not specified, it will inherit the size of its base image. For SCRATCH disks, the size must be one of 375 or 3000 GB, with a default of 375 GB. */ diskSizeGb?: number; /** * , The Google Compute Engine disk type. Such as "pd-ssd", "local-ssd", "pd-balanced" or "pd-standard". */ diskType?: string; /** * , A list of features to enable on the guest operating system. Applicable only for bootable images. */ guestOsFeatures?: string[]; /** * , Specifies the disk interface to use for attaching this disk. */ interface?: string; /** * , A set of key/value label pairs to assign to disks. Structure is documented below. */ labels?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskLabel[]; /** * , The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If you are attaching or creating a boot disk, this must be READ_WRITE mode. */ mode?: string; /** * , Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type. 
 */ provisionedIops?: number; /** * , Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must be greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). */ provisionedThroughput?: number; /** * , A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. Structure is documented below. */ resourceManagerTags?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskResourceManagerTag[]; /** * , A list (short name or id) of resource policies to attach to this disk. Currently a max of 1 resource policy is supported. */ resourcePolicies?: string; /** * , The name (not self_link) of the disk (such as those managed by google_compute_disk) to attach. > Note: Either source or sourceImage is required when creating a new instance except for when creating a local SSD. */ source?: string; /** * , The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. > Note: Either source or sourceImage is required when creating a new instance except for when creating a local SSD. */ sourceImage?: string; /** * , The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. 
Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. Structure is documented below. */ sourceImageEncryptionKey?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceImageEncryptionKey; /** * , The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot, initializeParams.sourceImage, or disks.source is required except for local SSD. */ sourceSnapshot?: string; /** * , The customer-supplied encryption key of the source snapshot. Structure is documented below. * - - - */ sourceSnapshotEncryptionKey?: outputs.compute.RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceSnapshotEncryptionKey; /** * , The type of Google Compute Engine disk, can be either "SCRATCH" or "PERSISTENT". */ type?: string; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskDiskEncryptionKey { /** * , The self link of the encryption key that is stored in Google Cloud KMS. * - - - */ kmsKeySelfLink?: string; /** * , The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount?: string; /** * , Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey?: string; /** * , Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey?: string; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskLabel { /** * , The unique key of the label to assign to disks. 
*/ key: string; /** * , The value of the label to assign to disks. * - - - */ value: string; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskResourceManagerTag { /** * , The unique key of the resource manager tag to assign to disks. Keys must be in the format tagKeys/{tag_key_id}. */ key: string; /** * , The value of the resource manager tag to assign to disks. Values must be in the format tagValues/456. * - - - */ value: string; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceImageEncryptionKey { /** * , The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. * - - - */ kmsKeySelfLink?: string; /** * , The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. */ kmsKeyServiceAccount?: string; /** * , Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey?: string; /** * , Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey?: string; } interface RegionInstanceGroupManagerInstanceFlexibilityPolicyInstanceSelectionDiskSourceSnapshotEncryptionKey { /** * , The self link of the encryption key that is stored in Google Cloud KMS. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. * - - - */ kmsKeySelfLink?: string; /** * , The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. 
*/ kmsKeyServiceAccount?: string; /** * , Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rawKey?: string; /** * , Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. Only one of kms_key_self_link, rsaEncryptedKey and rawKey may be set. */ rsaEncryptedKey?: string; } interface RegionInstanceGroupManagerInstanceLifecyclePolicy { /** * , Specifies the action that a MIG performs on a failed VM. If the value of the `onFailedHealthCheck` field is `DEFAULT_ACTION`, then the same action also applies to the VMs on which your application fails a health check. Valid options are: `DO_NOTHING`, `REPAIR`. If `DO_NOTHING`, then MIG does not repair a failed VM. If `REPAIR` (default), then MIG automatically repairs a failed VM by recreating it. For more information, see about repairing VMs in a MIG. */ defaultActionOnFailure?: string; /** * , Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type. */ forceUpdateOnRepair?: string; /** * , Specifies the action that a MIG performs on an unhealthy VM. A VM is marked as unhealthy when the application running on that VM fails a health check. Valid options are: `DEFAULT_ACTION`, `DO_NOTHING`, `REPAIR`. If `DEFAULT_ACTION` (default), then MIG uses the same action configured for the `defaultActionOnFailure` field. If `DO_NOTHING`, then MIG does not repair unhealthy VM. If `REPAIR`, then MIG automatically repairs an unhealthy VM by recreating it. For more information, see about repairing VMs in a MIG. 
*/ onFailedHealthCheck?: string; /** * ), Configuration for VM repairs in the MIG. Structure is documented below. * - - - */ onRepair: outputs.compute.RegionInstanceGroupManagerInstanceLifecyclePolicyOnRepair; } interface RegionInstanceGroupManagerInstanceLifecyclePolicyOnRepair { /** * , Specifies whether the MIG can change a VM's zone during a repair. If "YES", MIG can select a different zone for the VM during a repair. Else if "NO", MIG cannot change a VM's zone during a repair. The default value of allowChangingZone is "NO". * * - - - * The `instanceFlexibilityPolicy` block supports: */ allowChangingZone?: string; } interface RegionInstanceGroupManagerNamedPort { /** * The name of the port. */ name: string; /** * The port number. * - - - */ port: number; } interface RegionInstanceGroupManagerParams { /** * Resource manager tags to bind to the managed instance group. The tags are key-value pairs. Keys must be in the format tagKeys/123 and values in the format tagValues/456. For more information, see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources) */ resourceManagerTags?: { [key: string]: string; }; } interface RegionInstanceGroupManagerStandbyPolicy { /** * Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. */ initialDelaySec: number; /** * Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: `MANUAL`, `SCALE_OUT_POOL`. If `MANUAL`(default), you have full control over which VMs are stopped and suspended in the MIG. If `SCALE_OUT_POOL`, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. 
* - - - */ mode: string; } interface RegionInstanceGroupManagerStatefulDisk { /** * , A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently deleted from the instance group. The default is `NEVER`. */ deleteRule?: string; /** * , The device name of the disk to be attached. */ deviceName: string; } interface RegionInstanceGroupManagerStatefulExternalIp { /** * , A value that prescribes what should happen to the external ip when the VM instance is deleted. The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. `NEVER` - detach the ip when the VM is deleted, but do not delete the ip. `ON_PERMANENT_INSTANCE_DELETION` will delete the external ip when the VM is permanently deleted from the instance group. */ deleteRule?: string; /** * , The network interface name of the external Ip. Possible value: `nic0`. */ interfaceName?: string; } interface RegionInstanceGroupManagerStatefulInternalIp { /** * , A value that prescribes what should happen to the internal ip when the VM instance is deleted. The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. `NEVER` - detach the ip when the VM is deleted, but do not delete the ip. `ON_PERMANENT_INSTANCE_DELETION` will delete the internal ip when the VM is permanently deleted from the instance group. */ deleteRule?: string; /** * , The network interface name of the internal Ip. Possible value: `nic0`. */ interfaceName?: string; } interface RegionInstanceGroupManagerStatus { /** * Properties to set on all instances in the group. After setting * allInstancesConfig on the group, you must update the group's instances to * apply the configuration. 
*/ allInstancesConfigs: outputs.compute.RegionInstanceGroupManagerStatusAllInstancesConfig[]; /** * A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified. */ isStable: boolean; /** * Stateful status of the given Instance Group Manager. */ statefuls: outputs.compute.RegionInstanceGroupManagerStatusStateful[]; /** * A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager. */ versionTargets: outputs.compute.RegionInstanceGroupManagerStatusVersionTarget[]; } interface RegionInstanceGroupManagerStatusAllInstancesConfig { /** * Current all-instances configuration revision. This value is in RFC3339 text format. */ currentRevision: string; /** * A bit indicating whether this configuration has been applied to all managed instances in the group. */ effective: boolean; } interface RegionInstanceGroupManagerStatusStateful { /** * A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions. */ hasStatefulConfig: boolean; /** * Status of per-instance configs on the instances. 
 */ perInstanceConfigs: outputs.compute.RegionInstanceGroupManagerStatusStatefulPerInstanceConfig[]; } interface RegionInstanceGroupManagerStatusStatefulPerInstanceConfig { /** * A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status `EFFECTIVE` or there are no per-instance-configs. */ allEffective: boolean; } interface RegionInstanceGroupManagerStatusVersionTarget { /** * A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager. */ isReached: boolean; } interface RegionInstanceGroupManagerUpdatePolicy { /** * The instance redistribution policy for regional managed instance groups. Valid values are: `"PROACTIVE"`, `"NONE"`. If `PROACTIVE` (default), the group attempts to maintain an even distribution of VM instances across zones in the region. If `NONE`, proactive redistribution is disabled. */ instanceRedistributionType?: string; /** * , Specifies a fixed number of VM instances. This must be a positive integer. Conflicts with `maxSurgePercent`. Both cannot be 0. */ maxSurgeFixed: number; /** * , Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%. Conflicts with `maxSurgeFixed`. */ maxSurgePercent?: number; /** * , Specifies a fixed number of VM instances. This must be a positive integer. */ maxUnavailableFixed: number; /** * , Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%. */ maxUnavailablePercent?: number; /** * ), Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600] */ minReadySec?: number; /** * Minimal action to be taken on an instance. 
You can specify either `NONE` to forbid any actions, `REFRESH` to update without stopping instances, `RESTART` to restart existing instances or `REPLACE` to delete and create new instances from the target template. If you specify a `REFRESH`, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. */ minimalAction: string; /** * Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. */ mostDisruptiveAllowedAction?: string; /** * , The instance replacement method for managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set maxUnavailableFixed or maxUnavailablePercent to be greater than 0. * - - - */ replacementMethod?: string; /** * The type of update process. You can specify either `PROACTIVE` so that the instance group manager proactively executes actions in order to bring instances to their target versions or `OPPORTUNISTIC` so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). */ type: string; } interface RegionInstanceGroupManagerVersion { /** * The full URL to an instance template from which all new instances of this version will be created. */ instanceTemplate: string; /** * Version name. 
*/ name?: string; /** * The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below. * * > Exactly one `version` you specify must not have a `targetSize` specified. During a rolling update, the instance group manager will fulfill the `targetSize` * constraints of every other `version`, and any remaining instances will be provisioned with the version where `targetSize` is unset. */ targetSize?: outputs.compute.RegionInstanceGroupManagerVersionTargetSize; } interface RegionInstanceGroupManagerVersionTargetSize { /** * , The number of instances which are managed for this version. Conflicts with `percent`. */ fixed?: number; /** * , The number of instances (calculated as percentage) which are managed for this version. Conflicts with `fixed`. * Note that when using `percent`, rounding will be in favor of explicitly set `targetSize` values; a managed instance group with 2 instances and 2 `version`s, * one of which has a `target_size.percent` of `60` will create 2 instances of that `version`. */ percent?: number; } interface RegionInstanceTemplateAdvancedMachineFeatures { /** * Defines whether the instance should have nested virtualization enabled. Defaults to false. */ enableNestedVirtualization?: boolean; /** * Whether to enable UEFI networking for instance creation. */ enableUefiNetworking?: boolean; /** * [The PMU](https://cloud.google.com/compute/docs/pmu-overview) is a hardware component within the CPU core that monitors how the processor runs code. Valid values for the level of PMU are `STANDARD`, `ENHANCED`, and `ARCHITECTURAL`. */ performanceMonitoringUnit?: string; /** * The number of threads per physical core. To disable [simultaneous multithreading (SMT)](https://cloud.google.com/compute/docs/instances/disabling-smt) set this to 1. */ threadsPerCore?: number; /** * Turbo frequency mode to use for the instance. Supported modes are currently either `ALL_CORE_MAX` or unset (default). 
*/ turboMode?: string; /** * The number of physical cores to expose to an instance. [visible cores info (VC)](https://cloud.google.com/compute/docs/instances/customize-visible-cores). */ visibleCoreCount?: number; } interface RegionInstanceTemplateConfidentialInstanceConfig { /** * Defines the confidential computing technology the instance uses. SEV is an AMD feature. TDX is an Intel feature. One of the following values is required: `SEV`, `SEV_SNP`, `TDX`. `onHostMaintenance` can be set to MIGRATE if `confidentialInstanceType` is set to `SEV` and `minCpuPlatform` is set to `"AMD Milan"`. Otherwise, `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently `minCpuPlatform` has to be set to `"AMD Milan"` or this will fail to create the VM. */ confidentialInstanceType?: string; /** * Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, `onHostMaintenance` can be set to MIGRATE if `minCpuPlatform` is set to `"AMD Milan"`. Otherwise, `onHostMaintenance` has to be set to TERMINATE or this will fail to create the VM. */ enableConfidentialCompute?: boolean; } interface RegionInstanceTemplateDisk { /** * The architecture of the attached disk. Valid values are `ARM64` or `x8664`. */ architecture: string; /** * Whether or not the disk should be auto-deleted. * This defaults to true. */ autoDelete?: boolean; /** * Indicates that this is a boot disk. */ boot: boolean; /** * A unique device name that is reflected into the * /dev/ tree of a Linux operating system running within the instance. If not * specified, the server chooses a default device name to apply to this disk. */ deviceName: string; /** * Encrypts or decrypts a disk using a customer-supplied encryption key. * * If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. 
If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key. * * If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. * * If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. * * Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. Structure documented below. */ diskEncryptionKey?: outputs.compute.RegionInstanceTemplateDiskDiskEncryptionKey; /** * Name of the disk. When not provided, this defaults * to the name of the instance. */ diskName?: string; /** * The size of the image in gigabytes. If not * specified, it will inherit the size of its base image. For SCRATCH disks, * the size must be exactly 375GB. */ diskSizeGb: number; /** * The GCE disk type. Such as `"pd-ssd"`, `"local-ssd"`, * `"pd-balanced"` or `"pd-standard"`. */ diskType: string; /** * A list of features to enable on the guest operating system. Applicable only for bootable images. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. */ guestOsFeatures?: string[]; /** * Specifies the disk interface to use for attaching this disk, * which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI * and the request will fail if you attempt to attach a persistent disk in any other format * than SCSI. Local SSDs can use either NVME or SCSI. 
*/ interface: string; /** * A set of ket/value label pairs to assign to disk created from * this template */ labels?: { [key: string]: string; }; /** * The mode in which to attach this disk, either READ_WRITE * or READ_ONLY. If you are attaching or creating a boot disk, this must * read-write mode. */ mode: string; /** * Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk) or the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks) depending on the selected disk_type. */ provisionedIops: number; /** * Indicates how much throughput to provision for the disk, in MB/s. This sets the amount of data that can be read or written from the disk per second. Values must greater than or equal to 1. For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). */ provisionedThroughput: number; /** * A set of key/value resource manager tag pairs to bind to this disk. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; /** * - A list (short name or id) of resource policies to attach to this disk for automatic snapshot creations. Currently a max of 1 resource policy is supported. */ resourcePolicies?: string; /** * The name (**not self_link**) * of the disk (such as those managed by `gcp.compute.Disk`) to attach. * > **Note:** Either `source`, `sourceImage`, or `sourceSnapshot` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ source?: string; /** * The image from which to * initialize this disk. 
This can be one of: the image's `selfLink`, * `projects/{project}/global/images/{image}`, * `projects/{project}/global/images/family/{family}`, `global/images/{image}`, * `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, * `{project}/{image}`, `{family}`, or `{image}`. * > **Note:** Either `source`, `sourceImage`, or `sourceSnapshot` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ sourceImage: string; /** * The customer-supplied encryption * key of the source image. Required if the source image is protected by a * customer-supplied encryption key. * * Instance templates do not store customer-supplied encryption keys, so you * cannot create disks for instances in a managed instance group if the source * images are encrypted with your own keys. Structure * documented below. */ sourceImageEncryptionKey?: outputs.compute.RegionInstanceTemplateDiskSourceImageEncryptionKey; /** * The source snapshot to create this disk. * > **Note:** Either `source`, `sourceImage`, or `sourceSnapshot` is **required** in a disk block unless the disk type is `local-ssd`. Check the API [docs](https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert) for details. */ sourceSnapshot?: string; /** * The customer-supplied encryption * key of the source snapshot. Structure * documented below. */ sourceSnapshotEncryptionKey?: outputs.compute.RegionInstanceTemplateDiskSourceSnapshotEncryptionKey; /** * The type of GCE disk, can be either `"SCRATCH"` or * `"PERSISTENT"`. */ type: string; } interface RegionInstanceTemplateDiskDiskEncryptionKey { /** * The self link of the encryption key that is stored in Google Cloud KMS */ kmsKeySelfLink?: string; /** * The service account being used for the * encryption request for the given KMS key. If absent, the Compute Engine * default service account is used. 
*/ kmsKeyServiceAccount?: string; } interface RegionInstanceTemplateDiskSourceImageEncryptionKey { /** * The self link of the encryption key that is * stored in Google Cloud KMS. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ kmsKeySelfLink?: string; /** * The service account being used for the * encryption request for the given KMS key. If absent, the Compute Engine * default service account is used. */ kmsKeyServiceAccount?: string; /** * A 256-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) * to decrypt the given image. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rawKey?: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to decrypt the given image. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rsaEncryptedKey?: string; } interface RegionInstanceTemplateDiskSourceSnapshotEncryptionKey { /** * The self link of the encryption key that is * stored in Google Cloud KMS. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ kmsKeySelfLink?: string; /** * The service account being used for the * encryption request for the given KMS key. If absent, the Compute Engine * default service account is used. */ kmsKeyServiceAccount?: string; /** * A 256-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), * encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) * to decrypt this snapshot. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. 
*/ rawKey?: string; /** * Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit [customer-supplied encryption key] * (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) to decrypt this snapshot. Only one of `kmsKeySelfLink`, `rsaEncryptedKey` and `rawKey` * may be set. */ rsaEncryptedKey?: string; } interface RegionInstanceTemplateGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface RegionInstanceTemplateNetworkInterface { /** * Access configurations, i.e. IPs via which this * instance can be accessed via the Internet. Omit to ensure that the instance * is not accessible from the Internet (this means that ssh provisioners will * not work unless you are running Terraform can send traffic to the instance's * network (e.g. via tunnel or because it is running on another cloud instance * on that network). This block can be specified once per `networkInterface`. Structure documented below. */ accessConfigs?: outputs.compute.RegionInstanceTemplateNetworkInterfaceAccessConfig[]; /** * An * array of alias IP ranges for this network interface. Can only be specified for network * interfaces on subnet-mode networks. Structure documented below. */ aliasIpRanges?: outputs.compute.RegionInstanceTemplateNetworkInterfaceAliasIpRange[]; /** * Indicates whether igmp query is enabled on the network interface or not. If enabled, also indicates the version of IGMP supported. */ igmpQuery: string; /** * The prefix length of the primary internal IPv6 range. */ internalIpv6PrefixLength: number; /** * An array of IPv6 access configurations for this interface. * Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig * specified, then this instance will have no external IPv6 Internet access. Structure documented below. 
*/ ipv6AccessConfigs: outputs.compute.RegionInstanceTemplateNetworkInterfaceIpv6AccessConfig[]; /** * One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. */ ipv6AccessType: string; /** * An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork. */ ipv6Address: string; /** * The name of the instance template. If you leave * this blank, Terraform will auto-generate a unique name. */ name: string; /** * The name or selfLink of the network to attach this interface to. * Use `network` attribute for Legacy or Auto subnetted networks and * `subnetwork` for custom subnetted networks. */ network: string; /** * The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. */ networkAttachment: string; /** * The private IP address to assign to the instance. If * empty, the address will be automatically assigned. */ networkIp?: string; /** * The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET, MRDMA, IRDMA. */ nicType?: string; /** * Name of the parent network interface of a dynamic network interface. */ parentNicName: string; /** * The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. */ queueCount?: number; /** * The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6, IPV6_ONLY or IPV4_ONLY. If not specified, IPV4_ONLY will be used. */ stackType: string; /** * the name of the subnetwork to attach this interface * to. The subnetwork must exist in the same `region` this instance will be * created in. 
Either `network` or `subnetwork` must be provided. */ subnetwork: string; /** * The ID of the project in which the subnetwork belongs. * If it is not provided, the provider project is used. */ subnetworkProject: string; /** * VLAN tag of a dynamic network interface, must be an integer in the range from 2 to 255 inclusively. */ vlan?: number; } interface RegionInstanceTemplateNetworkInterfaceAccessConfig { /** * The IP address that will be 1:1 mapped to the instance's * network ip. If not given, one will be generated. */ natIp: string; /** * The service-level to be provided for IPv6 traffic when the * subnet has an external subnet. Only PREMIUM and STANDARD tier is valid for IPv6. */ networkTier: string; /** * The DNS domain name for the public PTR record.The DNS domain name for the public PTR record. */ publicPtrDomainName: string; } interface RegionInstanceTemplateNetworkInterfaceAliasIpRange { /** * The IP CIDR range represented by this alias IP range. This IP CIDR range * must belong to the specified subnetwork and cannot contain IP addresses reserved by * system or used by other network interfaces. At the time of writing only a * netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API * error. */ ipCidrRange: string; /** * The subnetwork secondary range name specifying * the secondary range from which to allocate the IP CIDR range for this alias IP * range. If left unspecified, the primary range of the subnetwork will be used. */ subnetworkRangeName?: string; } interface RegionInstanceTemplateNetworkInterfaceIpv6AccessConfig { /** * The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. */ externalIpv6: string; /** * The prefix length of the external IPv6 range. 
*/ externalIpv6PrefixLength: string; /** * The name of the instance template. If you leave * this blank, Terraform will auto-generate a unique name. */ name: string; /** * The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6 */ networkTier: string; /** * The domain name to be used when creating DNSv6 records for the external IPv6 ranges. */ publicPtrDomainName: string; } interface RegionInstanceTemplateNetworkPerformanceConfig { /** * The egress bandwidth tier to enable. Possible values: TIER_1, DEFAULT */ totalEgressBandwidthTier: string; } interface RegionInstanceTemplateReservationAffinity { /** * Specifies the label selector for the reservation to use.. * Structure is documented below. */ specificReservation?: outputs.compute.RegionInstanceTemplateReservationAffinitySpecificReservation; /** * The type of reservation from which this instance can consume resources. */ type: string; } interface RegionInstanceTemplateReservationAffinitySpecificReservation { /** * Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value. */ key: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface RegionInstanceTemplateScheduling { /** * Specifies whether the instance should be * automatically restarted if it is terminated by Compute Engine (not * terminated by a user). This defaults to true. */ automaticRestart?: boolean; /** * Specifies the availability domain to place the instance in. The value must be a number between 1 and the number of availability domains specified in the spread placement policy attached to the instance. */ availabilityDomain?: number; /** * Settings for the instance to perform a graceful shutdown. 
*/ gracefulShutdown?: outputs.compute.RegionInstanceTemplateSchedulingGracefulShutdown; /** * Beta Specifies the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. */ hostErrorTimeoutSeconds?: number; /** * Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) */ instanceTerminationAction?: string; /** * Specifies the maximum amount of time a Local Ssd Vm should wait while * recovery of the Local Ssd state is attempted. Its value should be in * between 0 and 168 hours with hour granularity and the default value being 1 * hour. */ localSsdRecoveryTimeouts?: outputs.compute.RegionInstanceTemplateSchedulingLocalSsdRecoveryTimeout[]; /** * Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC */ maintenanceInterval?: string; /** * The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instanceTerminationAction`. Only support `DELETE` `instanceTerminationAction` at this point. Structure is documented below. */ maxRunDuration?: outputs.compute.RegionInstanceTemplateSchedulingMaxRunDuration; /** * Minimum number of cpus for the instance. */ minNodeCpus?: number; /** * Specifies node affinities or anti-affinities * to determine which sole-tenant nodes your instances and managed instance * groups will use as host systems. Read more on sole-tenant node creation * [here](https://cloud.google.com/compute/docs/nodes/create-nodes). * Structure documented below. */ nodeAffinities?: outputs.compute.RegionInstanceTemplateSchedulingNodeAffinity[]; /** * Defines the maintenance behavior for this * instance. */ onHostMaintenance: string; /** * Defines the behaviour for instances with the instance_termination_action. 
*/ onInstanceStopAction?: outputs.compute.RegionInstanceTemplateSchedulingOnInstanceStopAction; /** * Allows instance to be preempted. This defaults to * false. Read more on this * [here](https://cloud.google.com/compute/docs/instances/preemptible). */ preemptible?: boolean; /** * Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, * `preemptible` should be `true` and `automaticRestart` should be * `false`. For more info about * `SPOT`, read [here](https://cloud.google.com/compute/docs/instances/spot) */ provisioningModel: string; /** * Default is false and there will be 120 seconds between GCE ACPI G2 Soft Off and ACPI G3 Mechanical Off for Standard VMs and 30 seconds for Spot VMs. */ skipGuestOsShutdown?: boolean; /** * Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. */ terminationTime?: string; } interface RegionInstanceTemplateSchedulingGracefulShutdown { /** * Opts-in for graceful shutdown. */ enabled: boolean; /** * The time allotted for the instance to gracefully shut down. * If the graceful shutdown isn't complete after this time, then the instance * transitions to the STOPPING state. Structure is documented below: */ maxDuration?: outputs.compute.RegionInstanceTemplateSchedulingGracefulShutdownMaxDuration; } interface RegionInstanceTemplateSchedulingGracefulShutdownMaxDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented with a 0 * `seconds` field and a positive `nanos` field. Must be from 0 to * 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. 
* The value must be between 1 and 3600, which is 3,600 seconds (one hour).` */ seconds: number; } interface RegionInstanceTemplateSchedulingLocalSsdRecoveryTimeout { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must * be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. * Must be from 0 to 315,576,000,000 inclusive. */ seconds: number; } interface RegionInstanceTemplateSchedulingMaxRunDuration { /** * Span of time that's a fraction of a second at nanosecond * resolution. Durations less than one second are represented with a 0 * `seconds` field and a positive `nanos` field. Must be from 0 to * 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to * 315,576,000,000 inclusive. Note: these bounds are computed from: 60 * sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. */ seconds: number; } interface RegionInstanceTemplateSchedulingNodeAffinity { /** * The key for the node affinity label. */ key: string; /** * The operator. Can be `IN` for node-affinities * or `NOT_IN` for anti-affinities. */ operator: string; /** * Corresponds to the label values of a reservation resource. */ values: string[]; } interface RegionInstanceTemplateSchedulingOnInstanceStopAction { /** * If true, the contents of any attached Local SSD disks will be discarded. */ discardLocalSsd?: boolean; } interface RegionInstanceTemplateServiceAccount { /** * The service account e-mail address. If not given, the * default Google Compute Engine service account is used. */ email: string; /** * A list of service scopes. Both OAuth2 URLs and gcloud * short names are supported. To allow full access to all Cloud APIs, use the * `cloud-platform` scope. 
See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes). * * The [service accounts documentation](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) * explains that access scopes are the legacy method of specifying permissions for your instance. * To follow best practices you should create a dedicated service account with the minimum permissions the VM requires. * To use a dedicated service account this field should be configured as a list containing the `cloud-platform` scope. * See [Authenticate workloads using service accounts best practices](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#best_practices) * and [Best practices for using service accounts](https://cloud.google.com/iam/docs/best-practices-service-accounts#single-purpose). */ scopes: string[]; } interface RegionInstanceTemplateShieldedInstanceConfig { /** * - Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true. */ enableIntegrityMonitoring?: boolean; /** * - Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false. */ enableSecureBoot?: boolean; /** * - Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true. */ enableVtpm?: boolean; } interface RegionNetworkEndpointGroupAppEngine { /** * Optional serving service. * The service name must be 1-63 characters long, and comply with RFC1035. * Example value: "default", "my-service". */ service?: string; /** * A template to parse service and version fields from a request URL. * URL mask allows for routing to multiple App Engine services without * having to create multiple Network Endpoint Groups and backend services. 
* For example, the request URLs "foo1-dot-appname.appspot.com/v1" and * "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with * URL mask "-dot-appname.appspot.com/". The URL mask will parse * them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. */ urlMask?: string; /** * Optional serving version. * The version must be 1-63 characters long, and comply with RFC1035. * Example value: "v1", "v2". */ version?: string; } interface RegionNetworkEndpointGroupCloudFunction { /** * A user-defined name of the Cloud Function. * The function name is case-sensitive and must be 1-63 characters long. * Example value: "func1". */ function?: string; /** * A template to parse function field from a request URL. URL mask allows * for routing to multiple Cloud Functions without having to create * multiple Network Endpoint Groups and backend services. * For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" * can be backed by the same Serverless NEG with URL mask "/". The URL mask * will parse them to { function = "function1" } and { function = "function2" } respectively. */ urlMask?: string; } interface RegionNetworkEndpointGroupCloudRun { /** * Cloud Run service is the main resource of Cloud Run. * The service must be 1-63 characters long, and comply with RFC1035. * Example value: "run-service". */ service?: string; /** * Cloud Run tag represents the "named-revision" to provide * additional fine-grained traffic routing information. * The tag must be 1-63 characters long, and comply with RFC1035. * Example value: "revision-0010". */ tag?: string; /** * A template to parse service and tag fields from a request URL. * URL mask allows for routing to multiple Run services without having * to create multiple network endpoint groups and backend services. 
* For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" * an be backed by the same Serverless Network Endpoint Group (NEG) with * URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } * and { service="bar2", tag="foo2" } respectively. */ urlMask?: string; } interface RegionNetworkEndpointGroupPscData { /** * The PSC producer port to use when consumer PSC NEG connects to a producer. If * this flag isn't specified for a PSC NEG with endpoint type * private-service-connect, then PSC NEG will be connected to a first port in the * available PSC producer port range. */ producerPort?: string; } interface RegionNetworkEndpointGroupServerlessDeployment { /** * The platform of the NEG backend target(s). Possible values: * API Gateway: apigateway.googleapis.com */ platform: string; /** * The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. * The resource identified by this value is platform-specific and is as follows: API Gateway: The gateway ID, App Engine: The service name, * Cloud Functions: The function name, Cloud Run: The service name */ resource?: string; /** * A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources * on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. * The fields parsed by this template are platform-specific and are as follows: API Gateway: The gateway ID, * App Engine: The service and version, Cloud Functions: The function name, Cloud Run: The service and tag */ urlMask?: string; /** * The optional resource version. 
The version identified by this value is platform-specific and is follows: * API Gateway: Unused, App Engine: The service version, Cloud Functions: Unused, Cloud Run: The service tag */ version?: string; } interface RegionNetworkFirewallPolicyRuleMatch { /** * Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. */ destAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns?: string[]; /** * CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. */ destIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic destination. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ destNetworkScope?: string; /** * Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. */ destRegionCodes?: string[]; /** * Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.RegionNetworkFirewallPolicyRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. */ srcAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns?: string[]; /** * CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. */ srcIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic source. 
* Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ srcNetworkScope?: string; /** * (Optional, Beta) * Networks of the traffic source. It can be either a full or partial url. */ srcNetworks?: string[]; /** * Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. */ srcRegionCodes?: string[]; /** * List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. * Structure is documented below. */ srcSecureTags?: outputs.compute.RegionNetworkFirewallPolicyRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source. * * * The `layer4Configs` block supports: */ srcThreatIntelligences?: string[]; } interface RegionNetworkFirewallPolicyRuleMatchLayer4Config { /** * The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. * This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. */ ipProtocol: string; /** * An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. */ ports?: string[]; } interface RegionNetworkFirewallPolicyRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. */ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. 
A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface RegionNetworkFirewallPolicyRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. */ name?: string; /** * (Output) * State of the secure tag, either EFFECTIVE or INEFFECTIVE. A secure tag is INEFFECTIVE when it is deleted or its network is deleted. */ state: string; } interface RegionNetworkFirewallPolicyWithRulesPredefinedRule { /** * (Output) * The Action to perform when the client connection triggers the rule. Can currently be either * "allow", "deny", "applySecurityProfileGroup" or "gotoNext". */ action: string; /** * An optional description of this resource. */ description: string; /** * (Output) * The direction in which this rule applies. If unspecified an INGRESS rule is created. */ direction: string; /** * (Output) * Denotes whether the firewall policy rule is disabled. When set to true, * the firewall policy rule is not enforced and traffic behaves as if it did * not exist. If this is unspecified, the firewall policy rule will be * enabled. */ disabled: boolean; /** * (Output) * Denotes whether to enable logging for a particular rule. * If logging is enabled, logs will be exported to the * configured export destination in Stackdriver. */ enableLogging: boolean; /** * (Output) * A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ matches: outputs.compute.RegionNetworkFirewallPolicyWithRulesPredefinedRuleMatch[]; /** * (Output) * An integer indicating the priority of a rule in the list. The priority must be a value * between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the * highest priority and 2147483647 is the lowest priority. */ priority: number; /** * (Output) * An optional name for the rule. This field is not a unique identifier * and can be updated. 
*/ ruleName: string; /** * (Output) * A fully-qualified URL of a SecurityProfile resource instance. * Example: * https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group * Must be specified if action is 'apply_security_profile_group'. */ securityProfileGroup: string; /** * (Output) * A list of secure tags that controls which instances the firewall rule * applies to. If targetSecureTag are specified, then the * firewall rule applies only to instances in the VPC network that have one * of those EFFECTIVE secure tags, if all the targetSecureTag are in * INEFFECTIVE state, then this rule will be ignored. * targetSecureTag may not be set at the same time as * targetServiceAccounts. * If neither targetServiceAccounts nor * targetSecureTag are specified, the firewall rule applies * to all instances on the specified network. * Maximum number of target label tags allowed is 256. * Structure is documented below. */ targetSecureTags: outputs.compute.RegionNetworkFirewallPolicyWithRulesPredefinedRuleTargetSecureTag[]; /** * (Output) * A list of service accounts indicating the sets of * instances that are applied with this rule. */ targetServiceAccounts: string[]; /** * (Output) * Boolean flag indicating if the traffic should be TLS decrypted. * It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. */ tlsInspect: boolean; } interface RegionNetworkFirewallPolicyWithRulesPredefinedRuleMatch { /** * Address groups which should be matched against the traffic destination. * Maximum number of destination address groups is 10. */ destAddressGroups: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns: string[]; /** * Destination IP address range in CIDR format. Required for * EGRESS rules. 
*/ destIpRanges: string[]; /** * Region codes whose IP addresses will be used to match for destination * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of destination region codes allowed is 5000. */ destRegionCodes: string[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.RegionNetworkFirewallPolicyWithRulesPredefinedRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. * Maximum number of source address groups is 10. */ srcAddressGroups: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns: string[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. */ srcIpRanges: string[]; /** * Region codes whose IP addresses will be used to match for source * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of source region codes allowed is 5000. */ srcRegionCodes: string[]; /** * List of secure tag values, which should be matched at the source * of the traffic. * For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, * and there is no srcIpRange, this rule will be ignored. * Maximum number of source tag values allowed is 256. * Structure is documented below. * * * The `layer4Config` block supports: */ srcSecureTags: outputs.compute.RegionNetworkFirewallPolicyWithRulesPredefinedRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic source. 
*/ srcThreatIntelligences: string[]; } interface RegionNetworkFirewallPolicyWithRulesPredefinedRuleMatchLayer4Config { /** * (Output) * The IP protocol to which this rule applies. The protocol * type is required when creating a firewall rule. * This value can either be one of the following well * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), * or the IP protocol number. */ ipProtocol: string; /** * (Output) * An optional list of ports to which this rule applies. This field * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and * ["12345-12349"]. */ ports: string[]; } interface RegionNetworkFirewallPolicyWithRulesPredefinedRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface RegionNetworkFirewallPolicyWithRulesPredefinedRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface RegionNetworkFirewallPolicyWithRulesRule { /** * The Action to perform when the client connection triggers the rule. Can currently be either * "allow", "deny", "applySecurityProfileGroup" or "gotoNext". */ action: string; /** * A description of the rule. */ description?: string; /** * The direction in which this rule applies. If unspecified an INGRESS rule is created. * Possible values are: `INGRESS`, `EGRESS`. 
*/ direction?: string; /** * Denotes whether the firewall policy rule is disabled. When set to true, * the firewall policy rule is not enforced and traffic behaves as if it did * not exist. If this is unspecified, the firewall policy rule will be * enabled. */ disabled?: boolean; /** * Denotes whether to enable logging for a particular rule. * If logging is enabled, logs will be exported to the * configured export destination in Stackdriver. */ enableLogging?: boolean; /** * A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ match: outputs.compute.RegionNetworkFirewallPolicyWithRulesRuleMatch; /** * An integer indicating the priority of a rule in the list. The priority must be a value * between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the * highest priority and 2147483647 is the lowest priority. */ priority: number; /** * An optional name for the rule. This field is not a unique identifier * and can be updated. */ ruleName?: string; /** * A fully-qualified URL of a SecurityProfile resource instance. * Example: * https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group * Must be specified if action is 'apply_security_profile_group'. */ securityProfileGroup?: string; /** * A list of secure tags that controls which instances the firewall rule * applies to. If targetSecureTag are specified, then the * firewall rule applies only to instances in the VPC network that have one * of those EFFECTIVE secure tags, if all the targetSecureTag are in * INEFFECTIVE state, then this rule will be ignored. * targetSecureTag may not be set at the same time as * targetServiceAccounts. * If neither targetServiceAccounts nor * targetSecureTag are specified, the firewall rule applies * to all instances on the specified network. 
* Maximum number of target label tags allowed is 256. * Structure is documented below. */ targetSecureTags?: outputs.compute.RegionNetworkFirewallPolicyWithRulesRuleTargetSecureTag[]; /** * A list of service accounts indicating the sets of * instances that are applied with this rule. */ targetServiceAccounts?: string[]; /** * Boolean flag indicating if the traffic should be TLS decrypted. * It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. */ tlsInspect?: boolean; } interface RegionNetworkFirewallPolicyWithRulesRuleMatch { /** * Address groups which should be matched against the traffic destination. * Maximum number of destination address groups is 10. */ destAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic destination. Maximum number of destination fqdn allowed is 100. */ destFqdns?: string[]; /** * Destination IP address range in CIDR format. Required for * EGRESS rules. */ destIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic destination. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ destNetworkScope?: string; /** * Region codes whose IP addresses will be used to match for destination * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of destination region codes allowed is 5000. */ destRegionCodes?: string[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic destination. */ destThreatIntelligences?: string[]; /** * Pairs of IP protocols and ports that the rule should match. * Structure is documented below. */ layer4Configs: outputs.compute.RegionNetworkFirewallPolicyWithRulesRuleMatchLayer4Config[]; /** * Address groups which should be matched against the traffic source. * Maximum number of source address groups is 10. 
*/ srcAddressGroups?: string[]; /** * Fully Qualified Domain Name (FQDN) which should be matched against * traffic source. Maximum number of source fqdn allowed is 100. */ srcFqdns?: string[]; /** * Source IP address range in CIDR format. Required for * INGRESS rules. */ srcIpRanges?: string[]; /** * (Optional, Beta) * Network scope of the traffic source. * Possible values are: `INTERNET`, `INTRA_VPC`, `NON_INTERNET`, `VPC_NETWORKS`. */ srcNetworkScope?: string; /** * (Optional, Beta) * Networks of the traffic source. It can be either a full or partial url. */ srcNetworks?: string[]; /** * Region codes whose IP addresses will be used to match for source * of traffic. Should be specified as 2 letter country code defined as per * ISO 3166 alpha-2 country codes. ex."US" * Maximum number of source region codes allowed is 5000. */ srcRegionCodes?: string[]; /** * List of secure tag values, which should be matched at the source * of the traffic. * For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, * and there is no srcIpRange, this rule will be ignored. * Maximum number of source tag values allowed is 256. * Structure is documented below. * * * The `layer4Config` block supports: */ srcSecureTags?: outputs.compute.RegionNetworkFirewallPolicyWithRulesRuleMatchSrcSecureTag[]; /** * Names of Network Threat Intelligence lists. * The IPs in these lists will be matched against traffic source. */ srcThreatIntelligences?: string[]; } interface RegionNetworkFirewallPolicyWithRulesRuleMatchLayer4Config { /** * (Output) * The IP protocol to which this rule applies. The protocol * type is required when creating a firewall rule. * This value can either be one of the following well * known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), * or the IP protocol number. */ ipProtocol: string; /** * (Output) * An optional list of ports to which this rule applies. This field * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. 
If not specified, this rule * applies to connections through any port. * Example inputs include: ["22"], ["80","443"], and * ["12345-12349"]. */ ports?: string[]; } interface RegionNetworkFirewallPolicyWithRulesRuleMatchSrcSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name?: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface RegionNetworkFirewallPolicyWithRulesRuleTargetSecureTag { /** * Name of the secure tag, created with TagManager's TagValue API. * @pattern tagValues/[0-9]+ */ name?: string; /** * (Output) * [Output Only] State of the secure tag, either `EFFECTIVE` or * `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted * or its network is deleted. */ state: string; } interface RegionPerInstanceConfigPreservedState { /** * Stateful disks for the instance. * Structure is documented below. */ disks?: outputs.compute.RegionPerInstanceConfigPreservedStateDisk[]; /** * Preserved external IPs defined for this instance. This map is keyed with the name of the network interface. * Structure is documented below. */ externalIps?: outputs.compute.RegionPerInstanceConfigPreservedStateExternalIp[]; /** * Preserved internal IPs defined for this instance. This map is keyed with the name of the network interface. * Structure is documented below. */ internalIps?: outputs.compute.RegionPerInstanceConfigPreservedStateInternalIp[]; /** * Preserved metadata defined for this instance. This is a list of key->value pairs. */ metadata?: { [key: string]: string; }; } interface RegionPerInstanceConfigPreservedStateDisk { /** * A value that prescribes what should happen to the stateful disk when the VM instance is deleted. * The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. 
* `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. * `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently * deleted from the instance group. * Default value is `NEVER`. * Possible values are: `NEVER`, `ON_PERMANENT_INSTANCE_DELETION`. */ deleteRule?: string; /** * A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. */ deviceName: string; /** * The mode of the disk. * Default value is `READ_WRITE`. * Possible values are: `READ_ONLY`, `READ_WRITE`. */ mode?: string; /** * The URI of an existing persistent disk to attach under the specified device-name in the format * `projects/project-id/zones/zone/disks/disk-name`. */ source: string; } interface RegionPerInstanceConfigPreservedStateExternalIp { /** * These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. * Default value is `NEVER`. * Possible values are: `NEVER`, `ON_PERMANENT_INSTANCE_DELETION`. */ autoDelete?: string; /** * The identifier for this object. Format specified above. */ interfaceName: string; /** * Ip address representation * Structure is documented below. */ ipAddress?: outputs.compute.RegionPerInstanceConfigPreservedStateExternalIpIpAddress; } interface RegionPerInstanceConfigPreservedStateExternalIpIpAddress { /** * The URL of the reservation for this IP address. */ address?: string; } interface RegionPerInstanceConfigPreservedStateInternalIp { /** * These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. * Default value is `NEVER`. 
* Possible values are: `NEVER`, `ON_PERMANENT_INSTANCE_DELETION`. */ autoDelete?: string; /** * The identifier for this object. Format specified above. */ interfaceName: string; /** * Ip address representation * Structure is documented below. */ ipAddress?: outputs.compute.RegionPerInstanceConfigPreservedStateInternalIpIpAddress; } interface RegionPerInstanceConfigPreservedStateInternalIpIpAddress { /** * The URL of the reservation for this IP address. */ address?: string; } interface RegionResizeRequestRequestedRunDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 600 to 604800 inclusive. Note: minimum and maximum allowed range for requestedRunDuration is 10 minutes (600 seconds) and 7 days(604800 seconds) correspondingly. */ seconds: string; } interface RegionResizeRequestStatus { /** * (Output) * Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. * Structure is documented below. */ errors: outputs.compute.RegionResizeRequestStatusError[]; /** * (Output) * Information about the last attempt to fulfill the request. The value is temporary since the ResizeRequest can retry, as long as it's still active and the last attempt value can either be cleared or replaced with a different error. Since ResizeRequest retries infrequently, the value may be stale and no longer show an active problem. The value is cleared when ResizeRequest transitions to the final state (becomes inactive). If the final state is FAILED the error describing it will be stored in the "error" field only. 
* Structure is documented below. */ lastAttempts: outputs.compute.RegionResizeRequestStatusLastAttempt[]; } interface RegionResizeRequestStatusError { /** * (Output) * The array of errors encountered while processing this operation. * Structure is documented below. */ errors: outputs.compute.RegionResizeRequestStatusErrorError[]; } interface RegionResizeRequestStatusErrorError { /** * (Output) * The error type identifier for this error. */ code: string; /** * (Output) * An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. * Structure is documented below. */ errorDetails: outputs.compute.RegionResizeRequestStatusErrorErrorErrorDetail[]; /** * (Output) * Indicates the field in the request that caused the error. This property is optional. */ location: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface RegionResizeRequestStatusErrorErrorErrorDetail { /** * (Output) * A nested object resource. * Structure is documented below. */ errorInfos: outputs.compute.RegionResizeRequestStatusErrorErrorErrorDetailErrorInfo[]; /** * (Output) * A nested object resource. * Structure is documented below. */ helps: outputs.compute.RegionResizeRequestStatusErrorErrorErrorDetailHelp[]; /** * (Output) * A nested object resource. * Structure is documented below. */ localizedMessages: outputs.compute.RegionResizeRequestStatusErrorErrorErrorDetailLocalizedMessage[]; /** * (Output) * A nested object resource. * Structure is documented below. */ quotaInfos: outputs.compute.RegionResizeRequestStatusErrorErrorErrorDetailQuotaInfo[]; } interface RegionResizeRequestStatusErrorErrorErrorDetailErrorInfo { /** * (Output) * The logical grouping to which the "reason" belongs. 
The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". */ domain: string; /** * (Output) * Additional structured details about this error. */ metadatas: { [key: string]: string; }; /** * (Output) * The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. */ reason: string; } interface RegionResizeRequestStatusErrorErrorErrorDetailHelp { /** * (Output) * A nested object resource. * Structure is documented below. */ links: outputs.compute.RegionResizeRequestStatusErrorErrorErrorDetailHelpLink[]; } interface RegionResizeRequestStatusErrorErrorErrorDetailHelpLink { /** * An optional description of this resize-request. */ description: string; /** * (Output) * The URL of the link. */ url: string; } interface RegionResizeRequestStatusErrorErrorErrorDetailLocalizedMessage { /** * (Output) * The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" */ locale: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface RegionResizeRequestStatusErrorErrorErrorDetailQuotaInfo { /** * (Output) * The map holding related quota dimensions */ dimensions: { [key: string]: string; }; /** * (Output) * Future quota limit being rolled out. The limit's unit depends on the quota type or metric. */ futureLimit: number; /** * (Output) * Current effective quota limit. The limit's unit depends on the quota type or metric. */ limit: number; /** * (Output) * The name of the quota limit. */ limitName: string; /** * (Output) * The Compute Engine quota metric name. */ metricName: string; /** * (Output) * Rollout status of the future quota limit. 
*/ rolloutStatus: string; } interface RegionResizeRequestStatusLastAttempt { /** * (Output) * Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. * Structure is documented below. */ errors: outputs.compute.RegionResizeRequestStatusLastAttemptError[]; } interface RegionResizeRequestStatusLastAttemptError { /** * (Output) * The array of errors encountered while processing this operation. * Structure is documented below. */ errors: outputs.compute.RegionResizeRequestStatusLastAttemptErrorError[]; } interface RegionResizeRequestStatusLastAttemptErrorError { /** * (Output) * The error type identifier for this error. */ code: string; /** * (Output) * An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. * Structure is documented below. */ errorDetails: outputs.compute.RegionResizeRequestStatusLastAttemptErrorErrorErrorDetail[]; /** * (Output) * Indicates the field in the request that caused the error. This property is optional. */ location: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface RegionResizeRequestStatusLastAttemptErrorErrorErrorDetail { /** * (Output) * A nested object resource. * Structure is documented below. */ errorInfos: outputs.compute.RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailErrorInfo[]; /** * (Output) * A nested object resource. * Structure is documented below. */ helps: outputs.compute.RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailHelp[]; /** * (Output) * A nested object resource. * Structure is documented below. 
*/ localizedMessages: outputs.compute.RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailLocalizedMessage[]; /** * (Output) * A nested object resource. * Structure is documented below. */ quotaInfos: outputs.compute.RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailQuotaInfo[]; } interface RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailErrorInfo { /** * (Output) * The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". */ domain: string; /** * (Output) * Additional structured details about this error. */ metadatas: { [key: string]: string; }; /** * (Output) * The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. */ reason: string; } interface RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailHelp { /** * (Output) * A nested object resource. * Structure is documented below. */ links: outputs.compute.RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailHelpLink[]; } interface RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailHelpLink { /** * An optional description of this resize-request. */ description: string; /** * (Output) * The URL of the link. */ url: string; } interface RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailLocalizedMessage { /** * (Output) * The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" */ locale: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface RegionResizeRequestStatusLastAttemptErrorErrorErrorDetailQuotaInfo { /** * (Output) * The map holding related quota dimensions */ dimensions: { [key: string]: string; }; /** * (Output) * Future quota limit being rolled out. 
The limit's unit depends on the quota type or metric. */ futureLimit: number; /** * (Output) * Current effective quota limit. The limit's unit depends on the quota type or metric. */ limit: number; /** * (Output) * The name of the quota limit. */ limitName: string; /** * (Output) * The Compute Engine quota metric name. */ metricName: string; /** * (Output) * Rollout status of the future quota limit. */ rolloutStatus: string; } interface RegionSecurityPolicyAdvancedOptionsConfig { /** * Custom configuration to apply the JSON parsing. Only applicable when JSON parsing is set to STANDARD. * Structure is documented below. */ jsonCustomConfig?: outputs.compute.RegionSecurityPolicyAdvancedOptionsConfigJsonCustomConfig; /** * JSON body parsing. Supported values include: "DISABLED", "STANDARD", "STANDARD_WITH_GRAPHQL". * Possible values are: `DISABLED`, `STANDARD`, `STANDARD_WITH_GRAPHQL`. */ jsonParsing?: string; /** * Logging level. Supported values include: "NORMAL", "VERBOSE". * Possible values are: `NORMAL`, `VERBOSE`. */ logLevel?: string; /** * (Optional, Beta) * The maximum request size chosen by the customer with Waf enabled. Values supported are "8KB", "16KB", "32KB", "48KB" and "64KB". * Values are case insensitive. * Possible values are: `8KB`, `16KB`, `32KB`, `48KB`, `64KB`. */ requestBodyInspectionSize?: string; /** * An optional list of case-insensitive request header names to use for resolving the callers client IP address. */ userIpRequestHeaders?: string[]; } interface RegionSecurityPolicyAdvancedOptionsConfigJsonCustomConfig { /** * A list of custom Content-Type header values to apply the JSON parsing. */ contentTypes: string[]; } interface RegionSecurityPolicyDdosProtectionConfig { /** * Google Cloud Armor offers the following options to help protect systems against DDoS attacks: * - STANDARD: basic always-on protection for network load balancers, protocol forwarding, or VMs with public IP addresses. 
* - ADVANCED: additional protections for Managed Protection Plus subscribers who use network load balancers, protocol forwarding, or VMs with public IP addresses. * - ADVANCED_PREVIEW: flag to enable the security policy in preview mode. * Possible values are: `ADVANCED`, `ADVANCED_PREVIEW`, `STANDARD`. */ ddosProtection: string; } interface RegionSecurityPolicyRule { /** * The Action to perform when the rule is matched. The following are the valid actions: * * allow: allow access to target. * * deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. * * rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. * * redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. * * throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. */ action: string; /** * An optional description of this resource. Provide this property when you create the resource. */ description?: string; /** * A match condition that incoming traffic is evaluated against. * If it evaluates to true, the corresponding 'action' is enforced. * Structure is documented below. */ match?: outputs.compute.RegionSecurityPolicyRuleMatch; /** * A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. 
* The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). * Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. * Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. * For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. * Example: * networkMatch: srcIpRanges: - "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: "ipv4FragmentOffset" values: - "1-0x1fff" * The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named "ipv4FragmentOffset" with a value between 1 and 0x1fff inclusive * Structure is documented below. */ networkMatch?: outputs.compute.RegionSecurityPolicyRuleNetworkMatch; /** * Preconfigured WAF configuration to be applied for the rule. * If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. * Structure is documented below. */ preconfiguredWafConfig?: outputs.compute.RegionSecurityPolicyRulePreconfiguredWafConfig; /** * If set to true, the specified action is not enforced. */ preview?: boolean; /** * An integer indicating the priority of a rule in the list. * The priority must be a positive value between 0 and 2147483647. 
* Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. */ priority: number; /** * Must be specified if the action is "rateBasedBan" or "throttle". Cannot be specified for any other actions. * Structure is documented below. */ rateLimitOptions?: outputs.compute.RegionSecurityPolicyRuleRateLimitOptions; } interface RegionSecurityPolicyRuleMatch { /** * The configuration options available when specifying versionedExpr. * This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. * Structure is documented below. */ config?: outputs.compute.RegionSecurityPolicyRuleMatchConfig; /** * User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. * Structure is documented below. */ expr?: outputs.compute.RegionSecurityPolicyRuleMatchExpr; /** * Preconfigured versioned expression. If this field is specified, config must also be specified. * Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. * Possible values are: `SRC_IPS_V1`. */ versionedExpr?: string; } interface RegionSecurityPolicyRuleMatchConfig { /** * CIDR IP address range. Maximum number of srcIpRanges allowed is 10. */ srcIpRanges?: string[]; } interface RegionSecurityPolicyRuleMatchExpr { /** * Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. */ expression: string; } interface RegionSecurityPolicyRuleNetworkMatch { /** * Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format. */ destIpRanges?: string[]; /** * Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). 
*/ destPorts?: string[]; /** * IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. "6"), range (e.g. "253-254"), or one of the following protocol names: "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". */ ipProtocols?: string[]; /** * BGP Autonomous System Number associated with the source IP address. */ srcAsns?: number[]; /** * Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format. */ srcIpRanges?: string[]; /** * Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). */ srcPorts?: string[]; /** * Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address. */ srcRegionCodes?: string[]; /** * User-defined fields. Each element names a defined field and lists the matching values for that field. * Structure is documented below. */ userDefinedFields?: outputs.compute.RegionSecurityPolicyRuleNetworkMatchUserDefinedField[]; } interface RegionSecurityPolicyRuleNetworkMatchUserDefinedField { /** * Name of the user-defined field, as given in the definition. */ name?: string; /** * Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with "0x") number (e.g. "64") or range (e.g. "0x400-0x7ff"). */ values?: string[]; } interface RegionSecurityPolicyRulePreconfiguredWafConfig { /** * An exclusion to apply during preconfigured WAF evaluation. * Structure is documented below. */ exclusions?: outputs.compute.RegionSecurityPolicyRulePreconfiguredWafConfigExclusion[]; } interface RegionSecurityPolicyRulePreconfiguredWafConfigExclusion { /** * Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. * Structure is documented below. 
*/ requestCookies?: outputs.compute.RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky[]; /** * Request header whose value will be excluded from inspection during preconfigured WAF evaluation. * Structure is documented below. */ requestHeaders?: outputs.compute.RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader[]; /** * Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. * Note that the parameter can be in the query string or in the POST body. * Structure is documented below. */ requestQueryParams?: outputs.compute.RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam[]; /** * Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. * When specifying this field, the query or fragment part should be excluded. * Structure is documented below. */ requestUris?: outputs.compute.RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri[]; /** * A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. * If omitted, it refers to all the rule IDs under the WAF rule set. */ targetRuleIds?: string[]; /** * Target WAF rule set to apply the preconfigured WAF exclusion. */ targetRuleSet: string; } interface RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. * Possible values are: `CONTAINS`, `ENDS_WITH`, `EQUALS`, `EQUALS_ANY`, `STARTS_WITH`. 
*/ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. * Possible values are: `CONTAINS`, `ENDS_WITH`, `EQUALS`, `EQUALS_ANY`, `STARTS_WITH`. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. * Possible values are: `CONTAINS`, `ENDS_WITH`, `EQUALS`, `EQUALS_ANY`, `STARTS_WITH`. 
*/ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface RegionSecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. * Possible values are: `CONTAINS`, `ENDS_WITH`, `EQUALS`, `EQUALS_ANY`, `STARTS_WITH`. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface RegionSecurityPolicyRuleRateLimitOptions { /** * Can only be specified if the action for the rule is "rateBasedBan". * If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. */ banDurationSec?: number; /** * Can only be specified if the action for the rule is "rateBasedBan". * If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. * Structure is documented below. 
*/ banThreshold?: outputs.compute.RegionSecurityPolicyRuleRateLimitOptionsBanThreshold; /** * Action to take for requests that are under the configured rate limit threshold. * Valid option is "allow" only. */ conformAction?: string; /** * Determines the key to enforce the rateLimitThreshold on. Possible values are: * * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. * * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. * * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. * * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. * * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. * * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. * * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. * * REGION_CODE: The country/region from which the request originates. * * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * TLS_JA4_FINGERPRINT: JA4 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. 
* * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. * Possible values are: `ALL`, `IP`, `HTTP_HEADER`, `XFF_IP`, `HTTP_COOKIE`, `HTTP_PATH`, `SNI`, `REGION_CODE`, `TLS_JA3_FINGERPRINT`, `TLS_JA4_FINGERPRINT`, `USER_IP`. */ enforceOnKey?: string; /** * If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. * You can specify up to 3 enforceOnKeyConfigs. * If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. * Structure is documented below. */ enforceOnKeyConfigs?: outputs.compute.RegionSecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig[]; /** * Rate limit key name applicable only for the following key types: * HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. * HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName?: string; /** * Action to take for requests that are above the configured rate limit threshold, to deny with a specified HTTP response code. * Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502. */ exceedAction?: string; /** * Threshold at which to begin ratelimiting. * Structure is documented below. */ rateLimitThreshold?: outputs.compute.RegionSecurityPolicyRuleRateLimitOptionsRateLimitThreshold; } interface RegionSecurityPolicyRuleRateLimitOptionsBanThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count?: number; /** * Interval over which the threshold is computed. 
*/ intervalSec?: number; } interface RegionSecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig { /** * Rate limit key name applicable only for the following key types: * HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. * HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName?: string; /** * Determines the key to enforce the rateLimitThreshold on. Possible values are: * * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. * * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. * * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. * * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. * * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. * * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. * * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. * * REGION_CODE: The country/region from which the request originates. * * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. 
* * TLS_JA4_FINGERPRINT: JA4 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. * Possible values are: `ALL`, `IP`, `HTTP_HEADER`, `XFF_IP`, `HTTP_COOKIE`, `HTTP_PATH`, `SNI`, `REGION_CODE`, `TLS_JA3_FINGERPRINT`, `TLS_JA4_FINGERPRINT`, `USER_IP`. */ enforceOnKeyType?: string; } interface RegionSecurityPolicyRuleRateLimitOptionsRateLimitThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count?: number; /** * Interval over which the threshold is computed. */ intervalSec?: number; } interface RegionSecurityPolicyUserDefinedField { /** * The base relative to which 'offset' is measured. Possible values are: * - IPV4: Points to the beginning of the IPv4 header. * - IPV6: Points to the beginning of the IPv6 header. * - TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. * - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. * Possible values are: `IPV4`, `IPV6`, `TCP`, `UDP`. */ base: string; /** * If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. * Encoded as a hexadecimal number (starting with "0x"). * The last byte of the field (in network byte order) corresponds to the least significant byte of the mask. */ mask?: string; /** * Name of the user-defined field, as given in the definition. */ name?: string; /** * Offset of the first byte of the field (in network byte order) relative to 'base'. */ offset?: number; /** * Size of the field in bytes. Valid values: 1-4. 
*/ size?: number; } interface RegionUrlMapDefaultRouteAction { /** * The specification for allowing client side cross-origin requests. Please see * [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) * Structure is documented below. */ corsPolicy?: outputs.compute.RegionUrlMapDefaultRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. * As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. * Similarly requests from clients can be aborted by the load balancer for a percentage of requests. * timeout and retryPolicy is ignored by clients that are configured with a faultInjectionPolicy if: 1. The traffic is generated by fault injection AND 2. The fault injection is not a delay fault injection. * Fault injection is not supported with the global external HTTP(S) load balancer (classic). To see which load balancers support fault injection, see Load balancing: [Routing and traffic management features](https://cloud.google.com/load-balancing/docs/features#routing-traffic-management). * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.RegionUrlMapDefaultRouteActionFaultInjectionPolicy; /** * Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. * The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. * Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. * Structure is documented below. 
*/ requestMirrorPolicy?: outputs.compute.RegionUrlMapDefaultRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.RegionUrlMapDefaultRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as end-of-stream) up until the response has been processed. Timeout includes all retries. * If not specified, this field uses the largest timeout among all backend services associated with the route. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ timeout?: outputs.compute.RegionUrlMapDefaultRouteActionTimeout; /** * The spec to modify the URL of the request, before forwarding the request to the matched service. * urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. * Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. * Structure is documented below. */ urlRewrite?: outputs.compute.RegionUrlMapDefaultRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. * After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. * Structure is documented below. 
*/ weightedBackendServices?: outputs.compute.RegionUrlMapDefaultRouteActionWeightedBackendService[]; } interface RegionUrlMapDefaultRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. * Default is false. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, the setting specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface RegionUrlMapDefaultRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.RegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. 
*/ delay?: outputs.compute.RegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay; } interface RegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus?: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface RegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. */ fixedDelay?: outputs.compute.RegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface RegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface RegionUrlMapDefaultRouteActionRequestMirrorPolicy { /** * The full or partial URL to the RegionBackendService resource being mirrored to. * The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. * Serverless NEG backends are not currently supported as a mirrored backend service. */ backendService?: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. 
* The value must be between 0.0 and 100.0 inclusive. */ mirrorPercent?: number; } interface RegionUrlMapDefaultRouteActionRetryPolicy { /** * Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.RegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry policy applies. * Valid values are listed below. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. * - 5xx : retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. * - gateway-error : Similar to 5xx, but only applies to response codes 502, 503 or 504. * - connect-failure : a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. * - retriable-4xx : a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. * - refused-stream : a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. * - cancelled : a retry is attempted if the gRPC status code in the response header is set to cancelled. * - deadline-exceeded : a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. 
* - internal : a retry is attempted if the gRPC status code in the response header is set to internal. * - resource-exhausted : a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. * - unavailable : a retry is attempted if the gRPC status code in the response header is set to unavailable. */ retryConditions?: string[]; } interface RegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface RegionUrlMapDefaultRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface RegionUrlMapDefaultRouteActionUrlRewrite { /** * Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. * The value must be from 1 to 255 characters. */ hostRewrite?: string; /** * Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. * The value must be from 1 to 1024 characters. 
*/ pathPrefixRewrite?: string; } interface RegionUrlMapDefaultRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. */ backendService?: string; /** * Specifies changes to request and response headers that need to take effect for the selected backendService. * headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. * headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ headerAction?: outputs.compute.RegionUrlMapDefaultRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. * The value must be from 0 to 1000. */ weight?: number; } interface RegionUrlMapDefaultRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response before sending the response back to the client. 
* Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace?: boolean; } interface RegionUrlMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace?: boolean; } interface RegionUrlMapDefaultUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was * supplied in the request. The value must be between 1 and 255 characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. If set to * false, the URL scheme of the redirected request will remain the same as that of the * request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this * true for TargetHttpsProxy is not permitted. The default is set to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was * supplied in the request. 
pathRedirect cannot be supplied together with * prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the * original request will be used for the redirect. The value must be between 1 and 1024 * characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, * retaining the remaining portion of the URL before redirecting the request. * prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or * neither. If neither is supplied, the path of the original request will be used for * the redirect. The value must be between 1 and 1024 characters. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method * will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, * the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed prior * to redirecting the request. If set to false, the query portion of the original URL is * retained. * This field is required to ensure an empty block is not set. The normal default value is false. */ stripQuery: boolean; } interface RegionUrlMapHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add to the response before sending the response back to the client. 
* Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace?: boolean; } interface RegionUrlMapHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace?: boolean; } interface RegionUrlMapHostRule { /** * An optional description of this HostRule. Provide this property * when you create the resource. */ description?: string; /** * The list of host patterns to match. They must be valid * hostnames, except * will match any string of ([a-z0-9-.]*). In * that case, * must be the first character and must be followed in * the pattern by either - or .. */ hosts: string[]; /** * The name of the PathMatcher to use to match the path portion of * the URL if the hostRule matches the URL's host portion. */ pathMatcher: string; } interface RegionUrlMapPathMatcher { /** * defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs * advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request * to the selected backend. 
If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. * Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. * Only one of defaultRouteAction or defaultUrlRedirect must be set. * Structure is documented below. */ defaultRouteAction?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteAction; /** * A reference to a RegionBackendService resource. This will be used if * none of the pathRules defined by this PathMatcher is matched by * the URL's path portion. */ defaultService?: string; /** * When none of the specified hostRules match, the request is redirected to a URL specified * by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or * defaultRouteAction must not be set. * Structure is documented below. */ defaultUrlRedirect?: outputs.compute.RegionUrlMapPathMatcherDefaultUrlRedirect; /** * An optional description of this resource. */ description?: string; /** * Specifies changes to request and response headers that need to take effect for the selected backendService. * headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. * headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ headerAction?: outputs.compute.RegionUrlMapPathMatcherHeaderAction; /** * The name to which this PathMatcher is referred by the HostRule. */ name: string; /** * The list of path rules. Use this list instead of routeRules when routing based * on simple path matching is all that's required. The order by which path rules * are specified does not matter. Matches are always done on the longest-path-first * basis. 
For example: a pathRule with a path /a/b/c/* will match before /a/b/* * irrespective of the order in which those paths appear in this list. Within a * given pathMatcher, only one of pathRules or routeRules must be set. * Structure is documented below. */ pathRules?: outputs.compute.RegionUrlMapPathMatcherPathRule[]; /** * The list of ordered HTTP route rules. Use this list instead of pathRules when * advanced route matching and routing actions are desired. The order of specifying * routeRules matters: the first rule that matches will cause its specified routing * action to take effect. Within a given pathMatcher, only one of pathRules or * routeRules must be set. routeRules are not supported in UrlMaps intended for * External load balancers. * Structure is documented below. */ routeRules?: outputs.compute.RegionUrlMapPathMatcherRouteRule[]; } interface RegionUrlMapPathMatcherDefaultRouteAction { /** * The specification for allowing client side cross-origin requests. Please see * [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) * Structure is documented below. */ corsPolicy?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. * As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. * Similarly requests from clients can be aborted by the load balancer for a percentage of requests. * timeout and retryPolicy is ignored by clients that are configured with a faultInjectionPolicy if: 1. The traffic is generated by fault injection AND 2. The fault injection is not a delay fault injection. * Fault injection is not supported with the global external HTTP(S) load balancer (classic). 
To see which load balancers support fault injection, see Load balancing: [Routing and traffic management features](https://cloud.google.com/load-balancing/docs/features#routing-traffic-management). * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy; /** * Specifies the maximum duration (timeout) for streams on the selected route. * Unlike the `Timeout` field where the timeout duration starts from the time the request * has been fully processed (known as end-of-stream), the duration in this field * is computed from the beginning of the stream until the response has been processed, * including all retries. A stream that does not complete in this duration is closed. * Structure is documented below. */ maxStreamDuration: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionMaxStreamDuration; /** * Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. * The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. * Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. * Structure is documented below. */ requestMirrorPolicy?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as end-of-stream) up until the response has been processed. Timeout includes all retries. * If not specified, this field uses the largest timeout among all backend services associated with the route. 
* Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ timeout: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionTimeout; /** * The spec to modify the URL of the request, before forwarding the request to the matched service. * urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. * Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. * Structure is documented below. */ urlRewrite?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. * After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. * Structure is documented below. */ weightedBackendServices?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendService[]; } interface RegionUrlMapPathMatcherDefaultRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. * Default is false. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. 
For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, the setting specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. */ delay?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay; } interface RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus?: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. 
*/ fixedDelay?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface RegionUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface RegionUrlMapPathMatcherDefaultRouteActionMaxStreamDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy { /** * The full or partial URL to the RegionBackendService resource being mirrored to. * The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. * Serverless NEG backends are not currently supported as a mirrored backend service. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. 
*/ mirrorPercent?: number; } interface RegionUrlMapPathMatcherDefaultRouteActionRetryPolicy { /** * Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry policy applies. * Valid values are listed below. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. * - 5xx : retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. * - gateway-error : Similar to 5xx, but only applies to response codes 502, 503 or 504. * - connect-failure : a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. * - retriable-4xx : a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. * - refused-stream : a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. * - cancelled : a retry is attempted if the gRPC status code in the response header is set to cancelled. * - deadline-exceeded : a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. 
* - internal : a retry is attempted if the gRPC status code in the response header is set to internal. * - resource-exhausted : a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. * - unavailable : a retry is attempted if the gRPC status code in the response header is set to unavailable. */ retryConditions?: string[]; } interface RegionUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface RegionUrlMapPathMatcherDefaultRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface RegionUrlMapPathMatcherDefaultRouteActionUrlRewrite { /** * Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. * The value must be from 1 to 255 characters. */ hostRewrite?: string; /** * Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. * The value must be from 1 to 1024 characters. 
*/ pathPrefixRewrite?: string; /** * If specified, the pattern rewrites the URL path (based on the :path header) using the HTTP template syntax. * A corresponding pathTemplateMatch must be specified. Any template variables must exist in the pathTemplateMatch field. * * At least one variable must be specified in the pathTemplateMatch field * * You can omit variables from the rewritten URL * * The * and ** operators cannot be matched unless they have a corresponding variable name - e.g. {format=*} or {var=**}. * For example, a pathTemplateMatch of /static/{format=**} could be rewritten as /static/content/{format} to prefix * /content to the URL. Variables can also be re-ordered in a rewrite, so that /{country}/{format}/{suffix=**} can be * rewritten as /content/{format}/{country}/{suffix}. * At least one non-empty routeRules[].matchRules[].path_template_match is required. * Only one of pathPrefixRewrite or pathTemplateRewrite may be specified. */ pathTemplateRewrite?: string; } interface RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. */ backendService?: string; /** * Specifies changes to request and response headers that need to take effect for the selected backendService. * headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. * headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. 
*/ headerAction?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. * The value must be from 0 to 1000. */ weight?: number; } interface RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add to the response before sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. 
*/ replace?: boolean; } interface RegionUrlMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace?: boolean; } interface RegionUrlMapPathMatcherDefaultUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was * supplied in the request. The value must be between 1 and 255 characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. If set to * false, the URL scheme of the redirected request will remain the same as that of the * request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this * true for TargetHttpsProxy is not permitted. The default is set to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was * supplied in the request. pathRedirect cannot be supplied together with * prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the * original request will be used for the redirect. The value must be between 1 and 1024 * characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, * retaining the remaining portion of the URL before redirecting the request. * prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or * neither. If neither is supplied, the path of the original request will be used for * the redirect. The value must be between 1 and 1024 characters. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. 
Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method * will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, * the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed prior * to redirecting the request. If set to false, the query portion of the original URL is * retained. * This field is required to ensure an empty block is not set. The normal default value is false. */ stripQuery: boolean; } interface RegionUrlMapPathMatcherHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add to the response before sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapPathMatcherHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. 
*/ replace?: boolean; } interface RegionUrlMapPathMatcherHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace?: boolean; } interface RegionUrlMapPathMatcherPathRule { /** * The list of path patterns to match. Each must start with / and the only place a * \* is allowed is at the end following a /. The string fed to the path matcher * does not include any text after the first ? or #, and those chars are not * allowed here. */ paths: string[]; /** * In response to a matching path, the load balancer performs advanced routing * actions like URL rewrites, header transformations, etc. prior to forwarding the * request to the selected backend. If routeAction specifies any * weightedBackendServices, service must not be set. Conversely if service is set, * routeAction cannot contain any weightedBackendServices. Only one of routeAction * or urlRedirect must be set. * Structure is documented below. */ routeAction?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteAction; /** * The region backend service resource to which traffic is * directed if this rule is matched. If routeAction is additionally specified, * advanced routing actions like URL Rewrites, etc. take effect prior to sending * the request to the backend. However, if service is specified, routeAction cannot * contain any weightedBackendService s. Conversely, if routeAction specifies any * weightedBackendServices, service must not be specified. Only one of urlRedirect, * service or routeAction.weightedBackendService must be set. */ service?: string; /** * When a path pattern is matched, the request is redirected to a URL specified * by urlRedirect. 
If urlRedirect is specified, service or routeAction must not * be set. * Structure is documented below. */ urlRedirect?: outputs.compute.RegionUrlMapPathMatcherPathRuleUrlRedirect; } interface RegionUrlMapPathMatcherPathRuleRouteAction { /** * The specification for allowing client side cross-origin requests. Please see W3C * Recommendation for Cross Origin Resource Sharing * Structure is documented below. */ corsPolicy?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the * resiliency of clients to backend service failure. As part of fault injection, * when clients send requests to a backend service, delays can be introduced by * Loadbalancer on a percentage of requests before sending those requests to the * backend service. Similarly requests from clients can be aborted by the * Loadbalancer for a percentage of requests. timeout and retryPolicy will be * ignored by clients that are configured with a fault_injection_policy. * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy; /** * Specifies the policy on how requests intended for the route's backends are * shadowed to a separate mirrored backend service. Loadbalancer does not wait for * responses from the shadow service. Prior to sending traffic to the shadow * service, the host / authority header is suffixed with -shadow. * Structure is documented below. */ requestMirrorPolicy?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time * the request has been fully processed (i.e. 
end-of-stream) up until the * response has been completely processed. Timeout includes all retries. If not * specified, the default value is 15 seconds. * Structure is documented below. */ timeout?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionTimeout; /** * The spec to modify the URL of the request, prior to forwarding the request to * the matched service * Structure is documented below. */ urlRewrite?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match * occurs. The weights determine the fraction of traffic that flows to their * corresponding backend service. If all traffic needs to go to a single backend * service, there must be one weightedBackendService with weight set to a non 0 * number. Once a backendService is identified and before forwarding the request to * the backend service, advanced routing actions like Url rewrites and header * transformations are applied depending on additional settings specified in this * HttpRouteAction. * Structure is documented below. */ weightedBackendServices?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendService[]; } interface RegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. * Default is false. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.
*/ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, the setting specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. */ delay?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay; } interface RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage: number; } interface RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below.
*/ fixedDelay: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage: number; } interface RegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy { /** * The full or partial URL to the RegionBackendService resource being mirrored to. * The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. * Serverless NEG backends are not currently supported as a mirrored backend service. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. */ mirrorPercent?: number; } interface RegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy { /** * Specifies the allowed number retries. This number must be > 0. If not specified, defaults to 1. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. 
*/ perTryTimeout?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry policy applies. * Valid values are listed below. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. * - 5xx : retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. * - gateway-error : Similar to 5xx, but only applies to response codes 502, 503 or 504. * - connect-failure : a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. * - retriable-4xx : a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. * - refused-stream : a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. * - cancelled : a retry is attempted if the gRPC status code in the response header is set to cancelled. * - deadline-exceeded : a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. * - internal : a retry is attempted if the gRPC status code in the response header is set to internal. * - resource-exhausted : a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. * - unavailable : a retry is attempted if the gRPC status code in the response header is set to unavailable. */ retryConditions?: string[]; } interface RegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. 
Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherPathRuleRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite { /** * Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. * The value must be from 1 to 255 characters. */ hostRewrite?: string; /** * Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. * The value must be from 1 to 1024 characters. */ pathPrefixRewrite?: string; } interface RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. */ backendService: string; /** * Specifies changes to request and response headers that need to take effect for the selected backendService. * headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. 
* headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ headerAction?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. * The value must be from 0 to 1000. */ weight: number; } interface RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response before sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName: string; /** * The value of the header to add. 
*/ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace: boolean; } interface RegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace: boolean; } interface RegionUrlMapPathMatcherPathRuleUrlRedirect { /** * The host that will be used in the redirect response instead of the one * that was supplied in the request. The value must be between 1 and 255 * characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. * If set to false, the URL scheme of the redirected request will remain the * same as that of the request. This must only be set for UrlMaps used in * TargetHttpProxys. Setting this true for TargetHttpsProxy is not * permitted. The default is set to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one * that was supplied in the request. pathRedirect cannot be supplied * together with prefixRedirect. Supply one alone or neither. If neither is * supplied, the path of the original request will be used for the redirect. * The value must be between 1 and 1024 characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the * HttpRouteRuleMatch, retaining the remaining portion of the URL before * redirecting the request. prefixRedirect cannot be supplied together with * pathRedirect. Supply one alone or neither. 
If neither is supplied, the * path of the original request will be used for the redirect. The value * must be between 1 and 1024 characters. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method * will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, * the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed * prior to redirecting the request. If set to false, the query portion of the * original URL is retained. * This field is required to ensure an empty block is not set. The normal default value is false. */ stripQuery: boolean; } interface RegionUrlMapPathMatcherRouteRule { /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. The headerAction specified here are applied before * the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r * outeAction.weightedBackendService.backendServiceWeightAction[].headerAction * Structure is documented below. */ headerAction?: outputs.compute.RegionUrlMapPathMatcherRouteRuleHeaderAction; /** * The rules for determining a match. * Structure is documented below. */ matchRules?: outputs.compute.RegionUrlMapPathMatcherRouteRuleMatchRule[]; /** * For routeRules within a given pathMatcher, priority determines the order * in which load balancer will interpret routeRules. RouteRules are evaluated * in order of priority, from the lowest to highest number. The priority of * a rule decreases as its number increases (1, 2, 3, N+1). The first rule * that matches the request is applied. 
* You cannot configure two or more routeRules with the same priority. * Priority for each rule must be set to a number between 0 and * 2147483647 inclusive. * Priority numbers can have gaps, which enable you to add or remove rules * in the future without affecting the rest of the rules. For example, * 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which * you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the * future without any impact on existing rules. */ priority: number; /** * In response to a matching matchRule, the load balancer performs advanced routing * actions like URL rewrites, header transformations, etc. prior to forwarding the * request to the selected backend. If routeAction specifies any * weightedBackendServices, service must not be set. Conversely if service is set, * routeAction cannot contain any weightedBackendServices. Only one of routeAction * or urlRedirect must be set. * Structure is documented below. */ routeAction?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteAction; /** * The region backend service resource to which traffic is * directed if this rule is matched. If routeAction is additionally specified, * advanced routing actions like URL Rewrites, etc. take effect prior to sending * the request to the backend. However, if service is specified, routeAction cannot * contain any weightedBackendService s. Conversely, if routeAction specifies any * weightedBackendServices, service must not be specified. Only one of urlRedirect, * service or routeAction.weightedBackendService must be set. */ service?: string; /** * When this rule is matched, the request is redirected to a URL specified by * urlRedirect. If urlRedirect is specified, service or routeAction must not be * set. * Structure is documented below. 
*/ urlRedirect?: outputs.compute.RegionUrlMapPathMatcherRouteRuleUrlRedirect; } interface RegionUrlMapPathMatcherRouteRuleHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherRouteRuleHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response before sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherRouteRuleHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapPathMatcherRouteRuleHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace: boolean; } interface RegionUrlMapPathMatcherRouteRuleHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. 
*/ replace: boolean; } interface RegionUrlMapPathMatcherRouteRuleMatchRule { /** * For satisfying the matchRule condition, the path of the request must exactly * match the value specified in fullPathMatch after removing any query parameters * and anchor that may be part of the original URL. FullPathMatch must be between 1 * and 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must * be specified. */ fullPathMatch?: string; /** * Specifies a list of header match criteria, all of which must match corresponding * headers in the request. * Structure is documented below. */ headerMatches?: outputs.compute.RegionUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch[]; /** * Specifies that prefixMatch and fullPathMatch matches are case sensitive. * Defaults to false. */ ignoreCase?: boolean; /** * Opaque filter criteria used by Loadbalancer to restrict routing configuration to * a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS * clients present node metadata. If a match takes place, the relevant routing * configuration is made available to those proxies. For each metadataFilter in * this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the * filterLabels must match the corresponding label provided in the metadata. If its * filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match * with corresponding labels in the provided metadata. metadataFilters specified * here can override those specified in ForwardingRule that refers to this * UrlMap. metadataFilters only applies to Loadbalancers that have their * loadBalancingScheme set to INTERNAL_SELF_MANAGED. * Structure is documented below.
*/ metadataFilters?: outputs.compute.RegionUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter[]; /** * For satisfying the matchRule condition, the path of the request * must match the wildcard pattern specified in pathTemplateMatch * after removing any query parameters and anchor that may be part * of the original URL. * pathTemplateMatch must be between 1 and 255 characters * (inclusive). The pattern specified by pathTemplateMatch may * have at most 5 wildcard operators and at most 5 variable * captures in total. */ pathTemplateMatch?: string; /** * For satisfying the matchRule condition, the request's path must begin with the * specified prefixMatch. prefixMatch must begin with a /. The value must be * between 1 and 1024 characters. Only one of prefixMatch, fullPathMatch or * regexMatch must be specified. */ prefixMatch?: string; /** * Specifies a list of query parameter match criteria, all of which must match * corresponding query parameters in the request. * Structure is documented below. */ queryParameterMatches?: outputs.compute.RegionUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch[]; /** * For satisfying the matchRule condition, the path of the request must satisfy the * regular expression specified in regexMatch after removing any query parameters * and anchor supplied with the original URL. For regular expression grammar please * see en.cppreference.com/w/cpp/regex/ecmascript Only one of prefixMatch, * fullPathMatch or regexMatch must be specified. */ regexMatch?: string; } interface RegionUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch { /** * The value should exactly match contents of exactMatch. Only one of exactMatch, * prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. */ exactMatch?: string; /** * The name of the HTTP header to match. For matching against the HTTP request's * authority, use a headerMatch with the header name ":authority". For matching a * request's method, use the headerName ":method". 
*/ headerName: string; /** * If set to false, the headerMatch is considered a match if the match criteria * above are met. If set to true, the headerMatch is considered a match if the * match criteria above are NOT met. Defaults to false. */ invertMatch?: boolean; /** * The value of the header must start with the contents of prefixMatch. Only one of * exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch * must be set. */ prefixMatch?: string; /** * A header with the contents of headerName must exist. The match takes place * whether or not the request's header has a value or not. Only one of exactMatch, * prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. */ presentMatch?: boolean; /** * The header value must be an integer and its value must be in the range specified * in rangeMatch. If the header does not contain an integer, number or is empty, * the match fails. For example for a range [-5, 0] * * -3 will match * * 0 will not match * * 0.25 will not match * * -3someString will not match. * Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or * rangeMatch must be set. * Structure is documented below. */ rangeMatch?: outputs.compute.RegionUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch; /** * The value of the header must match the regular expression specified in * regexMatch. For regular expression grammar, please see: * en.cppreference.com/w/cpp/regex/ecmascript For matching against a port * specified in the HTTP request, use a headerMatch with headerName set to PORT and * a regular expression that satisfies the RFC2616 Host header's port specifier. * Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or * rangeMatch must be set. */ regexMatch?: string; /** * The value of the header must end with the contents of suffixMatch. Only one of * exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch * must be set. 
*/ suffixMatch?: string; } interface RegionUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch { /** * The end of the range (exclusive). */ rangeEnd: number; /** * The start of the range (inclusive). */ rangeStart: number; } interface RegionUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter { /** * The list of label value pairs that must match labels in the provided metadata * based on filterMatchCriteria This list must not be empty and can have at the * most 64 entries. * Structure is documented below. */ filterLabels: outputs.compute.RegionUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel[]; /** * Specifies how individual filterLabel matches within the list of filterLabels * contribute towards the overall metadataFilter match. Supported values are: * * MATCH_ANY: At least one of the filterLabels must have a matching label in the * provided metadata. * * MATCH_ALL: All filterLabels must have matching labels in * the provided metadata. * Possible values are: `MATCH_ALL`, `MATCH_ANY`. */ filterMatchCriteria: string; } interface RegionUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel { /** * Name of metadata label. The name can have a maximum length of 1024 characters * and must be at least 1 character long. */ name: string; /** * The value of the label must match the specified value. value can have a maximum * length of 1024 characters. */ value: string; } interface RegionUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch { /** * The queryParameterMatch matches if the value of the parameter exactly matches * the contents of exactMatch. Only one of presentMatch, exactMatch and regexMatch * must be set. */ exactMatch?: string; /** * The name of the query parameter to match. The query parameter must exist in the * request, in the absence of which the request match fails. 
*/ name: string; /** * Specifies that the queryParameterMatch matches if the request contains the query * parameter, irrespective of whether the parameter has a value or not. Only one of * presentMatch, exactMatch and regexMatch must be set. */ presentMatch?: boolean; /** * The queryParameterMatch matches if the value of the parameter matches the * regular expression specified by regexMatch. For the regular expression grammar, * please see en.cppreference.com/w/cpp/regex/ecmascript Only one of presentMatch, * exactMatch and regexMatch must be set. */ regexMatch?: string; } interface RegionUrlMapPathMatcherRouteRuleRouteAction { /** * The specification for allowing client side cross-origin requests. Please see W3C * Recommendation for Cross Origin Resource Sharing * Structure is documented below. */ corsPolicy?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the * resiliency of clients to backend service failure. As part of fault injection, * when clients send requests to a backend service, delays can be introduced by * Loadbalancer on a percentage of requests before sending those request to the * backend service. Similarly requests from clients can be aborted by the * Loadbalancer for a percentage of requests. timeout and retryPolicy will be * ignored by clients that are configured with a fault_injection_policy. * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy; /** * Specifies the policy on how requests intended for the route's backends are * shadowed to a separate mirrored backend service. Loadbalancer does not wait for * responses from the shadow service. Prior to sending traffic to the shadow * service, the host / authority header is suffixed with -shadow. * Structure is documented below. 
*/ requestMirrorPolicy?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time * the request has been fully processed (i.e. end-of-stream) up until the * response has been completely processed. Timeout includes all retries. If not * specified, the default value is 15 seconds. * Structure is documented below. */ timeout?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionTimeout; /** * The spec to modify the URL of the request, prior to forwarding the request to * the matched service * Structure is documented below. */ urlRewrite?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match * occurs. The weights determine the fraction of traffic that flows to their * corresponding backend service. If all traffic needs to go to a single backend * service, there must be one weightedBackendService with weight set to a non 0 * number. Once a backendService is identified and before forwarding the request to * the backend service, advanced routing actions like Url rewrites and header * transformations are applied depending on additional settings specified in this * HttpRouteAction. * Structure is documented below. */ weightedBackendServices?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService[]; } interface RegionUrlMapPathMatcherRouteRuleRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. * Default is false.
*/ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, the setting specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. */ delay?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay; } interface RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive.
*/ httpStatus?: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. */ fixedDelay?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface RegionUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy { /** * The full or partial URL to the RegionBackendService resource being mirrored to. * The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. * Serverless NEG backends are not currently supported as a mirrored backend service. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. */ mirrorPercent?: number; } interface RegionUrlMapPathMatcherRouteRuleRouteActionRetryPolicy { /** * Specifies the allowed number retries. This number must be > 0. 
If not specified, defaults to 1. */ numRetries: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry policy applies. * Valid values are listed below. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. * - 5xx : retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. * - gateway-error : Similar to 5xx, but only applies to response codes 502, 503 or 504. * - connect-failure : a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. * - retriable-4xx : a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. * - refused-stream : a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. * - cancelled : a retry is attempted if the gRPC status code in the response header is set to cancelled. * - deadline-exceeded : a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. * - internal : a retry is attempted if the gRPC status code in the response header is set to internal. * - resource-exhausted : a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. 
* - unavailable : a retry is attempted if the gRPC status code in the response header is set to unavailable. */ retryConditions?: string[]; } interface RegionUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherRouteRuleRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface RegionUrlMapPathMatcherRouteRuleRouteActionUrlRewrite { /** * Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. * The value must be from 1 to 255 characters. */ hostRewrite?: string; /** * Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. * The value must be from 1 to 1024 characters. */ pathPrefixRewrite?: string; /** * If specified, the pattern rewrites the URL path (based on the :path header) using the HTTP template syntax. * A corresponding pathTemplateMatch must be specified. Any template variables must exist in the pathTemplateMatch field. 
* * At least one variable must be specified in the pathTemplateMatch field * * You can omit variables from the rewritten URL * * The * and ** operators cannot be matched unless they have a corresponding variable name - e.g. {format=*} or {var=**}. * For example, a pathTemplateMatch of /static/{format=**} could be rewritten as /static/content/{format} to prefix * /content to the URL. Variables can also be re-ordered in a rewrite, so that /{country}/{format}/{suffix=**} can be * rewritten as /content/{format}/{country}/{suffix}. * At least one non-empty routeRules[].matchRules[].path_template_match is required. * Only one of pathPrefixRewrite or pathTemplateRewrite may be specified. */ pathTemplateRewrite?: string; } interface RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. */ backendService: string; /** * Specifies changes to request and response headers that need to take effect for the selected backendService. * headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. * headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ headerAction?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. 
Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. * The value must be from 0 to 1000. */ weight: number; } interface RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request before forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response before sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response before sending the response back to the client. */ responseHeadersToRemoves?: string[]; } interface RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace: boolean; } interface RegionUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. 
If true, headerValue is set for the header, discarding any values that were set for that header. * The default value is false. */ replace: boolean; } interface RegionUrlMapPathMatcherRouteRuleUrlRedirect { /** * The host that will be used in the redirect response instead of the one * that was supplied in the request. The value must be between 1 and 255 * characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. * If set to false, the URL scheme of the redirected request will remain the * same as that of the request. This must only be set for UrlMaps used in * TargetHttpProxys. Setting this true for TargetHttpsProxy is not * permitted. The default is set to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one * that was supplied in the request. pathRedirect cannot be supplied * together with prefixRedirect. Supply one alone or neither. If neither is * supplied, the path of the original request will be used for the redirect. * The value must be between 1 and 1024 characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the * HttpRouteRuleMatch, retaining the remaining portion of the URL before * redirecting the request. prefixRedirect cannot be supplied together with * pathRedirect. Supply one alone or neither. If neither is supplied, the * path of the original request will be used for the redirect. The value * must be between 1 and 1024 characters. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method * will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. 
In this case, * the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed * prior to redirecting the request. If set to false, the query portion of the * original URL is retained. * This field is required to ensure an empty block is not set. The normal default value is false. */ stripQuery?: boolean; } interface RegionUrlMapTest { /** * Description of this test case. */ description?: string; /** * Host portion of the URL. */ host: string; /** * Path portion of the URL. */ path: string; /** * A reference to expected RegionBackendService resource the given URL should be mapped to. */ service: string; } interface ReservationDeleteAfterDuration { /** * Number of nanoseconds for the auto-delete duration. */ nanos?: number; /** * Number of seconds for the auto-delete duration. */ seconds?: string; } interface ReservationReservationSharingPolicy { /** * Sharing config for all Google Cloud services. * Possible values are: `ALLOW_ALL`, `DISALLOW_ALL`. */ serviceShareType: string; } interface ReservationResourceStatus { /** * (Output) * Health information for the reservation. * Structure is documented below. */ healthInfos: outputs.compute.ReservationResourceStatusHealthInfo[]; /** * (Output) * The number of reservation blocks associated with this reservation. */ reservationBlockCount: number; /** * (Output) * Maintenance information for this reservation * Structure is documented below. */ reservationMaintenances: outputs.compute.ReservationResourceStatusReservationMaintenance[]; /** * (Output) * Allocation Properties of this reservation. * Structure is documented below. */ specificSkuAllocations: outputs.compute.ReservationResourceStatusSpecificSkuAllocation[]; } interface ReservationResourceStatusHealthInfo { /** * (Output) * The number of reservation blocks that are degraded. */ degradedBlockCount: number; /** * (Output) * The health status of the reservation. 
*/ healthStatus: string; /** * (Output) * The number of reservation blocks that are healthy. */ healthyBlockCount: number; } interface ReservationResourceStatusReservationMaintenance { /** * (Output) * Describes number of instances that have ongoing maintenance. */ instanceMaintenanceOngoingCount: number; /** * (Output) * Describes number of instances that have pending maintenance. */ instanceMaintenancePendingCount: number; /** * (Output) * Progress for ongoing maintenance for this group of VMs/hosts. Describes number of hosts in the block that have ongoing maintenance. */ maintenanceOngoingCount: number; /** * (Output) * Progress for ongoing maintenance for this group of VMs/hosts. Describes number of hosts in the block that have pending maintenance. */ maintenancePendingCount: number; /** * (Output) * The type of maintenance for the reservation. */ schedulingType: string; /** * (Output) * Describes number of subblock Infrastructure that has ongoing maintenance. Here, Subblock Infrastructure Maintenance pertains to upstream hardware contained in the Subblock that is necessary for a VM Family(e.g. NVLink Domains). Not all VM Families will support this field. */ subblockInfraMaintenanceOngoingCount: number; /** * (Output) * Describes number of subblock Infrastructure that has pending maintenance. Here, Subblock Infrastructure Maintenance pertains to upstream hardware contained in the Subblock that is necessary for a VM Family (e.g. NVLink Domains). Not all VM Families will support this field. */ subblockInfraMaintenancePendingCount: number; /** * (Output) * Maintenance information on this group of VMs. * Structure is documented below. */ upcomingGroupMaintenances: outputs.compute.ReservationResourceStatusReservationMaintenanceUpcomingGroupMaintenance[]; } interface ReservationResourceStatusReservationMaintenanceUpcomingGroupMaintenance { /** * (Output) * Indicates if the maintenance can be customer triggered. 
*/ canReschedule: boolean; /** * (Output) * The latest time for the planned maintenance window to start. This timestamp value is in RFC3339 text format. */ latestWindowStartTime: string; /** * (Output) * Indicates whether the UpcomingMaintenance will be triggered on VM shutdown. */ maintenanceOnShutdown: boolean; /** * (Output) * The reasons for the maintenance. Only valid for vms. */ maintenanceReasons: string[]; /** * (Output) * Status of the maintenance. */ maintenanceStatus: string; /** * (Output) * Defines the type of maintenance. */ type: string; /** * (Output) * The time by which the maintenance disruption will be completed. This timestamp value is in RFC3339 text format. */ windowEndTime: string; /** * (Output) * The current start time of the maintenance window. This timestamp value is in RFC3339 text format. */ windowStartTime: string; } interface ReservationResourceStatusSpecificSkuAllocation { /** * (Output) * ID of the instance template used to populate reservation properties. */ sourceInstanceTemplateId: string; /** * (Output) * Per service utilization breakdown. The Key is the Google Cloud managed service name. */ utilizations: { [key: string]: string; }; } interface ReservationShareSettings { /** * A map of project number and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. * Structure is documented below. */ projectMaps?: outputs.compute.ReservationShareSettingsProjectMap[]; /** * (Optional, Beta) * List of project IDs with which the reservation is shared. */ projects?: string[]; /** * Type of sharing for this shared-reservation * Possible values are: `LOCAL`, `SPECIFIC_PROJECTS`. */ shareType: string; } interface ReservationShareSettingsProjectMap { /** * The identifier for this object. Format specified above. */ id: string; /** * The project id/number, should be same as the key of this project config in the project map. 
*/ projectId?: string; } interface ReservationSpecificReservation { /** * (Output) * Indicates how many instances are actually usable currently. */ assuredCount: number; /** * The number of resources that are allocated. */ count: number; /** * (Output) * How many instances are in use. */ inUseCount: number; /** * The instance properties for the reservation. * Structure is documented below. */ instanceProperties: outputs.compute.ReservationSpecificReservationInstanceProperties; /** * Specifies the instance template to create the reservation. If you use this field, you must exclude the * instanceProperties field. */ sourceInstanceTemplate?: string; } interface ReservationSpecificReservationInstanceProperties { /** * Guest accelerator type and count. * Structure is documented below. */ guestAccelerators?: outputs.compute.ReservationSpecificReservationInstancePropertiesGuestAccelerator[]; /** * The amount of local ssd to reserve with each instance. This * reserves disks of type `local-ssd`. * Structure is documented below. */ localSsds?: outputs.compute.ReservationSpecificReservationInstancePropertiesLocalSsd[]; /** * (Output) * An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. */ locationHint: string; /** * The name of the machine type to reserve. */ machineType: string; /** * (Optional, Beta) * Specifies the frequency of planned maintenance events. * Possible values are: `AS_NEEDED`, `PERIODIC`, `RECURRENT`. */ maintenanceInterval?: string; /** * The minimum CPU platform for the reservation. For example, * `"Intel Skylake"`. See * the CPU platform availability reference](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) * for information on available CPU platforms. 
*/ minCpuPlatform: string; } interface ReservationSpecificReservationInstancePropertiesGuestAccelerator { /** * The number of the guest accelerator cards exposed to * this instance. */ acceleratorCount: number; /** * The full or partial URL of the accelerator type to * attach to this instance. For example: * `projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100` * If you are creating an instance template, specify only the accelerator name. */ acceleratorType: string; } interface ReservationSpecificReservationInstancePropertiesLocalSsd { /** * The size of the disk in base-2 GB. */ diskSizeGb: number; /** * The disk interface to use for attaching this disk. * Default value is `SCSI`. * Possible values are: `SCSI`, `NVME`. */ interface?: string; } interface ResizeRequestRequestedRunDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 600 to 604800 inclusive. Note: minimum and maximum allowed range for requestedRunDuration is 10 minutes (600 seconds) and 7 days(604800 seconds) correspondingly. */ seconds: string; } interface ResizeRequestStatus { /** * (Output) * Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. * Structure is documented below. */ errors: outputs.compute.ResizeRequestStatusError[]; /** * (Output) * Information about the last attempt to fulfill the request. The value is temporary since the ResizeRequest can retry, as long as it's still active and the last attempt value can either be cleared or replaced with a different error. 
Since ResizeRequest retries infrequently, the value may be stale and no longer show an active problem. The value is cleared when ResizeRequest transitions to the final state (becomes inactive). If the final state is FAILED the error describing it will be storred in the "error" field only. * Structure is documented below. */ lastAttempts: outputs.compute.ResizeRequestStatusLastAttempt[]; } interface ResizeRequestStatusError { /** * (Output) * The array of errors encountered while processing this operation. * Structure is documented below. */ errors: outputs.compute.ResizeRequestStatusErrorError[]; } interface ResizeRequestStatusErrorError { /** * (Output) * The error type identifier for this error. */ code: string; /** * (Output) * An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. * Structure is documented below. */ errorDetails: outputs.compute.ResizeRequestStatusErrorErrorErrorDetail[]; /** * (Output) * Indicates the field in the request that caused the error. This property is optional. */ location: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface ResizeRequestStatusErrorErrorErrorDetail { /** * (Output) * A nested object resource. * Structure is documented below. */ errorInfos: outputs.compute.ResizeRequestStatusErrorErrorErrorDetailErrorInfo[]; /** * (Output) * A nested object resource. * Structure is documented below. */ helps: outputs.compute.ResizeRequestStatusErrorErrorErrorDetailHelp[]; /** * (Output) * A nested object resource. * Structure is documented below. */ localizedMessages: outputs.compute.ResizeRequestStatusErrorErrorErrorDetailLocalizedMessage[]; /** * (Output) * A nested object resource. * Structure is documented below. 
*/ quotaInfos: outputs.compute.ResizeRequestStatusErrorErrorErrorDetailQuotaInfo[]; } interface ResizeRequestStatusErrorErrorErrorDetailErrorInfo { /** * (Output) * The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". */ domain: string; /** * (Output) * Additional structured details about this error. */ metadatas: { [key: string]: string; }; /** * (Output) * The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. */ reason: string; } interface ResizeRequestStatusErrorErrorErrorDetailHelp { /** * (Output) * A nested object resource. * Structure is documented below. */ links: outputs.compute.ResizeRequestStatusErrorErrorErrorDetailHelpLink[]; } interface ResizeRequestStatusErrorErrorErrorDetailHelpLink { /** * An optional description of this resize-request. */ description: string; /** * (Output) * The URL of the link. */ url: string; } interface ResizeRequestStatusErrorErrorErrorDetailLocalizedMessage { /** * (Output) * The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" */ locale: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface ResizeRequestStatusErrorErrorErrorDetailQuotaInfo { /** * (Output) * The map holding related quota dimensions */ dimensions: { [key: string]: string; }; /** * (Output) * Future quota limit being rolled out. The limit's unit depends on the quota type or metric. */ futureLimit: number; /** * (Output) * Current effective quota limit. The limit's unit depends on the quota type or metric. */ limit: number; /** * (Output) * The name of the quota limit. */ limitName: string; /** * (Output) * The Compute Engine quota metric name. 
*/ metricName: string; /** * (Output) * Rollout status of the future quota limit. */ rolloutStatus: string; } interface ResizeRequestStatusLastAttempt { /** * (Output) * Fatal errors encountered during the queueing or provisioning phases of the ResizeRequest that caused the transition to the FAILED state. Contrary to the lastAttempt errors, this field is final and errors are never removed from here, as the ResizeRequest is not going to retry. * Structure is documented below. */ errors: outputs.compute.ResizeRequestStatusLastAttemptError[]; } interface ResizeRequestStatusLastAttemptError { /** * (Output) * The array of errors encountered while processing this operation. * Structure is documented below. */ errors: outputs.compute.ResizeRequestStatusLastAttemptErrorError[]; } interface ResizeRequestStatusLastAttemptErrorError { /** * (Output) * The error type identifier for this error. */ code: string; /** * (Output) * An array of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. * Structure is documented below. */ errorDetails: outputs.compute.ResizeRequestStatusLastAttemptErrorErrorErrorDetail[]; /** * (Output) * Indicates the field in the request that caused the error. This property is optional. */ location: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface ResizeRequestStatusLastAttemptErrorErrorErrorDetail { /** * (Output) * A nested object resource. * Structure is documented below. */ errorInfos: outputs.compute.ResizeRequestStatusLastAttemptErrorErrorErrorDetailErrorInfo[]; /** * (Output) * A nested object resource. * Structure is documented below. */ helps: outputs.compute.ResizeRequestStatusLastAttemptErrorErrorErrorDetailHelp[]; /** * (Output) * A nested object resource. * Structure is documented below. 
*/ localizedMessages: outputs.compute.ResizeRequestStatusLastAttemptErrorErrorErrorDetailLocalizedMessage[]; /** * (Output) * A nested object resource. * Structure is documented below. */ quotaInfos: outputs.compute.ResizeRequestStatusLastAttemptErrorErrorErrorDetailQuotaInfo[]; } interface ResizeRequestStatusLastAttemptErrorErrorErrorDetailErrorInfo { /** * (Output) * The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". */ domain: string; /** * (Output) * Additional structured details about this error. */ metadatas: { [key: string]: string; }; /** * (Output) * The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. */ reason: string; } interface ResizeRequestStatusLastAttemptErrorErrorErrorDetailHelp { /** * (Output) * A nested object resource. * Structure is documented below. */ links: outputs.compute.ResizeRequestStatusLastAttemptErrorErrorErrorDetailHelpLink[]; } interface ResizeRequestStatusLastAttemptErrorErrorErrorDetailHelpLink { /** * An optional description of this resize-request. */ description: string; /** * (Output) * The URL of the link. */ url: string; } interface ResizeRequestStatusLastAttemptErrorErrorErrorDetailLocalizedMessage { /** * (Output) * The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" */ locale: string; /** * (Output) * The localized error message in the above locale. */ message: string; } interface ResizeRequestStatusLastAttemptErrorErrorErrorDetailQuotaInfo { /** * (Output) * The map holding related quota dimensions */ dimensions: { [key: string]: string; }; /** * (Output) * Future quota limit being rolled out. The limit's unit depends on the quota type or metric. 
*/ futureLimit: number; /** * (Output) * Current effective quota limit. The limit's unit depends on the quota type or metric. */ limit: number; /** * (Output) * The name of the quota limit. */ limitName: string; /** * (Output) * The Compute Engine quota metric name. */ metricName: string; /** * (Output) * Rollout status of the future quota limit. */ rolloutStatus: string; } interface ResourcePolicyDiskConsistencyGroupPolicy { /** * Enable disk consistency on the resource policy. */ enabled: boolean; } interface ResourcePolicyGroupPlacementPolicy { /** * The number of availability domains instances will be spread across. If two instances are in different * availability domain, they will not be put in the same low latency network */ availabilityDomainCount?: number; /** * Collocation specifies whether to place VMs inside the same availability domain on the same low-latency network. * Specify `COLLOCATED` to enable collocation. Can only be specified with `vmCount`. If compute instances are created * with a COLLOCATED policy, then exactly `vmCount` instances must be created at the same time with the resource policy * attached. * Possible values are: `COLLOCATED`. */ collocation?: string; /** * Specifies the shape of the GPU slice, in slice based GPU families eg. A4X. */ gpuTopology?: string; /** * (Optional, Beta) * Specifies the number of max logical switches. */ maxDistance?: number; /** * (Optional, Beta) * Specifies the shape of the TPU slice. */ tpuTopology?: string; /** * Number of VMs in this placement group. Google does not recommend that you use this field * unless you use a compact policy and you want your policy to work only if it contains this * exact number of VMs. */ vmCount?: number; } interface ResourcePolicyInstanceSchedulePolicy { /** * The expiration time of the schedule. The timestamp is an RFC3339 string. */ expirationTime?: string; /** * The start time of the schedule. The timestamp is an RFC3339 string. 
*/ startTime?: string; /** * Specifies the time zone to be used in interpreting the schedule. The value of this field must be a time zone name * from the tz database: http://en.wikipedia.org/wiki/Tz_database. */ timeZone: string; /** * Specifies the schedule for starting instances. * Structure is documented below. */ vmStartSchedule?: outputs.compute.ResourcePolicyInstanceSchedulePolicyVmStartSchedule; /** * Specifies the schedule for stopping instances. * Structure is documented below. */ vmStopSchedule?: outputs.compute.ResourcePolicyInstanceSchedulePolicyVmStopSchedule; } interface ResourcePolicyInstanceSchedulePolicyVmStartSchedule { /** * Specifies the frequency for the operation, using the unix-cron format. */ schedule: string; } interface ResourcePolicyInstanceSchedulePolicyVmStopSchedule { /** * Specifies the frequency for the operation, using the unix-cron format. */ schedule: string; } interface ResourcePolicySnapshotSchedulePolicy { /** * Retention policy applied to snapshots created by this resource policy. * Structure is documented below. */ retentionPolicy?: outputs.compute.ResourcePolicySnapshotSchedulePolicyRetentionPolicy; /** * Contains one of an `hourlySchedule`, `dailySchedule`, or `weeklySchedule`. * Structure is documented below. */ schedule: outputs.compute.ResourcePolicySnapshotSchedulePolicySchedule; /** * Properties with which the snapshots are created, such as labels. * Structure is documented below. */ snapshotProperties?: outputs.compute.ResourcePolicySnapshotSchedulePolicySnapshotProperties; } interface ResourcePolicySnapshotSchedulePolicyRetentionPolicy { /** * Maximum age of the snapshot that is allowed to be kept. */ maxRetentionDays: number; /** * Specifies the behavior to apply to scheduled snapshots when * the source disk is deleted. * Default value is `KEEP_AUTO_SNAPSHOTS`. * Possible values are: `KEEP_AUTO_SNAPSHOTS`, `APPLY_RETENTION_POLICY`. 
*/ onSourceDiskDelete?: string; } interface ResourcePolicySnapshotSchedulePolicySchedule { /** * The policy will execute every nth day at the specified time. * Structure is documented below. */ dailySchedule?: outputs.compute.ResourcePolicySnapshotSchedulePolicyScheduleDailySchedule; /** * The policy will execute every nth hour starting at the specified time. * Structure is documented below. */ hourlySchedule?: outputs.compute.ResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule; /** * Allows specifying a snapshot time for each day of the week. * Structure is documented below. */ weeklySchedule?: outputs.compute.ResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule; } interface ResourcePolicySnapshotSchedulePolicyScheduleDailySchedule { /** * Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. Days in cycle for snapshot schedule policy must be 1. */ daysInCycle: number; /** * This must be in UTC format that resolves to one of * 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, * both 13:00-5 and 08:00 are valid. */ startTime: string; } interface ResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule { /** * The number of hours between snapshots. */ hoursInCycle: number; /** * Time within the window to start the operations. * It must be in an hourly format "HH:MM", * where HH : [00-23] and MM : [00] GMT. eg: 21:00 */ startTime: string; } interface ResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule { /** * May contain up to seven (one for each day of the week) snapshot times. * Structure is documented below. */ dayOfWeeks: outputs.compute.ResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeek[]; } interface ResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeek { /** * The day of the week to create the snapshot. e.g. MONDAY * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. 
*/ day: string; /** * Time within the window to start the operations. * It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. */ startTime: string; } interface ResourcePolicySnapshotSchedulePolicySnapshotProperties { /** * Creates the new snapshot in the snapshot chain labeled with the * specified name. The chain name must be 1-63 characters long and comply * with RFC1035. */ chainName?: string; /** * Whether to perform a 'guest aware' snapshot. */ guestFlush?: boolean; /** * A set of key-value pairs. */ labels?: { [key: string]: string; }; /** * Cloud Storage bucket location to store the auto snapshot * (regional or multi-regional) */ storageLocations?: string; } interface ResourcePolicyWorkloadPolicy { /** * The accelerator topology. This field can be set only when the workload policy type is HIGH_THROUGHPUT * and cannot be set if max topology distance is set. */ acceleratorTopology?: string; /** * The maximum topology distance. This field can be set only when the workload policy type is HIGH_THROUGHPUT * and cannot be set if accelerator topology is set. * Possible values are: `BLOCK`, `CLUSTER`, `SUBBLOCK`. */ maxTopologyDistance?: string; /** * The type of workload policy. * Possible values are: `HIGH_AVAILABILITY`, `HIGH_THROUGHPUT`. */ type: string; } interface RolloutPlanWave { /** * The display name of this wave of the rollout plan. */ displayName?: string; /** * (Output) * The wave number. */ number: number; /** * Options to control the pace of orchestration of a wave. * Structure is documented below. */ orchestrationOptions?: outputs.compute.RolloutPlanWaveOrchestrationOptions; /** * The selectors for this wave. There is a logical AND between each selector * defined in a wave, so a resource must satisfy the criteria of *all* the * specified selectors to be in scope for the wave. * Structure is documented below. 
*/ selectors: outputs.compute.RolloutPlanWaveSelector[]; /** * The validation to be performed before progressing to the next wave. * Structure is documented below. */ validation: outputs.compute.RolloutPlanWaveValidation; } interface RolloutPlanWaveOrchestrationOptions { /** * Delays, if any, to be added between batches of projects. * Structure is documented below. */ delays?: outputs.compute.RolloutPlanWaveOrchestrationOptionsDelay[]; /** * Maximum number of locations to be orchestrated in parallel. */ maxConcurrentLocations?: number; /** * Maximum number of resources to be orchestrated per location in parallel. */ maxConcurrentResourcesPerLocation?: number; } interface RolloutPlanWaveOrchestrationOptionsDelay { /** * Controls whether the delay should only be added between batches of projects corresponding to different locations, or also between batches of projects corresponding to the same location. * Possible values are: `DELIMITER_UNSPECIFIED`, `DELIMITER_LOCATION`, `DELIMITER_BATCH`. */ delimiter?: string; /** * The duration of the delay, if any, to be added between batches of projects. */ duration?: string; /** * Controls whether the specified duration is to be added at the end of each batch, or if the total processing time for each batch will be padded if needed to meet the specified duration. * Possible values are: `TYPE_UNSPECIFIED`, `TYPE_OFFSET`, `TYPE_MINIMUM`. */ type?: string; } interface RolloutPlanWaveSelector { /** * Roll out to resources by location. * Structure is documented below. */ locationSelector?: outputs.compute.RolloutPlanWaveSelectorLocationSelector; /** * Roll out to resources by Cloud Resource Manager resource hierarchy nodes such as projects, folders, orgs. * Structure is documented below. 
*/ resourceHierarchySelector?: outputs.compute.RolloutPlanWaveSelectorResourceHierarchySelector; } interface RolloutPlanWaveSelectorLocationSelector { /** * Example: "us-central1-a" */ includedLocations?: string[]; } interface RolloutPlanWaveSelectorResourceHierarchySelector { /** * Format: "folders/{folder_id}" */ includedFolders?: string[]; /** * Format: "organizations/{organization_id}" */ includedOrganizations?: string[]; /** * Format: "projects/{project_id}" */ includedProjects?: string[]; } interface RolloutPlanWaveValidation { /** * Metadata required if type = "time". * Structure is documented below. */ timeBasedValidationMetadata?: outputs.compute.RolloutPlanWaveValidationTimeBasedValidationMetadata; /** * The type of the validation. Possible values: * "manual": The system waits for an end-user approval API before progressing to the next wave. * "time": The system waits for a user specified duration before progressing to the next wave. */ type: string; } interface RolloutPlanWaveValidationTimeBasedValidationMetadata { /** * The duration that the system waits in between waves. This wait starts * after all changes in the wave are rolled out. */ waitDuration?: string; } interface RouteAsPath { /** * (Output) * The AS numbers of the AS Path. */ asLists: number[]; /** * (Output) * The type of the AS Path, which can be one of the following values: * - 'AS_SET': unordered set of autonomous systems that the route in has traversed * - 'AS_SEQUENCE': ordered set of autonomous systems that the route has traversed * - 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the local confederation that the route has traversed * - 'AS_CONFED_SET': unordered set of Member Autonomous Systems in the local confederation that the route has traversed */ pathSegmentType: string; } interface RouteParams { /** * Resource manager tags to be bound to the route. Tag keys and values have the * same definition as resource manager tags. 
Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. The field is ignored when empty. * The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. */ resourceManagerTags?: { [key: string]: string; }; } interface RouteWarning { /** * (Output) * A warning code, if applicable. For example, Compute Engine returns * NO_RESULTS_ON_PAGE if there are no results in the response. */ code: string; /** * (Output) * Metadata about this warning in key: value format. For example: * "data": { "key": "scope", "value": "zones/us-east1-d" } * Structure is [documented below. */ datas: outputs.compute.RouteWarningData[]; /** * (Output) * A human-readable description of the warning code. */ message: string; } interface RouteWarningData { /** * (Output) * A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). */ key: string; /** * (Output) * A warning data value corresponding to the key. */ value: string; } interface RouterBgp { /** * User-specified flag to indicate which mode to use for advertisement. * Default value is `DEFAULT`. * Possible values are: `DEFAULT`, `CUSTOM`. */ advertiseMode?: string; /** * User-specified list of prefix groups to advertise in custom mode. * This field can only be populated if advertiseMode is CUSTOM and * is advertised to all peers of the router. These groups will be * advertised in addition to any specified prefixes. 
Leave this field * blank to advertise no custom groups. * This enum field has the one valid value: ALL_SUBNETS */ advertisedGroups?: string[]; /** * User-specified list of individual IP ranges to advertise in * custom mode. This field can only be populated if advertiseMode * is CUSTOM and is advertised to all peers of the router. These IP * ranges will be advertised in addition to any specified groups. * Leave this field blank to advertise no custom IP ranges. * Structure is documented below. */ advertisedIpRanges?: outputs.compute.RouterBgpAdvertisedIpRange[]; /** * Local BGP Autonomous System Number (ASN). Must be an RFC6996 * private ASN, either 16-bit or 32-bit. The value will be fixed for * this router resource. All VPN tunnels that link to this router * will have the same local ASN. */ asn: number; /** * Explicitly specifies a range of valid BGP Identifiers for this Router. * It is provided as a link-local IPv4 range (from 169.254.0.0/16), of * size at least /30, even if the BGP sessions are over IPv6. It must * not overlap with any IPv4 BGP session ranges. Other vendors commonly * call this router ID. */ identifierRange: string; /** * The interval in seconds between BGP keepalive messages that are sent * to the peer. Hold time is three times the interval at which keepalive * messages are sent, and the hold time is the maximum number of seconds * allowed to elapse between successive keepalive messages that BGP * receives from a peer. * BGP will use the smaller of either the local hold time value or the * peer's hold time value as the hold time for the BGP connection * between the two peers. If set, this value must be between 20 and 60. * The default is 20. */ keepaliveInterval?: number; } interface RouterBgpAdvertisedIpRange { /** * User-specified description for the IP range. * * The `md5AuthenticationKeys` block supports: */ description?: string; /** * The IP range to advertise. The value must be a * CIDR-formatted string. 
*/ range: string; } interface RouterMd5AuthenticationKeys { /** * Value of the key used for MD5 authentication. */ key: string; /** * Name of the resource. The name must be 1-63 characters long, and * comply with RFC1035. Specifically, the name must be 1-63 characters * long and match the regular expression `a-z?` * which means the first character must be a lowercase letter, and all * following characters must be a dash, lowercase letter, or digit, * except the last character, which cannot be a dash. */ name: string; } interface RouterNatLogConfig { /** * Indicates whether or not to export logs. */ enable: boolean; /** * Specifies the desired filtering of logs on this NAT. * Possible values are: `ERRORS_ONLY`, `TRANSLATIONS_ONLY`, `ALL`. */ filter: string; } interface RouterNatNat64Subnetwork { /** * Name of the NAT service. The name must be 1-63 characters long and * comply with RFC1035. */ name: string; } interface RouterNatRule { /** * The action to be enforced for traffic that matches this rule. * Structure is documented below. */ action: outputs.compute.RouterNatRuleAction; /** * An optional description of this rule. */ description?: string; /** * CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. * If it evaluates to true, the corresponding action is enforced. * The following examples are valid match expressions for public NAT: * "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" * "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" * The following example is a valid match expression for private NAT: * "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" */ match: string; /** * An integer uniquely identifying a rule in the list. * The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. 
*/ ruleNumber: number; } interface RouterNatRuleAction { /** * A list of URLs of the IP resources used for this NAT rule. * These IP addresses must be valid static external IP addresses assigned to the project. * This field is used for public NAT. */ sourceNatActiveIps?: string[]; /** * A list of URLs of the subnetworks used as source ranges for this NAT Rule. * These subnetworks must have purpose set to PRIVATE_NAT. * This field is used for private NAT. */ sourceNatActiveRanges?: string[]; /** * A list of URLs of the IP resources to be drained. * These IPs must be valid static external IPs that have been assigned to the NAT. * These IPs should be used for updating/patching a NAT rule only. * This field is used for public NAT. */ sourceNatDrainIps?: string[]; /** * A list of URLs of subnetworks representing source ranges to be drained. * This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. * This field is used for private NAT. */ sourceNatDrainRanges?: string[]; } interface RouterNatSubnetwork { /** * Self-link of the subnetwork resource that will use NAT64 */ name: string; /** * List of the secondary ranges of the subnetwork that are allowed * to use NAT. This can be populated only if * `LIST_OF_SECONDARY_IP_RANGES` is one of the values in * sourceIpRangesToNat * * The `nat64Subnetwork` block supports: */ secondaryIpRangeNames?: string[]; /** * List of options for which source IPs in the subnetwork * should have NAT enabled. Supported values include: * `ALL_IP_RANGES`, `LIST_OF_SECONDARY_IP_RANGES`, * `PRIMARY_IP_RANGE`. */ sourceIpRangesToNats: string[]; } interface RouterParams { /** * Resource manager tags to be bound to the router. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. 
*/ resourceManagerTags?: { [key: string]: string; }; } interface RouterPeerAdvertisedIpRange { /** * User-specified description for the IP range. */ description?: string; /** * The IP range to advertise. The value must be a * CIDR-formatted string. */ range: string; } interface RouterPeerBfd { /** * The minimum interval, in milliseconds, between BFD control packets * received from the peer router. The actual value is negotiated * between the two routers and is equal to the greater of this value * and the transmit interval of the other router. If set, this value * must be between 1000 and 30000. */ minReceiveInterval?: number; /** * The minimum interval, in milliseconds, between BFD control packets * transmitted to the peer router. The actual value is negotiated * between the two routers and is equal to the greater of this value * and the corresponding receive interval of the other router. If set, * this value must be between 1000 and 30000. */ minTransmitInterval?: number; /** * The number of consecutive BFD packets that must be missed before * BFD declares that a peer is unavailable. If set, the value must * be a value between 5 and 16. * * The `md5AuthenticationKey` block supports: */ multiplier?: number; /** * The BFD session initialization mode for this BGP peer. * If set to `ACTIVE`, the Cloud Router will initiate the BFD session * for this BGP peer. If set to `PASSIVE`, the Cloud Router will wait * for the peer router to initiate the BFD session for this BGP peer. * If set to `DISABLED`, BFD is disabled for this BGP peer. * Possible values are: `ACTIVE`, `DISABLED`, `PASSIVE`. */ sessionInitializationMode: string; } interface RouterPeerCustomLearnedIpRange { /** * The IP range to learn. The value must be a * CIDR-formatted string. */ range: string; } interface RouterPeerMd5AuthenticationKey { /** * Value of the key. */ key: string; /** * Name of this BGP peer. The name must be 1-63 characters long, * and comply with RFC1035. 
Specifically, the name must be 1-63 characters * long and match the regular expression `a-z?` which * means the first character must be a lowercase letter, and all * following characters must be a dash, lowercase letter, or digit, * except the last character, which cannot be a dash. */ name: string; } interface RouterRoutePolicyTerm { /** * 'CEL expressions to evaluate to modify a route when this term matches.'\ * Structure is documented below. */ actions?: outputs.compute.RouterRoutePolicyTermAction[]; /** * CEL expression evaluated against a route to determine if this term applies (see Policy Language). * Structure is documented below. */ match: outputs.compute.RouterRoutePolicyTermMatch; /** * The evaluation priority for this term, which must be between 0 (inclusive) and 231 (exclusive), and unique within the list. */ priority: number; } interface RouterRoutePolicyTermAction { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression * Language syntax. */ expression: string; /** * String indicating the location of the expression for error * reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its * purpose. */ title?: string; } interface RouterRoutePolicyTermMatch { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. */ title?: string; } interface RouterStatusBestRoute { asPaths: outputs.compute.RouterStatusBestRouteAsPath[]; /** * Creation timestamp in RFC3339 text format. */ creationTimestamp: string; /** * An optional description of this resource. 
Provide this property * when you create the resource. */ description: string; /** * The destination range of outgoing packets that this route applies to. * Only IPv4 is supported. */ destRange: string; /** * The name of the router. */ name: string; /** * The network name or resource link to the parent * network of this subnetwork. */ network: string; /** * URL to a gateway that should handle matching packets. * Currently, you can only specify the internet gateway, using a full or * partial valid URL: * * 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' * * 'projects/project/global/gateways/default-internet-gateway' * * 'global/gateways/default-internet-gateway' * * The string 'default-internet-gateway'. */ nextHopGateway: string; /** * The hub network that should handle matching packets, which should conform to RFC1035. */ nextHopHub: string; /** * The IP address or URL to a forwarding rule of type * loadBalancingScheme=INTERNAL that should handle matching * packets. * * With the GA provider you can only specify the forwarding * rule as a partial or full URL. For example, the following * are all valid values: * * 10.128.0.56 * * https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule * * regions/region/forwardingRules/forwardingRule * * When the beta provider, you can also specify the IP address * of a forwarding rule from the same VPC or any peered VPC. * * Note that this can only be used when the destinationRange is * a public (non-RFC 1918) IP CIDR range. */ nextHopIlb: string; /** * URL to an instance that should handle matching packets. * You can specify this as a full or partial URL. For example: * * 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' * * 'projects/project/zones/zone/instances/instance' * * 'zones/zone/instances/instance' * * Just the instance name, with the zone in 'next_hop_instance_zone'. 
*/ nextHopInstance: string; /** * The zone of the instance specified in next_hop_instance. Omit if nextHopInstance is specified as a URL. */ nextHopInstanceZone: string; /** * Internal fixed region-to-region cost that Google Cloud calculates based on factors such as network performance, distance, and available bandwidth between regions. */ nextHopInterRegionCost: string; /** * Network IP address of an instance that should handle matching packets. */ nextHopIp: string; /** * Multi-Exit Discriminator, a BGP route metric that indicates the desirability of a particular route in a network. */ nextHopMed: string; /** * URL to a Network that should handle matching packets. */ nextHopNetwork: string; /** * Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. */ nextHopOrigin: string; /** * The network peering name that should handle matching packets, which should conform to RFC1035. */ nextHopPeering: string; /** * URL to a VpnTunnel that should handle matching packets. */ nextHopVpnTunnel: string; /** * Additional params passed with the request, but not persisted as part of resource payload */ params: outputs.compute.RouterStatusBestRouteParam[]; /** * The priority of this route. Priority is used to break ties in cases * where there is more than one matching route of equal prefix length. * * In the case of two routes with equal prefix length, the one with the * lowest-numbered priority value wins. * * Default value is 1000. Valid range is 0 through 65535. */ priority: number; /** * The ID of the project in which the resource * belongs. If it is not provided, the provider project is used. 
*/ project: string; /** * The status of the route, which can be one of the following values: * - 'ACTIVE' for an active route * - 'INACTIVE' for an inactive route */ routeStatus: string; /** * The type of this route, which can be one of the following values: * - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers * - 'SUBNET' for a route from a subnet of the VPC * - 'BGP' for a route learned from a BGP peer of this router * - 'STATIC' for a static route */ routeType: string; selfLink: string; /** * A list of instance tags to which this route applies. */ tags: string[]; /** * If potential misconfigurations are detected for this route, this field will be populated with warning messages. */ warnings: outputs.compute.RouterStatusBestRouteWarning[]; } interface RouterStatusBestRouteAsPath { /** * The AS numbers of the AS Path. */ asLists: number[]; /** * The type of the AS Path, which can be one of the following values: * - 'AS_SET': unordered set of autonomous systems that the route in has traversed * - 'AS_SEQUENCE': ordered set of autonomous systems that the route has traversed * - 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the local confederation that the route has traversed * - 'AS_CONFED_SET': unordered set of Member Autonomous Systems in the local confederation that the route has traversed */ pathSegmentType: string; } interface RouterStatusBestRouteParam { /** * Resource manager tags to be bound to the route. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. The field is ignored when empty. * The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. 
*/ resourceManagerTags: { [key: string]: string; }; } interface RouterStatusBestRouteWarning { /** * A warning code, if applicable. For example, Compute Engine returns * NO_RESULTS_ON_PAGE if there are no results in the response. */ code: string; /** * Metadata about this warning in key: value format. For example: * "data": [ { "key": "scope", "value": "zones/us-east1-d" } */ datas: outputs.compute.RouterStatusBestRouteWarningData[]; /** * A human-readable description of the warning code. */ message: string; } interface RouterStatusBestRouteWarningData { /** * A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). */ key: string; /** * A warning data value corresponding to the key. */ value: string; } interface RouterStatusBestRoutesForRouter { asPaths: outputs.compute.RouterStatusBestRoutesForRouterAsPath[]; /** * Creation timestamp in RFC3339 text format. */ creationTimestamp: string; /** * An optional description of this resource. Provide this property * when you create the resource. */ description: string; /** * The destination range of outgoing packets that this route applies to. * Only IPv4 is supported. */ destRange: string; /** * The name of the router. */ name: string; /** * The network name or resource link to the parent * network of this subnetwork. */ network: string; /** * URL to a gateway that should handle matching packets. 
* Currently, you can only specify the internet gateway, using a full or * partial valid URL: * * 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' * * 'projects/project/global/gateways/default-internet-gateway' * * 'global/gateways/default-internet-gateway' * * The string 'default-internet-gateway'. */ nextHopGateway: string; /** * The hub network that should handle matching packets, which should conform to RFC1035. */ nextHopHub: string; /** * The IP address or URL to a forwarding rule of type * loadBalancingScheme=INTERNAL that should handle matching * packets. * * With the GA provider you can only specify the forwarding * rule as a partial or full URL. For example, the following * are all valid values: * * 10.128.0.56 * * https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule * * regions/region/forwardingRules/forwardingRule * * When the beta provider, you can also specify the IP address * of a forwarding rule from the same VPC or any peered VPC. * * Note that this can only be used when the destinationRange is * a public (non-RFC 1918) IP CIDR range. */ nextHopIlb: string; /** * URL to an instance that should handle matching packets. * You can specify this as a full or partial URL. For example: * * 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' * * 'projects/project/zones/zone/instances/instance' * * 'zones/zone/instances/instance' * * Just the instance name, with the zone in 'next_hop_instance_zone'. */ nextHopInstance: string; /** * The zone of the instance specified in next_hop_instance. Omit if nextHopInstance is specified as a URL. */ nextHopInstanceZone: string; /** * Internal fixed region-to-region cost that Google Cloud calculates based on factors such as network performance, distance, and available bandwidth between regions. 
*/ nextHopInterRegionCost: string; /** * Network IP address of an instance that should handle matching packets. */ nextHopIp: string; /** * Multi-Exit Discriminator, a BGP route metric that indicates the desirability of a particular route in a network. */ nextHopMed: string; /** * URL to a Network that should handle matching packets. */ nextHopNetwork: string; /** * Indicates the origin of the route. Can be IGP (Interior Gateway Protocol), EGP (Exterior Gateway Protocol), or INCOMPLETE. */ nextHopOrigin: string; /** * The network peering name that should handle matching packets, which should conform to RFC1035. */ nextHopPeering: string; /** * URL to a VpnTunnel that should handle matching packets. */ nextHopVpnTunnel: string; /** * Additional params passed with the request, but not persisted as part of resource payload */ params: outputs.compute.RouterStatusBestRoutesForRouterParam[]; /** * The priority of this route. Priority is used to break ties in cases * where there is more than one matching route of equal prefix length. * * In the case of two routes with equal prefix length, the one with the * lowest-numbered priority value wins. * * Default value is 1000. Valid range is 0 through 65535. */ priority: number; /** * The ID of the project in which the resource * belongs. If it is not provided, the provider project is used. */ project: string; /** * The status of the route, which can be one of the following values: * - 'ACTIVE' for an active route * - 'INACTIVE' for an inactive route */ routeStatus: string; /** * The type of this route, which can be one of the following values: * - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers * - 'SUBNET' for a route from a subnet of the VPC * - 'BGP' for a route learned from a BGP peer of this router * - 'STATIC' for a static route */ routeType: string; selfLink: string; /** * A list of instance tags to which this route applies. 
*/ tags: string[]; /** * If potential misconfigurations are detected for this route, this field will be populated with warning messages. */ warnings: outputs.compute.RouterStatusBestRoutesForRouterWarning[]; } interface RouterStatusBestRoutesForRouterAsPath { /** * The AS numbers of the AS Path. */ asLists: number[]; /** * The type of the AS Path, which can be one of the following values: * - 'AS_SET': unordered set of autonomous systems that the route in has traversed * - 'AS_SEQUENCE': ordered set of autonomous systems that the route has traversed * - 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the local confederation that the route has traversed * - 'AS_CONFED_SET': unordered set of Member Autonomous Systems in the local confederation that the route has traversed */ pathSegmentType: string; } interface RouterStatusBestRoutesForRouterParam { /** * Resource manager tags to be bound to the route. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. The field is ignored when empty. * The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. */ resourceManagerTags: { [key: string]: string; }; } interface RouterStatusBestRoutesForRouterWarning { /** * A warning code, if applicable. For example, Compute Engine returns * NO_RESULTS_ON_PAGE if there are no results in the response. */ code: string; /** * Metadata about this warning in key: value format. For example: * "data": [ { "key": "scope", "value": "zones/us-east1-d" } */ datas: outputs.compute.RouterStatusBestRoutesForRouterWarningData[]; /** * A human-readable description of the warning code. 
*/ message: string; } interface RouterStatusBestRoutesForRouterWarningData { /** * A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). */ key: string; /** * A warning data value corresponding to the key. */ value: string; } interface SecurityPolicyAdaptiveProtectionConfig { /** * ) Configuration for [Automatically deploy Adaptive Protection suggested rules](https://cloud.google.com/armor/docs/adaptive-protection-auto-deploy?hl=en). Structure is documented below. * * The `layer7DdosDefenseConfig` block supports: */ autoDeployConfig?: outputs.compute.SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig; /** * Configuration for [Google Cloud Armor Adaptive Protection Layer 7 DDoS Defense](https://cloud.google.com/armor/docs/adaptive-protection-overview?hl=en). Structure is documented below. */ layer7DdosDefenseConfig?: outputs.compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig; } interface SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig { /** * Rules are only automatically deployed for alerts on potential attacks with confidence scores greater than this threshold. */ confidenceThreshold?: number; /** * Google Cloud Armor stops applying the action in the automatically deployed rule to an identified attacker after this duration. The rule continues to operate against new requests. */ expirationSec?: number; /** * Rules are only automatically deployed when the estimated impact to baseline traffic from the suggested mitigation is below this threshold. 
*/ impactedBaselineThreshold?: number; /** * Identifies new attackers only when the load to the backend service that is under attack exceeds this threshold. */ loadThreshold?: number; } interface SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig { /** * If set to true, enables CAAP for L7 DDoS detection. */ enable?: boolean; /** * Rule visibility. Supported values include: "STANDARD", "PREMIUM". */ ruleVisibility: string; /** * Configuration options for layer7 adaptive protection for various customizable thresholds. */ thresholdConfigs?: outputs.compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig[]; } interface SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig { /** * Confidence threshold above which Adaptive Protection's auto-deploy takes actions. */ autoDeployConfidenceThreshold?: number; /** * Duration over which Adaptive Protection's auto-deployed actions last. */ autoDeployExpirationSec?: number; /** * Impacted baseline threshold below which Adaptive Protection's auto-deploy takes actions. */ autoDeployImpactedBaselineThreshold?: number; /** * Load threshold above which Adaptive Protection automatically deploy threshold based on the backend load threshold and detect a new rule during an alerted attack. */ autoDeployLoadThreshold?: number; /** * Detection threshold based on absolute QPS. */ detectionAbsoluteQps?: number; /** * Detection threshold based on the backend service's load. */ detectionLoadThreshold?: number; /** * Detection threshold based on QPS relative to the average of baseline traffic. */ detectionRelativeToBaselineQps?: number; /** * The name of config. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy. */ name: string; /** * Configuration options for enabling Adaptive Protection to work on the specified service granularity. Structure is documented below. 
*/ trafficGranularityConfigs?: outputs.compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig[]; } interface SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig { /** * If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if value is empty. */ enableEachUniqueValue?: boolean; /** * The type of this configuration, a granular traffic unit can be one of the following: * * `HTTP_HEADER_HOST` * * `HTTP_PATH` */ type: string; /** * Requests that match this value constitute a granular traffic unit. */ value?: string; } interface SecurityPolicyAdvancedOptionsConfig { /** * Custom configuration to apply the JSON parsing. Only applicable when * `jsonParsing` is set to `STANDARD`. Structure is documented below. */ jsonCustomConfig: outputs.compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig; /** * Whether or not to JSON parse the payload body. Defaults to `DISABLED`. * * `DISABLED` - Don't parse JSON payloads in POST bodies. * * `STANDARD` - Parse JSON payloads in POST bodies. * * `STANDARD_WITH_GRAPHQL` - Parse JSON and GraphQL payloads in POST bodies. */ jsonParsing: string; /** * Log level to use. Defaults to `NORMAL`. * * `NORMAL` - Normal log level. * * `VERBOSE` - Verbose log level. */ logLevel: string; /** * The maximum request size chosen by the customer with Waf enabled. Values supported are "8KB", "16KB, "32KB", "48KB" and "64KB". Values are case insensitive. */ requestBodyInspectionSize: string; /** * An optional list of case-insensitive request header names to use for resolving the callers client IP address. */ userIpRequestHeaders?: string[]; } interface SecurityPolicyAdvancedOptionsConfigJsonCustomConfig { /** * A list of custom Content-Type header values to apply the JSON parsing. 
The * format of the Content-Type header values is defined in * [RFC 1341](https://www.ietf.org/rfc/rfc1341.txt). When configuring a custom Content-Type header * value, only the type/subtype needs to be specified, and the parameters should be excluded. */ contentTypes: string[]; } interface SecurityPolicyRecaptchaOptionsConfig { /** * A field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of `GOOGLE_RECAPTCHA` under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. */ redirectSiteKey: string; } interface SecurityPolicyRule { /** * Action to take when `match` matches the request. Valid values: */ action: string; /** * An optional description of this rule. Max size is 64. */ description?: string; /** * Additional actions that are performed on headers. Structure is documented below. */ headerAction?: outputs.compute.SecurityPolicyRuleHeaderAction; /** * A match condition that incoming traffic is evaluated against. * If it evaluates to true, the corresponding `action` is enforced. Structure is documented below. */ match: outputs.compute.SecurityPolicyRuleMatch; /** * Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if `evaluatePreconfiguredWaf()` is not used, this field will have no effect. Structure is documented below. */ preconfiguredWafConfig?: outputs.compute.SecurityPolicyRulePreconfiguredWafConfig; /** * When set to true, the `action` specified above is not enforced. * Stackdriver logs for requests that trigger a preview action are annotated as such. */ preview: boolean; /** * An unique positive integer indicating the priority of evaluation for a rule. * Rules are evaluated from highest priority (lowest numerically) to lowest priority (highest numerically) in order. 
*/ priority: number; /** * Must be specified if the `action` is `rateBasedBan` or `throttle`. Cannot be specified for other actions. Structure is documented below. */ rateLimitOptions?: outputs.compute.SecurityPolicyRuleRateLimitOptions; /** * Can be specified if the `action` is `redirect`. Cannot be specified for other actions. Structure is documented below. */ redirectOptions?: outputs.compute.SecurityPolicyRuleRedirectOptions; } interface SecurityPolicyRuleHeaderAction { /** * The list of request headers to add or overwrite if they're already present. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.SecurityPolicyRuleHeaderActionRequestHeadersToAdd[]; } interface SecurityPolicyRuleHeaderActionRequestHeadersToAdd { /** * The name of the header to set. */ headerName?: string; /** * The value to set the named header to. */ headerValue?: string; } interface SecurityPolicyRuleMatch { /** * The configuration options available when specifying versionedExpr. * This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. * Structure is documented below. */ config?: outputs.compute.SecurityPolicyRuleMatchConfig; /** * User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. * Structure is documented below. */ expr?: outputs.compute.SecurityPolicyRuleMatchExpr; /** * The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr'). * Structure is documented below. */ exprOptions?: outputs.compute.SecurityPolicyRuleMatchExprOptions; /** * Preconfigured versioned expression. If this field is specified, config must also be specified. * Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. * Possible values are: `SRC_IPS_V1`. 
*/ versionedExpr?: string; } interface SecurityPolicyRuleMatchConfig { /** * CIDR IP address range. Maximum number of srcIpRanges allowed is 10. */ srcIpRanges?: string[]; } interface SecurityPolicyRuleMatchExpr { /** * Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. */ expression: string; } interface SecurityPolicyRuleMatchExprOptions { /** * reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect. * Structure is documented below. */ recaptchaOptions: outputs.compute.SecurityPolicyRuleMatchExprOptionsRecaptchaOptions; } interface SecurityPolicyRuleMatchExprOptionsRecaptchaOptions { /** * A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. */ actionTokenSiteKeys?: string[]; /** * A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. */ sessionTokenSiteKeys?: string[]; } interface SecurityPolicyRulePreconfiguredWafConfig { /** * An exclusion to apply during preconfigured WAF evaluation. * Structure is documented below. */ exclusions?: outputs.compute.SecurityPolicyRulePreconfiguredWafConfigExclusion[]; } interface SecurityPolicyRulePreconfiguredWafConfigExclusion { /** * Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. * Structure is documented below. */ requestCookies?: outputs.compute.SecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky[]; /** * Request header whose value will be excluded from inspection during preconfigured WAF evaluation. * Structure is documented below. 
*/ requestHeaders?: outputs.compute.SecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader[]; /** * Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. * Note that the parameter can be in the query string or in the POST body. * Structure is documented below. */ requestQueryParams?: outputs.compute.SecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam[]; /** * Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. * When specifying this field, the query or fragment part should be excluded. * Structure is documented below. */ requestUris?: outputs.compute.SecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri[]; /** * A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. * If omitted, it refers to all the rule IDs under the WAF rule set. */ targetRuleIds?: string[]; /** * Target WAF rule set to apply the preconfigured WAF exclusion. */ targetRuleSet: string; } interface SecurityPolicyRulePreconfiguredWafConfigExclusionRequestCooky { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. 
*/ value?: string; } interface SecurityPolicyRulePreconfiguredWafConfigExclusionRequestHeader { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface SecurityPolicyRulePreconfiguredWafConfigExclusionRequestQueryParam { /** * You can specify an exact match or a partial match by using a field operator and a field value. * Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface SecurityPolicyRulePreconfiguredWafConfigExclusionRequestUri { /** * You can specify an exact match or a partial match by using a field operator and a field value. 
* Available options: * EQUALS: The operator matches if the field value equals the specified value. * STARTS_WITH: The operator matches if the field value starts with the specified value. * ENDS_WITH: The operator matches if the field value ends with the specified value. * CONTAINS: The operator matches if the field value contains the specified value. * EQUALS_ANY: The operator matches if the field value is any value. */ operator: string; /** * A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. * The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. */ value?: string; } interface SecurityPolicyRuleRateLimitOptions { /** * Can only be specified if the action for the rule is "rateBasedBan". * If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. */ banDurationSec?: number; /** * Can only be specified if the action for the rule is "rateBasedBan". * If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. * Structure is documented below. */ banThreshold?: outputs.compute.SecurityPolicyRuleRateLimitOptionsBanThreshold; /** * Action to take for requests that are under the configured rate limit threshold. * Valid option is "allow" only. */ conformAction?: string; /** * Determines the key to enforce the rateLimitThreshold on. Possible values are: * * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. * * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. * * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". 
The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. * * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. * * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. * * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. * * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. * * REGION_CODE: The country/region from which the request originates. * * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * TLS_JA4_FINGERPRINT: JA4 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. * Possible values are: `ALL`, `IP`, `HTTP_HEADER`, `XFF_IP`, `HTTP_COOKIE`, `HTTP_PATH`, `SNI`, `REGION_CODE`, `TLS_JA3_FINGERPRINT`, `TLS_JA4_FINGERPRINT`, `USER_IP`. */ enforceOnKey?: string; /** * If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. * You can specify up to 3 enforceOnKeyConfigs. 
* If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. * Structure is documented below. */ enforceOnKeyConfigs?: outputs.compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig[]; /** * Rate limit key name applicable only for the following key types: * HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. * HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName?: string; /** * Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. * Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502. */ exceedAction?: string; /** * Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR. * Structure is documented below. */ exceedRedirectOptions?: outputs.compute.SecurityPolicyRuleRateLimitOptionsExceedRedirectOptions; /** * Threshold at which to begin ratelimiting. * Structure is documented below. */ rateLimitThreshold?: outputs.compute.SecurityPolicyRuleRateLimitOptionsRateLimitThreshold; } interface SecurityPolicyRuleRateLimitOptionsBanThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count?: number; /** * Interval over which the threshold is computed. */ intervalSec?: number; } interface SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig { /** * Rate limit key name applicable only for the following key types: * HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. * HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. */ enforceOnKeyName?: string; /** * Determines the key to enforce the rateLimitThreshold on. 
Possible values are: * * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. * * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. * * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. * * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. * * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. * * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. * * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. * * REGION_CODE: The country/region from which the request originates. * * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * TLS_JA4_FINGERPRINT: JA4 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. * * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
* Possible values are: `ALL`, `IP`, `HTTP_HEADER`, `XFF_IP`, `HTTP_COOKIE`, `HTTP_PATH`, `SNI`, `REGION_CODE`, `TLS_JA3_FINGERPRINT`, `TLS_JA4_FINGERPRINT`, `USER_IP`. */ enforceOnKeyType?: string; } interface SecurityPolicyRuleRateLimitOptionsExceedRedirectOptions { /** * Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. */ target?: string; /** * Type of the redirect action. */ type?: string; } interface SecurityPolicyRuleRateLimitOptionsRateLimitThreshold { /** * Number of HTTP(S) requests for calculating the threshold. */ count?: number; /** * Interval over which the threshold is computed. */ intervalSec?: number; } interface SecurityPolicyRuleRedirectOptions { /** * Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. */ target?: string; /** * Type of the redirect action. */ type?: string; } interface SecurityScanConfigAuthentication { /** * Describes authentication configuration that uses a custom account. * Structure is documented below. */ customAccount?: outputs.compute.SecurityScanConfigAuthenticationCustomAccount; /** * Describes authentication configuration that uses a Google account. * Structure is documented below. */ googleAccount?: outputs.compute.SecurityScanConfigAuthenticationGoogleAccount; } interface SecurityScanConfigAuthenticationCustomAccount { /** * The login form URL of the website. */ loginUrl: string; /** * The password of the custom account. The credential is stored encrypted * in GCP. * **Note**: This property is sensitive and will not be displayed in the plan. */ password: string; /** * The user name of the custom account. */ username: string; } interface SecurityScanConfigAuthenticationGoogleAccount { /** * The password of the Google account. The credential is stored encrypted * in GCP. * **Note**: This property is sensitive and will not be displayed in the plan. 
*/ password: string; /** * The user name of the Google account. */ username: string; } interface SecurityScanConfigSchedule { /** * The duration of time between executions in days */ intervalDurationDays: number; /** * A timestamp indicates when the next run will be scheduled. The value is refreshed * by the server after each run. If unspecified, it will default to current server time, * which means the scan will be scheduled to start immediately. */ scheduleTime?: string; } interface ServiceAttachmentConnectedEndpoint { /** * (Output) * The url of the consumer network. */ consumerNetwork: string; /** * (Output) * The URL of the consumer forwarding rule. */ endpoint: string; /** * (Output) * NOTE: This field is temporarily non-functional due to an underlying API issue. * Any value provided here will be ignored until the API issue is resolved, expected around 2026-03. * 'The nat IPs of the connected endpoint.' */ natIps: string[]; /** * (Output) * The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to. */ propagatedConnectionCount: number; /** * (Output) * The PSC connection id of the connected endpoint. */ pscConnectionId: string; /** * (Output) * The status of the connection from the consumer forwarding rule to * this service attachment. */ status: string; } interface ServiceAttachmentConsumerAcceptList { /** * The number of consumer forwarding rules the consumer project can * create. */ connectionLimit: number; /** * The network that is allowed to connect to this service attachment. * Only one of projectIdOrNum and networkUrl may be set. */ networkUrl?: string; /** * A project that is allowed to connect to this service attachment. * Only one of projectIdOrNum and networkUrl may be set. */ projectIdOrNum?: string; } interface ServiceAttachmentPscServiceAttachmentId { /** * (Output) * The high 64 bits of the PSC service attachment ID. 
*/ high: string; /** * (Output) * The low 64 bits of the PSC service attachment ID. */ low: string; } interface ServiceAttachmentTunnelingConfig { /** * The encapsulation profile for tunneling traffic. */ encapsulationProfile?: string; /** * The routing mode for tunneling traffic. */ routingMode?: string; } interface SnapshotIamBindingCondition { description?: string; expression: string; title: string; } interface SnapshotIamMemberCondition { description?: string; expression: string; title: string; } interface SnapshotSettingsStorageLocation { /** * When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the * locations listed in this field. Keys are Cloud Storage bucket locations. * Only one location can be specified. * Structure is documented below. */ locations?: outputs.compute.SnapshotSettingsStorageLocationLocation[]; /** * The chosen location policy * Possible values are: `NEAREST_MULTI_REGION`, `LOCAL_REGION`, `SPECIFIC_LOCATIONS`. */ policy: string; } interface SnapshotSettingsStorageLocationLocation { /** * The identifier for this object. Format specified above. */ location: string; /** * Name of the location. It should be one of the Cloud Storage buckets. * Only one location can be specified. (should match location) */ name: string; } interface SnapshotSnapshotEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink?: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount?: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. * **Note**: This property is sensitive and will not be displayed in the plan. */ rawKey?: string; /** * Specifies an encryption key stored in Google Cloud KMS, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. 
* **Note**: This property is sensitive and will not be displayed in the plan. */ rsaEncryptedKey?: string; /** * (Output) * The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied * encryption key that protects this resource. */ sha256: string; } interface SnapshotSourceDiskEncryptionKey { /** * The name of the encryption key that is stored in Google Cloud KMS. */ kmsKeySelfLink?: string; /** * The service account used for the encryption request for the given KMS key. * If absent, the Compute Engine Service Agent service account is used. */ kmsKeyServiceAccount?: string; /** * Specifies a 256-bit customer-supplied encryption key, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. * **Note**: This property is sensitive and will not be displayed in the plan. */ rawKey?: string; /** * Specifies an encryption key stored in Google Cloud KMS, encoded in * RFC 4648 base64 to either encrypt or decrypt this resource. * **Note**: This property is sensitive and will not be displayed in the plan. */ rsaEncryptedKey?: string; } interface StoragePoolIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface StoragePoolIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
* * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface StoragePoolResourceStatus { /** * (Output) * Number of disks used. */ diskCount: string; /** * (Output) * Timestamp of the last successful resize in RFC3339 text format. */ lastResizeTimestamp: string; /** * (Output) * Maximum allowed aggregate disk size in gigabytes. */ maxTotalProvisionedDiskCapacityGb: string; /** * (Output) * Space used by data stored in disks within the storage pool (in bytes). * This will reflect the total number of bytes written to the disks in the pool, in contrast to the capacity of those disks. */ poolUsedCapacityBytes: string; /** * (Output) * Sum of all the disks' provisioned IOPS, minus some amount that is allowed per disk that is not counted towards pool's IOPS capacity. For more information, see https://cloud.google.com/compute/docs/disks/storage-pools. */ poolUsedIops: string; /** * (Output) * Sum of all the disks' provisioned throughput in MB/s. */ poolUsedThroughput: string; /** * (Output) * Amount of data written into the pool, before it is compacted. */ poolUserWrittenBytes: string; /** * (Output) * Sum of all the capacity provisioned in disks in this storage pool. * A disk's provisioned capacity is the same as its total capacity. */ totalProvisionedDiskCapacityGb: string; /** * (Output) * Sum of all the disks' provisioned IOPS. 
*/ totalProvisionedDiskIops: string; /** * (Output) * Sum of all the disks' provisioned throughput in MB/s, * minus some amount that is allowed per disk that is not counted towards pool's throughput capacity. */ totalProvisionedDiskThroughput: string; } interface StoragePoolStatus { /** * (Output) * Number of disks used. */ diskCount: string; /** * (Output) * Timestamp of the last successful resize in RFC3339 text format. */ lastResizeTimestamp: string; /** * (Output) * Maximum allowed aggregate disk size in gigabytes. */ maxTotalProvisionedDiskCapacityGb: string; /** * (Output) * Space used by data stored in disks within the storage pool (in bytes). * This will reflect the total number of bytes written to the disks in the pool, in contrast to the capacity of those disks. */ poolUsedCapacityBytes: string; /** * (Output) * Sum of all the disks' provisioned IOPS, minus some amount that is allowed per disk that is not counted towards pool's IOPS capacity. For more information, see https://cloud.google.com/compute/docs/disks/storage-pools. */ poolUsedIops: string; /** * (Output) * Sum of all the disks' provisioned throughput in MB/s. */ poolUsedThroughput: string; /** * (Output) * Amount of data written into the pool, before it is compacted. */ poolUserWrittenBytes: string; /** * (Output) * Sum of all the capacity provisioned in disks in this storage pool. * A disk's provisioned capacity is the same as its total capacity. */ totalProvisionedDiskCapacityGb: string; /** * (Output) * Sum of all the disks' provisioned IOPS. */ totalProvisionedDiskIops: string; /** * (Output) * Sum of all the disks' provisioned throughput in MB/s, * minus some amount that is allowed per disk that is not counted towards pool's throughput capacity. */ totalProvisionedDiskThroughput: string; } interface SubnetworkIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
* * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SubnetworkIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SubnetworkLogConfig { /** * Can only be specified if VPC flow logging for this subnetwork is enabled. * Toggles the aggregation interval for collecting flow logs. Increasing the * interval time will reduce the amount of generated flow logs for long * lasting connections. Default is an interval of 5 seconds per connection. * Default value is `INTERVAL_5_SEC`. * Possible values are: `INTERVAL_5_SEC`, `INTERVAL_30_SEC`, `INTERVAL_1_MIN`, `INTERVAL_5_MIN`, `INTERVAL_10_MIN`, `INTERVAL_15_MIN`. */ aggregationInterval?: string; /** * Export filter used to define which VPC flow logs should be logged, as as CEL expression. 
See * https://cloud.google.com/vpc/docs/flow-logs#filtering for details on how to format this field. * The default value is 'true', which evaluates to include everything. */ filterExpr?: string; /** * Can only be specified if VPC flow logging for this subnetwork is enabled. * The value of the field must be in [0, 1]. Set the sampling rate of VPC * flow logs within the subnetwork where 1.0 means all collected logs are * reported and 0.0 means no logs are reported. Default is 0.5 which means * half of all collected logs are reported. */ flowSampling?: number; /** * Can only be specified if VPC flow logging for this subnetwork is enabled. * Configures whether metadata fields should be added to the reported VPC * flow logs. * Default value is `INCLUDE_ALL_METADATA`. * Possible values are: `EXCLUDE_ALL_METADATA`, `INCLUDE_ALL_METADATA`, `CUSTOM_METADATA`. */ metadata?: string; /** * List of metadata fields that should be added to reported logs. * Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" is set to CUSTOM_METADATA. */ metadataFields?: string[]; } interface SubnetworkParams { /** * Resource manager tags to be bound to the subnetwork. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. The field is ignored when empty. * The field is immutable and causes resource replacement when mutated. This field is only * set at create time and modifying this field after creation will trigger recreation. * To apply tags to an existing resource, see the gcp.tags.TagBinding resource. */ resourceManagerTags?: { [key: string]: string; }; } interface SubnetworkSecondaryIpRange { /** * The range of IP addresses belonging to this subnetwork secondary * range. Provide this property when you create the subnetwork. * Ranges must be unique and non-overlapping with all primary and * secondary IP ranges within a network. 
Only IPv4 is supported. * Field is optional when `reservedInternalRange` is defined, otherwise required. */ ipCidrRange: string; /** * The name associated with this subnetwork secondary range, used * when adding an alias IP range to a VM instance. The name must * be 1-63 characters long, and comply with RFC1035. The name * must be unique within the subnetwork. */ rangeName: string; /** * The ID of the reserved internal range. Must be prefixed with `networkconnectivity.googleapis.com` * E.g. `networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}` */ reservedInternalRange?: string; } interface URLMapDefaultCustomErrorResponsePolicy { /** * Specifies rules for returning error responses. * In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. * For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). * If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. * Structure is documented below. */ errorResponseRules?: outputs.compute.URLMapDefaultCustomErrorResponsePolicyErrorResponseRule[]; /** * The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: * https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket * compute/v1/projects/project/global/backendBuckets/myBackendBucket * global/backendBuckets/myBackendBucket * If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. 
* If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). */ errorService?: string; } interface URLMapDefaultCustomErrorResponsePolicyErrorResponseRule { /** * Valid values include: * - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. * - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. * - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. * Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. */ matchResponseCodes?: string[]; /** * The HTTP status code returned with the response containing the custom error content. * If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. */ overrideResponseCode?: number; /** * The full path to a file within backendBucket. For example: /errors/defaultError.html * path must start with a leading slash. path cannot have trailing slashes. * If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. * The value must be from 1 to 1024 characters. */ path?: string; } interface URLMapDefaultRouteAction { /** * The specification for allowing client side cross-origin requests. Please see * [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) * Structure is documented below. */ corsPolicy?: outputs.compute.URLMapDefaultRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. 
* As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a * percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted * by the Loadbalancer for a percentage of requests. * timeout and retryPolicy will be ignored by clients that are configured with a faultInjectionPolicy. * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.URLMapDefaultRouteActionFaultInjectionPolicy; /** * Specifies the maximum duration (timeout) for streams on the selected route. * Unlike the `Timeout` field where the timeout duration starts from the time the request * has been fully processed (known as end-of-stream), the duration in this field * is computed from the beginning of the stream until the response has been processed, * including all retries. A stream that does not complete in this duration is closed. * Structure is documented below. */ maxStreamDuration: outputs.compute.URLMapDefaultRouteActionMaxStreamDuration; /** * Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. * Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, * the host / authority header is suffixed with -shadow. * Structure is documented below. */ requestMirrorPolicy?: outputs.compute.URLMapDefaultRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.URLMapDefaultRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time the request has been * fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries. * If not specified, will use the largest timeout among all backend services associated with the route. 
* Structure is documented below. */ timeout: outputs.compute.URLMapDefaultRouteActionTimeout; /** * The spec to modify the URL of the request, prior to forwarding the request to the matched service. * Structure is documented below. */ urlRewrite?: outputs.compute.URLMapDefaultRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match occurs. * The weights determine the fraction of traffic that flows to their corresponding backend service. * If all traffic needs to go to a single backend service, there must be one weightedBackendService * with weight set to a non 0 number. * Once a backendService is identified and before forwarding the request to the backend service, * advanced routing actions like Url rewrites and header transformations are applied depending on * additional settings specified in this HttpRouteAction. * Structure is documented below. */ weightedBackendServices?: outputs.compute.URLMapDefaultRouteActionWeightedBackendService[]; } interface URLMapDefaultRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. * This translates to the Access-Control-Allow-Credentials header. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. 
*/ allowOrigins?: string[]; /** * If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface URLMapDefaultRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.URLMapDefaultRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. */ delay?: outputs.compute.URLMapDefaultRouteActionFaultInjectionPolicyDelay; } interface URLMapDefaultRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus?: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface URLMapDefaultRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. */ fixedDelay?: outputs.compute.URLMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface URLMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. 
Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface URLMapDefaultRouteActionMaxStreamDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapDefaultRouteActionRequestMirrorPolicy { /** * The full or partial URL to the BackendService resource being mirrored to. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. */ mirrorPercent?: number; } interface URLMapDefaultRouteActionRetryPolicy { /** * Specifies the allowed number retries. This number must be > 0. If not specified, defaults to 1. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.URLMapDefaultRouteActionRetryPolicyPerTryTimeout; /** * Specfies one or more conditions when this retry rule applies. 
Valid values are: * * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, * or if the backend service does not respond at all, example: disconnects, reset, read timeout, * * connection failure, and refused streams. * * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. * * connect-failure: Loadbalancer will retry on failures connecting to backend services, * for example due to connection timeouts. * * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. * Currently the only retriable error supported is 409. * * refused-stream:Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. * This reset type indicates that it is safe to retry. * * cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled * * deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded * * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted * * unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable */ retryConditions?: string[]; } interface URLMapDefaultRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface URLMapDefaultRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. 
Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface URLMapDefaultRouteActionUrlRewrite { /** * Prior to forwarding the request to the selected service, the request's host header is replaced * with contents of hostRewrite. * The value must be between 1 and 255 characters. */ hostRewrite?: string; /** * Prior to forwarding the request to the selected backend service, the matching portion of the * request's path is replaced by pathPrefixRewrite. * The value must be between 1 and 1024 characters. */ pathPrefixRewrite?: string; } interface URLMapDefaultRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the * request to backendService, the loadbalancer applies any relevant headerActions * specified as part of this backendServiceWeight. */ backendService?: string; /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. * headerAction specified here take effect before headerAction in the enclosing * HttpRouteRule, PathMatcher and UrlMap. * Structure is documented below. */ headerAction?: outputs.compute.URLMapDefaultRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to backendService, computed as * weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request * has been directed to a backendService, subsequent requests will be sent to the same backendService * as determined by the BackendService's session affinity policy. 
* The value must be between 0 and 1000 */ weight?: number; } interface URLMapDefaultRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response prior to sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.URLMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace?: boolean; } interface URLMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace?: boolean; } interface URLMapDefaultUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was * supplied in the request. 
The value must be between 1 and 255 characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. If set to * false, the URL scheme of the redirected request will remain the same as that of the * request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this * true for TargetHttpsProxy is not permitted. The default is set to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was * supplied in the request. pathRedirect cannot be supplied together with * prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the * original request will be used for the redirect. The value must be between 1 and 1024 * characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, * retaining the remaining portion of the URL before redirecting the request. * prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or * neither. If neither is supplied, the path of the original request will be used for * the redirect. The value must be between 1 and 1024 characters. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method * will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, * the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed prior * to redirecting the request. If set to false, the query portion of the original URL is * retained. The default is set to false. * This field is required to ensure an empty block is not set. 
The normal default value is false. */ stripQuery: boolean; } interface URLMapHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response prior to sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.URLMapHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapHostRule { /** * An optional description of this resource. Provide this property when you create * the resource. */ description?: string; /** * The list of host patterns to match. They must be valid hostnames, except * will * match any string of ([a-z0-9-.]*). 
In that case, * must be the first character * and must be followed in the pattern by either - or .. */ hosts: string[]; /** * The name of the PathMatcher to use to match the path portion of the URL if the * hostRule matches the URL's host portion. */ pathMatcher: string; } interface URLMapPathMatcher { /** * defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. * This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. * For example, consider a UrlMap with the following configuration: * UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors * A RouteRule for /coming_soon/ is configured for the error code 404. * If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. * When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. 
* defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. * Structure is documented below. */ defaultCustomErrorResponsePolicy?: outputs.compute.URLMapPathMatcherDefaultCustomErrorResponsePolicy; /** * defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs * advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request * to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. * Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. * Only one of defaultRouteAction or defaultUrlRedirect must be set. * Structure is documented below. */ defaultRouteAction?: outputs.compute.URLMapPathMatcherDefaultRouteAction; /** * The backend service or backend bucket to use when none of the given paths match. */ defaultService?: string; /** * When none of the specified hostRules match, the request is redirected to a URL specified * by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or * defaultRouteAction must not be set. * Structure is documented below. */ defaultUrlRedirect?: outputs.compute.URLMapPathMatcherDefaultUrlRedirect; /** * An optional description of this resource. Provide this property when you create * the resource. */ description?: string; /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. HeaderAction specified here are applied after the * matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap * Structure is documented below. */ headerAction?: outputs.compute.URLMapPathMatcherHeaderAction; /** * The name to which this PathMatcher is referred by the HostRule. */ name: string; /** * The list of path rules. Use this list instead of routeRules when routing based * on simple path matching is all that's required. 
The order by which path rules * are specified does not matter. Matches are always done on the longest-path-first * basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* * irrespective of the order in which those paths appear in this list. Within a * given pathMatcher, only one of pathRules or routeRules must be set. * Structure is documented below. */ pathRules?: outputs.compute.URLMapPathMatcherPathRule[]; /** * The list of ordered HTTP route rules. Use this list instead of pathRules when * advanced route matching and routing actions are desired. The order of specifying * routeRules matters: the first rule that matches will cause its specified routing * action to take effect. Within a given pathMatcher, only one of pathRules or * routeRules must be set. routeRules are not supported in UrlMaps intended for * External load balancers. * Structure is documented below. */ routeRules?: outputs.compute.URLMapPathMatcherRouteRule[]; } interface URLMapPathMatcherDefaultCustomErrorResponsePolicy { /** * Specifies rules for returning error responses. * In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. * For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). * If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. * Structure is documented below. */ errorResponseRules?: outputs.compute.URLMapPathMatcherDefaultCustomErrorResponsePolicyErrorResponseRule[]; /** * The full or partial URL to the BackendBucket resource that contains the custom error content. 
Examples are: * https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket * compute/v1/projects/project/global/backendBuckets/myBackendBucket * global/backendBuckets/myBackendBucket * If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. * If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). */ errorService?: string; } interface URLMapPathMatcherDefaultCustomErrorResponsePolicyErrorResponseRule { /** * Valid values include: * - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. * - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. * - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. * Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. */ matchResponseCodes?: string[]; /** * The HTTP status code returned with the response containing the custom error content. * If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. */ overrideResponseCode?: number; /** * The full path to a file within backendBucket. For example: /errors/defaultError.html * path must start with a leading slash. path cannot have trailing slashes. * If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. 
* The value must be from 1 to 1024 characters. */ path?: string; } interface URLMapPathMatcherDefaultRouteAction { /** * The specification for allowing client side cross-origin requests. Please see * [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) * Structure is documented below. */ corsPolicy?: outputs.compute.URLMapPathMatcherDefaultRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. * As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a * percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted * by the Loadbalancer for a percentage of requests. * timeout and retryPolicy will be ignored by clients that are configured with a faultInjectionPolicy. * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.URLMapPathMatcherDefaultRouteActionFaultInjectionPolicy; /** * Specifies the maximum duration (timeout) for streams on the selected route. * Unlike the `Timeout` field where the timeout duration starts from the time the request * has been fully processed (known as end-of-stream), the duration in this field * is computed from the beginning of the stream until the response has been processed, * including all retries. A stream that does not complete in this duration is closed. * Structure is documented below. */ maxStreamDuration: outputs.compute.URLMapPathMatcherDefaultRouteActionMaxStreamDuration; /** * Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. * Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, * the host / authority header is suffixed with -shadow. * Structure is documented below. 
*/ requestMirrorPolicy?: outputs.compute.URLMapPathMatcherDefaultRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.URLMapPathMatcherDefaultRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time the request has been * fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries. * If not specified, will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ timeout: outputs.compute.URLMapPathMatcherDefaultRouteActionTimeout; /** * The spec to modify the URL of the request, prior to forwarding the request to the matched service. * Structure is documented below. */ urlRewrite?: outputs.compute.URLMapPathMatcherDefaultRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match occurs. * The weights determine the fraction of traffic that flows to their corresponding backend service. * If all traffic needs to go to a single backend service, there must be one weightedBackendService * with weight set to a non 0 number. * Once a backendService is identified and before forwarding the request to the backend service, * advanced routing actions like Url rewrites and header transformations are applied depending on * additional settings specified in this HttpRouteAction. * Structure is documented below. */ weightedBackendServices?: outputs.compute.URLMapPathMatcherDefaultRouteActionWeightedBackendService[]; } interface URLMapPathMatcherDefaultRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. * This translates to the Access-Control-Allow-Credentials header. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. 
*/ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface URLMapPathMatcherDefaultRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.URLMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. */ delay?: outputs.compute.URLMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay; } interface URLMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus?: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. 
*/ percentage?: number; } interface URLMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. */ fixedDelay?: outputs.compute.URLMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface URLMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface URLMapPathMatcherDefaultRouteActionMaxStreamDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherDefaultRouteActionRequestMirrorPolicy { /** * The full or partial URL to the BackendService resource being mirrored to. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. 
*/ mirrorPercent?: number; } interface URLMapPathMatcherDefaultRouteActionRetryPolicy { /** * Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.URLMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry rule applies. Valid values are: * * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, * or if the backend service does not respond at all, example: disconnects, reset, read timeout, * * connection failure, and refused streams. * * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. * * connect-failure: Loadbalancer will retry on failures connecting to backend services, * for example due to connection timeouts. * * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. * Currently the only retriable error supported is 409. * * refused-stream: Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. * This reset type indicates that it is safe to retry.
* * cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled * * deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded * * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted * * unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable */ retryConditions?: string[]; } interface URLMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface URLMapPathMatcherDefaultRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds?: string; } interface URLMapPathMatcherDefaultRouteActionUrlRewrite { /** * Prior to forwarding the request to the selected service, the request's host header is replaced * with contents of hostRewrite. * The value must be between 1 and 255 characters. */ hostRewrite?: string; /** * Prior to forwarding the request to the selected backend service, the matching portion of the * request's path is replaced by pathPrefixRewrite. 
* The value must be between 1 and 1024 characters. */ pathPrefixRewrite?: string; } interface URLMapPathMatcherDefaultRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the * request to backendService, the loadbalancer applies any relevant headerActions * specified as part of this backendServiceWeight. */ backendService?: string; /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. * headerAction specified here take effect before headerAction in the enclosing * HttpRouteRule, PathMatcher and UrlMap. * Structure is documented below. */ headerAction?: outputs.compute.URLMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to backendService, computed as * weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request * has been directed to a backendService, subsequent requests will be sent to the same backendService * as determined by the BackendService's session affinity policy. * The value must be between 0 and 1000 */ weight?: number; } interface URLMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add to the response prior to sending the response back to the client. * Structure is documented below.
*/ responseHeadersToAdds?: outputs.compute.URLMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace?: boolean; } interface URLMapPathMatcherDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName?: string; /** * The value of the header to add. */ headerValue?: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace?: boolean; } interface URLMapPathMatcherDefaultUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was * supplied in the request. The value must be between 1 and 255 characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. If set to * false, the URL scheme of the redirected request will remain the same as that of the * request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this * true for TargetHttpsProxy is not permitted. The default is set to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was * supplied in the request. pathRedirect cannot be supplied together with * prefixRedirect. Supply one alone or neither. 
If neither is supplied, the path of the * original request will be used for the redirect. The value must be between 1 and 1024 * characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, * retaining the remaining portion of the URL before redirecting the request. * prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or * neither. If neither is supplied, the path of the original request will be used for * the redirect. The value must be between 1 and 1024 characters. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method * will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, * the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed prior * to redirecting the request. If set to false, the query portion of the original URL is * retained. The default is set to false. * This field is required to ensure an empty block is not set. The normal default value is false. */ stripQuery: boolean; } interface URLMapPathMatcherHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapPathMatcherHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add to the response prior to sending the response back to the client. * Structure is documented below.
*/ responseHeadersToAdds?: outputs.compute.URLMapPathMatcherHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapPathMatcherHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherPathRule { /** * customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. * If a policy for an error code is not configured for the PathRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. * For example, consider a UrlMap with the following configuration: * UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx and 4xx errors * A PathRule for /coming_soon/ is configured for the error code 404. * If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. 
If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in PathRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. * customErrorResponsePolicy is supported only for global external Application Load Balancers. * Structure is documented below. */ customErrorResponsePolicy?: outputs.compute.URLMapPathMatcherPathRuleCustomErrorResponsePolicy; /** * The list of path patterns to match. Each must start with / and the only place a * \* is allowed is at the end following a /. The string fed to the path matcher * does not include any text after the first ? or #, and those chars are not * allowed here. */ paths: string[]; /** * In response to a matching path, the load balancer performs advanced routing * actions like URL rewrites, header transformations, etc. prior to forwarding the * request to the selected backend. If routeAction specifies any * weightedBackendServices, service must not be set. Conversely if service is set, * routeAction cannot contain any weightedBackendServices. Only one of routeAction * or urlRedirect must be set. * Structure is documented below. */ routeAction?: outputs.compute.URLMapPathMatcherPathRuleRouteAction; /** * The backend service or backend bucket to use if any of the given paths match. */ service?: string; /** * When a path pattern is matched, the request is redirected to a URL specified * by urlRedirect. If urlRedirect is specified, service or routeAction must not * be set. * Structure is documented below. */ urlRedirect?: outputs.compute.URLMapPathMatcherPathRuleUrlRedirect; } interface URLMapPathMatcherPathRuleCustomErrorResponsePolicy { /** * Specifies rules for returning error responses. 
* In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. * For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). * If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. * Structure is documented below. */ errorResponseRules?: outputs.compute.URLMapPathMatcherPathRuleCustomErrorResponsePolicyErrorResponseRule[]; /** * The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: * https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket * compute/v1/projects/project/global/backendBuckets/myBackendBucket * global/backendBuckets/myBackendBucket * If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. * If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). */ errorService?: string; } interface URLMapPathMatcherPathRuleCustomErrorResponsePolicyErrorResponseRule { /** * Valid values include: * - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. * - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. * - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. 
* Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. */ matchResponseCodes?: string[]; /** * The HTTP status code returned with the response containing the custom error content. * If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. */ overrideResponseCode?: number; /** * The full path to a file within backendBucket. For example: /errors/defaultError.html * path must start with a leading slash. path cannot have trailing slashes. * If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. * The value must be from 1 to 1024 characters. */ path?: string; } interface URLMapPathMatcherPathRuleRouteAction { /** * The specification for allowing client side cross-origin requests. Please see W3C * Recommendation for Cross Origin Resource Sharing * Structure is documented below. */ corsPolicy?: outputs.compute.URLMapPathMatcherPathRuleRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the * resiliency of clients to backend service failure. As part of fault injection, * when clients send requests to a backend service, delays can be introduced by * Loadbalancer on a percentage of requests before sending those request to the * backend service. Similarly requests from clients can be aborted by the * Loadbalancer for a percentage of requests. timeout and retryPolicy will be * ignored by clients that are configured with a fault_injection_policy. * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicy; /** * Specifies the maximum duration (timeout) for streams on the selected route. 
* Unlike the `Timeout` field where the timeout duration starts from the time the request * has been fully processed (known as end-of-stream), the duration in this field * is computed from the beginning of the stream until the response has been processed, * including all retries. A stream that does not complete in this duration is closed. * Structure is documented below. */ maxStreamDuration: outputs.compute.URLMapPathMatcherPathRuleRouteActionMaxStreamDuration; /** * Specifies the policy on how requests intended for the route's backends are * shadowed to a separate mirrored backend service. Loadbalancer does not wait for * responses from the shadow service. Prior to sending traffic to the shadow * service, the host / authority header is suffixed with -shadow. * Structure is documented below. */ requestMirrorPolicy?: outputs.compute.URLMapPathMatcherPathRuleRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.URLMapPathMatcherPathRuleRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time * the request has been fully processed (i.e. end-of-stream) up until the * response has been completely processed. Timeout includes all retries. If not * specified, the default value is 15 seconds. * Structure is documented below. */ timeout?: outputs.compute.URLMapPathMatcherPathRuleRouteActionTimeout; /** * The spec to modify the URL of the request, prior to forwarding the request to * the matched service * Structure is documented below. */ urlRewrite?: outputs.compute.URLMapPathMatcherPathRuleRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match * occurs. The weights determine the fraction of traffic that flows to their * corresponding backend service.
If all traffic needs to go to a single backend * service, there must be one weightedBackendService with weight set to a non 0 * number. Once a backendService is identified and before forwarding the request to * the backend service, advanced routing actions like Url rewrites and header * transformations are applied depending on additional settings specified in this * HttpRouteAction. * Structure is documented below. */ weightedBackendServices?: outputs.compute.URLMapPathMatcherPathRuleRouteActionWeightedBackendService[]; } interface URLMapPathMatcherPathRuleRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. * This translates to the Access-Control-Allow-Credentials header. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. 
*/ maxAge?: number; } interface URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. */ delay?: outputs.compute.URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay; } interface URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage: number; } interface URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. */ fixedDelay: outputs.compute.URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage: number; } interface URLMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. 
* Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherPathRuleRouteActionMaxStreamDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherPathRuleRouteActionRequestMirrorPolicy { /** * The full or partial URL to the BackendService resource being mirrored to. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. */ mirrorPercent?: number; } interface URLMapPathMatcherPathRuleRouteActionRetryPolicy { /** * Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.URLMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry rule applies. Valid values are: * * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, * or if the backend service does not respond at all, example: disconnects, reset, read timeout, * connection failure, and refused streams. * * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. 
* * connect-failure: Loadbalancer will retry on failures connecting to backend services, * for example due to connection timeouts. * * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. * Currently the only retriable error supported is 409. * * refused-stream: Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. * This reset type indicates that it is safe to retry. * * cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled * * deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded * * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted * * unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable */ retryConditions?: string[]; } interface URLMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherPathRuleRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. 
* Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherPathRuleRouteActionUrlRewrite { /** * Prior to forwarding the request to the selected service, the request's host header is replaced * with contents of hostRewrite. * The value must be between 1 and 255 characters. */ hostRewrite?: string; /** * Prior to forwarding the request to the selected backend service, the matching portion of the * request's path is replaced by pathPrefixRewrite. * The value must be between 1 and 1024 characters. */ pathPrefixRewrite?: string; } interface URLMapPathMatcherPathRuleRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the * request to backendService, the loadbalancer applies any relevant headerActions * specified as part of this backendServiceWeight. */ backendService: string; /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. * headerAction specified here take effect before headerAction in the enclosing * HttpRouteRule, PathMatcher and UrlMap. * Structure is documented below. */ headerAction?: outputs.compute.URLMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to backendService, computed as * weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request * has been directed to a backendService, subsequent requests will be sent to the same backendService * as determined by the BackendService's session affinity policy. * The value must be between 0 and 1000 */ weight: number; } interface URLMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. 
* Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response prior to sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.URLMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherPathRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherPathRuleUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was * supplied in the request. The value must be between 1 and 255 characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. 
If set * to false, the URL scheme of the redirected request will remain the same as that * of the request. This must only be set for UrlMaps used in TargetHttpProxys. * Setting this true for TargetHttpsProxy is not permitted. Defaults to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was * supplied in the request. Only one of pathRedirect or prefixRedirect must be * specified. The value must be between 1 and 1024 characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, * retaining the remaining portion of the URL before redirecting the request. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed * prior to redirecting the request. If set to false, the query portion of the * original URL is retained. Defaults to false. */ stripQuery: boolean; } interface URLMapPathMatcherRouteRule { /** * customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. * Structure is documented below. */ customErrorResponsePolicy?: outputs.compute.URLMapPathMatcherRouteRuleCustomErrorResponsePolicy; /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. 
The headerAction specified here are applied before * the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction * Structure is documented below. */ headerAction?: outputs.compute.URLMapPathMatcherRouteRuleHeaderAction; /** * (Optional, Beta) * Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. * httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. * See ForwardingRule for more details. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ httpFilterConfigs?: outputs.compute.URLMapPathMatcherRouteRuleHttpFilterConfig[]; /** * (Optional, Beta) * Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. * httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. * See ForwardingRule for more details. * Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. * Structure is documented below. */ httpFilterMetadatas?: outputs.compute.URLMapPathMatcherRouteRuleHttpFilterMetadata[]; /** * The rules for determining a match. * Structure is documented below. */ matchRules?: outputs.compute.URLMapPathMatcherRouteRuleMatchRule[]; /** * For routeRules within a given pathMatcher, priority determines the order * in which load balancer will interpret routeRules. RouteRules are evaluated * in order of priority, from the lowest to highest number. The priority of * a rule decreases as its number increases (1, 2, 3, N+1). The first rule * that matches the request is applied. * You cannot configure two or more routeRules with the same priority. * Priority for each rule must be set to a number between 0 and * 2147483647 inclusive. 
* Priority numbers can have gaps, which enable you to add or remove rules * in the future without affecting the rest of the rules. For example, * 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which * you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the * future without any impact on existing rules. */ priority: number; /** * In response to a matching matchRule, the load balancer performs advanced routing * actions like URL rewrites, header transformations, etc. prior to forwarding the * request to the selected backend. If routeAction specifies any * weightedBackendServices, service must not be set. Conversely if service is set, * routeAction cannot contain any weightedBackendServices. Only one of routeAction * or urlRedirect must be set. * Structure is documented below. */ routeAction?: outputs.compute.URLMapPathMatcherRouteRuleRouteAction; /** * The backend service resource to which traffic is * directed if this rule is matched. If routeAction is additionally specified, * advanced routing actions like URL Rewrites, etc. take effect prior to sending * the request to the backend. However, if service is specified, routeAction cannot * contain any weightedBackendService s. Conversely, if routeAction specifies any * weightedBackendServices, service must not be specified. Only one of urlRedirect, * service or routeAction.weightedBackendService must be set. */ service?: string; /** * When this rule is matched, the request is redirected to a URL specified by * urlRedirect. If urlRedirect is specified, service or routeAction must not be * set. * Structure is documented below. */ urlRedirect?: outputs.compute.URLMapPathMatcherRouteRuleUrlRedirect; } interface URLMapPathMatcherRouteRuleCustomErrorResponsePolicy { /** * Specifies rules for returning error responses. 
 * In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. * For example, assume that you configure a rule for 401 (Unauthorized) code, and another for all 4 series error codes (4XX). * If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. * Structure is documented below. */ errorResponseRules?: outputs.compute.URLMapPathMatcherRouteRuleCustomErrorResponsePolicyErrorResponseRule[]; /** * The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: * https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket * compute/v1/projects/project/global/backendBuckets/myBackendBucket * global/backendBuckets/myBackendBucket * If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. * If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). */ errorService?: string; } interface URLMapPathMatcherRouteRuleCustomErrorResponsePolicyErrorResponseRule { /** * Valid values include: * - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. * - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. * - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. 
* Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. */ matchResponseCodes?: string[]; /** * The HTTP status code returned with the response containing the custom error content. * If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. */ overrideResponseCode?: number; /** * The full path to a file within backendBucket. For example: /errors/defaultError.html * path must start with a leading slash. path cannot have trailing slashes. * If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. * The value must be from 1 to 1024 characters. */ path?: string; } interface URLMapPathMatcherRouteRuleHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapPathMatcherRouteRuleHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response prior to sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.URLMapPathMatcherRouteRuleHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapPathMatcherRouteRuleHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. 
* If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherRouteRuleHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherRouteRuleHttpFilterConfig { /** * The configuration needed to enable the networkservices.HttpFilter resource. * The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl */ config?: string; /** * The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, * for example: type.googleapis.com/google.protobuf.Struct */ configTypeUrl?: string; /** * Name of the networkservices.HttpFilter resource this configuration belongs to. * This name must be known to the xDS client. Example: envoy.wasm */ filterName?: string; } interface URLMapPathMatcherRouteRuleHttpFilterMetadata { /** * The configuration needed to enable the networkservices.HttpFilter resource. * The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl */ config?: string; /** * The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, * for example: type.googleapis.com/google.protobuf.Struct */ configTypeUrl?: string; /** * Name of the networkservices.HttpFilter resource this configuration belongs to. * This name must be known to the xDS client. 
Example: envoy.wasm */ filterName?: string; } interface URLMapPathMatcherRouteRuleMatchRule { /** * For satisfying the matchRule condition, the path of the request must exactly * match the value specified in fullPathMatch after removing any query parameters * and anchor that may be part of the original URL. FullPathMatch must be between 1 * and 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must * be specified. */ fullPathMatch?: string; /** * Specifies a list of header match criteria, all of which must match corresponding * headers in the request. * Structure is documented below. */ headerMatches?: outputs.compute.URLMapPathMatcherRouteRuleMatchRuleHeaderMatch[]; /** * Specifies that prefixMatch and fullPathMatch matches are case sensitive. * Defaults to false. */ ignoreCase?: boolean; /** * Opaque filter criteria used by Loadbalancer to restrict routing configuration to * a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS * clients present node metadata. If a match takes place, the relevant routing * configuration is made available to those proxies. For each metadataFilter in * this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the * filterLabels must match the corresponding label provided in the metadata. If its * filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match * with corresponding labels in the provided metadata. metadataFilters specified * here can override those specified in ForwardingRule that refers to this * UrlMap. metadataFilters only applies to Loadbalancers that have their * loadBalancingScheme set to INTERNAL_SELF_MANAGED. * Structure is documented below. 
*/ metadataFilters?: outputs.compute.URLMapPathMatcherRouteRuleMatchRuleMetadataFilter[]; /** * For satisfying the matchRule condition, the path of the request * must match the wildcard pattern specified in pathTemplateMatch * after removing any query parameters and anchor that may be part * of the original URL. * pathTemplateMatch must be between 1 and 255 characters * (inclusive). The pattern specified by pathTemplateMatch may * have at most 5 wildcard operators and at most 5 variable * captures in total. */ pathTemplateMatch?: string; /** * For satisfying the matchRule condition, the request's path must begin with the * specified prefixMatch. prefixMatch must begin with a /. The value must be * between 1 and 1024 characters. Only one of prefixMatch, fullPathMatch or * regexMatch must be specified. */ prefixMatch?: string; /** * Specifies a list of query parameter match criteria, all of which must match * corresponding query parameters in the request. * Structure is documented below. */ queryParameterMatches?: outputs.compute.URLMapPathMatcherRouteRuleMatchRuleQueryParameterMatch[]; /** * For satisfying the matchRule condition, the path of the request must satisfy the * regular expression specified in regexMatch after removing any query parameters * and anchor supplied with the original URL. For regular expression grammar please * see en.cppreference.com/w/cpp/regex/ecmascript Only one of prefixMatch, * fullPathMatch or regexMatch must be specified. */ regexMatch?: string; } interface URLMapPathMatcherRouteRuleMatchRuleHeaderMatch { /** * The value should exactly match contents of exactMatch. Only one of exactMatch, * prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. */ exactMatch?: string; /** * The name of the HTTP header to match. For matching against the HTTP request's * authority, use a headerMatch with the header name ":authority". For matching a * request's method, use the headerName ":method". 
*/ headerName: string; /** * If set to false, the headerMatch is considered a match if the match criteria * above are met. If set to true, the headerMatch is considered a match if the * match criteria above are NOT met. Defaults to false. */ invertMatch?: boolean; /** * The value of the header must start with the contents of prefixMatch. Only one of * exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch * must be set. */ prefixMatch?: string; /** * A header with the contents of headerName must exist. The match takes place * whether or not the request's header has a value or not. Only one of exactMatch, * prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. */ presentMatch?: boolean; /** * The header value must be an integer and its value must be in the range specified * in rangeMatch. If the header does not contain an integer, number or is empty, * the match fails. For example for a range [-5, 0] - -3 will match. - 0 will * not match. - 0.25 will not match. - -3someString will not match. Only one of * exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch * must be set. * Structure is documented below. */ rangeMatch?: outputs.compute.URLMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch; /** * The value of the header must match the regular expression specified in * regexMatch. For regular expression grammar, please see: * en.cppreference.com/w/cpp/regex/ecmascript For matching against a port * specified in the HTTP request, use a headerMatch with headerName set to PORT and * a regular expression that satisfies the RFC2616 Host header's port specifier. * Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or * rangeMatch must be set. */ regexMatch?: string; /** * The value of the header must end with the contents of suffixMatch. Only one of * exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch * must be set. 
*/ suffixMatch?: string; } interface URLMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch { /** * The end of the range (exclusive). */ rangeEnd: number; /** * The start of the range (inclusive). */ rangeStart: number; } interface URLMapPathMatcherRouteRuleMatchRuleMetadataFilter { /** * The list of label value pairs that must match labels in the provided metadata * based on filterMatchCriteria This list must not be empty and can have at the * most 64 entries. * Structure is documented below. */ filterLabels: outputs.compute.URLMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel[]; /** * Specifies how individual filterLabel matches within the list of filterLabels * contribute towards the overall metadataFilter match. Supported values are: * - MATCH_ANY: At least one of the filterLabels must have a matching label in the * provided metadata. * - MATCH_ALL: All filterLabels must have matching labels in * the provided metadata. * Possible values are: `MATCH_ALL`, `MATCH_ANY`. */ filterMatchCriteria: string; } interface URLMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel { /** * Name of metadata label. The name can have a maximum length of 1024 characters * and must be at least 1 character long. */ name: string; /** * The value of the label must match the specified value. value can have a maximum * length of 1024 characters. */ value: string; } interface URLMapPathMatcherRouteRuleMatchRuleQueryParameterMatch { /** * The queryParameterMatch matches if the value of the parameter exactly matches * the contents of exactMatch. Only one of presentMatch, exactMatch and regexMatch * must be set. */ exactMatch?: string; /** * The name of the query parameter to match. The query parameter must exist in the * request, in the absence of which the request match fails. */ name: string; /** * Specifies that the queryParameterMatch matches if the request contains the query * parameter, irrespective of whether the parameter has a value or not. 
Only one of * presentMatch, exactMatch and regexMatch must be set. */ presentMatch?: boolean; /** * The queryParameterMatch matches if the value of the parameter matches the * regular expression specified by regexMatch. For the regular expression grammar, * please see en.cppreference.com/w/cpp/regex/ecmascript Only one of presentMatch, * exactMatch and regexMatch must be set. */ regexMatch?: string; } interface URLMapPathMatcherRouteRuleRouteAction { /** * The specification for allowing client side cross-origin requests. Please see W3C * Recommendation for Cross Origin Resource Sharing * Structure is documented below. */ corsPolicy?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionCorsPolicy; /** * The specification for fault injection introduced into traffic to test the * resiliency of clients to backend service failure. As part of fault injection, * when clients send requests to a backend service, delays can be introduced by * Loadbalancer on a percentage of requests before sending those request to the * backend service. Similarly requests from clients can be aborted by the * Loadbalancer for a percentage of requests. timeout and retryPolicy will be * ignored by clients that are configured with a fault_injection_policy. * Structure is documented below. */ faultInjectionPolicy?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy; /** * Specifies the maximum duration (timeout) for streams on the selected route. * Unlike the `Timeout` field where the timeout duration starts from the time the request * has been fully processed (known as end-of-stream), the duration in this field * is computed from the beginning of the stream until the response has been processed, * including all retries. A stream that does not complete in this duration is closed. * Structure is documented below. 
*/ maxStreamDuration: outputs.compute.URLMapPathMatcherRouteRuleRouteActionMaxStreamDuration; /** * Specifies the policy on how requests intended for the route's backends are * shadowed to a separate mirrored backend service. Loadbalancer does not wait for * responses from the shadow service. Prior to sending traffic to the shadow * service, the host / authority header is suffixed with -shadow. * Structure is documented below. */ requestMirrorPolicy?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionRetryPolicy; /** * Specifies the timeout for the selected route. Timeout is computed from the time * the request is has been fully processed (i.e. end-of-stream) up until the * response has been completely processed. Timeout includes all retries. If not * specified, the default value is 15 seconds. * Structure is documented below. */ timeout?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionTimeout; /** * The spec to modify the URL of the request, prior to forwarding the request to * the matched service * Structure is documented below. */ urlRewrite?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionUrlRewrite; /** * A list of weighted backend services to send traffic to when a route match * occurs. The weights determine the fraction of traffic that flows to their * corresponding backend service. If all traffic needs to go to a single backend * service, there must be one weightedBackendService with weight set to a non 0 * number. Once a backendService is identified and before forwarding the request to * the backend service, advanced routing actions like Url rewrites and header * transformations are applied depending on additional settings specified in this * HttpRouteAction. * Structure is documented below. 
*/ weightedBackendServices?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionWeightedBackendService[]; } interface URLMapPathMatcherRouteRuleRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. * This translates to the Access-Control-Allow-Credentials header. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. For regular expression grammar * please see en.cppreference.com/w/cpp/regex/ecmascript * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. */ allowOrigins?: string[]; /** * If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached in seconds. * This translates to the Access-Control-Max-Age header. */ maxAge?: number; } interface URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy { /** * The specification for how client requests are aborted as part of fault injection. * Structure is documented below. */ abort?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort; /** * The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. * Structure is documented below. 
*/ delay?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay; } interface URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. * The value must be between 200 and 599 inclusive. */ httpStatus?: number; /** * The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay { /** * Specifies the value of the fixed delay interval. * Structure is documented below. */ fixedDelay?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay; /** * The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. * The value must be between 0.0 and 100.0 inclusive. */ percentage?: number; } interface URLMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherRouteRuleRouteActionMaxStreamDuration { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. 
* Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy { /** * The full or partial URL to the BackendService resource being mirrored to. */ backendService: string; /** * (Optional, Beta) * The percentage of requests to be mirrored to backendService. * The value must be between 0.0 and 100.0 inclusive. */ mirrorPercent?: number; } interface URLMapPathMatcherRouteRuleRouteActionRetryPolicy { /** * Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. */ numRetries: number; /** * Specifies a non-zero timeout per retry attempt. * If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, * will use the largest timeout among all backend services associated with the route. * Structure is documented below. */ perTryTimeout?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout; /** * Specifies one or more conditions when this retry rule applies. Valid values are: * * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, * or if the backend service does not respond at all, example: disconnects, reset, read timeout, * * connection failure, and refused streams. * * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. * * connect-failure: Loadbalancer will retry on failures connecting to backend services, * for example due to connection timeouts. * * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. * Currently the only retriable error supported is 409. * * refused-stream: Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. * This reset type indicates that it is safe to retry. 
* * cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled * * deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded * * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted * * unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable */ retryConditions?: string[]; } interface URLMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are * represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherRouteRuleRouteActionTimeout { /** * Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented * with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. */ nanos?: number; /** * Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. * Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years */ seconds: string; } interface URLMapPathMatcherRouteRuleRouteActionUrlRewrite { /** * Prior to forwarding the request to the selected service, the request's host header is replaced * with contents of hostRewrite. * The value must be between 1 and 255 characters. */ hostRewrite?: string; /** * Prior to forwarding the request to the selected backend service, the matching portion of the * request's path is replaced by pathPrefixRewrite. 
* The value must be between 1 and 1024 characters. */ pathPrefixRewrite?: string; /** * Prior to forwarding the request to the selected origin, if the * request matched a pathTemplateMatch, the matching portion of the * request's path is replaced re-written using the pattern specified * by pathTemplateRewrite. * pathTemplateRewrite must be between 1 and 255 characters * (inclusive), must start with a '/', and must only use variables * captured by the route's pathTemplate matchers. * pathTemplateRewrite may only be used when all of a route's * MatchRules specify pathTemplate. * Only one of pathPrefixRewrite and pathTemplateRewrite may be * specified. */ pathTemplateRewrite?: string; } interface URLMapPathMatcherRouteRuleRouteActionWeightedBackendService { /** * The full or partial URL to the default BackendService resource. Before forwarding the * request to backendService, the loadbalancer applies any relevant headerActions * specified as part of this backendServiceWeight. */ backendService: string; /** * Specifies changes to request and response headers that need to take effect for * the selected backendService. * headerAction specified here take effect before headerAction in the enclosing * HttpRouteRule, PathMatcher and UrlMap. * Structure is documented below. */ headerAction?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderAction; /** * Specifies the fraction of traffic sent to backendService, computed as * weight / (sum of all weightedBackendService weights in routeAction) . * The selection of a backend service is determined only for new traffic. Once a user's request * has been directed to a backendService, subsequent requests will be sent to the same backendService * as determined by the BackendService's session affinity policy. 
* The value must be between 0 and 1000 */ weight: number; } interface URLMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderAction { /** * Headers to add to a matching request prior to forwarding the request to the backendService. * Structure is documented below. */ requestHeadersToAdds?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to * forwarding the request to the backendService. */ requestHeadersToRemoves?: string[]; /** * Headers to add the response prior to sending the response back to the client. * Structure is documented below. */ responseHeadersToAdds?: outputs.compute.URLMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending the * response back to the client. */ responseHeadersToRemoves?: string[]; } interface URLMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. */ replace: boolean; } interface URLMapPathMatcherRouteRuleRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * If false, headerValue is appended to any values that already exist for the header. * If true, headerValue is set for the header, discarding any values that were set for that header. 
*/ replace: boolean; } interface URLMapPathMatcherRouteRuleUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was * supplied in the request. The value must be between 1 and 255 characters. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. If set * to false, the URL scheme of the redirected request will remain the same as that * of the request. This must only be set for UrlMaps used in TargetHttpProxys. * Setting this true for TargetHttpsProxy is not permitted. Defaults to false. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was * supplied in the request. Only one of pathRedirect or prefixRedirect must be * specified. The value must be between 1 and 1024 characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, * retaining the remaining portion of the URL before redirecting the request. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. Supported values are: * * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. * * FOUND, which corresponds to 302. * * SEE_OTHER which corresponds to 303. * * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. * * PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained. */ redirectResponseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed * prior to redirecting the request. If set to false, the query portion of the * original URL is retained. Defaults to false. */ stripQuery?: boolean; } interface URLMapTest { /** * Description of this test case. */ description?: string; /** * The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. 
* For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. When service is specified, expectedOutputUrl`s scheme is ignored. * For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies httpsRedirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. If urlRedirect specifies stripQuery, the test passes only if expectedOutputUrl does not contain any query parameters. * expectedOutputUrl is optional when service is specified. */ expectedOutputUrl?: string; /** * For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. * expectedRedirectResponseCode cannot be set when service is set. */ expectedRedirectResponseCode?: number; /** * HTTP headers for this request. * Structure is documented below. */ headers?: outputs.compute.URLMapTestHeader[]; /** * Host portion of the URL. */ host: string; /** * Path portion of the URL. */ path: string; /** * The backend service or backend bucket link that should be matched by this test. */ service?: string; } interface URLMapTestHeader { /** * Header name. */ name: string; /** * Header value. */ value: string; } interface VPNGatewayParams { /** * Resource manager tags to be bound to the Vpn Gateway. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; } interface VPNTunnelCipherSuite { /** * Cipher configuration for phase 1 of the IKE protocol. * Structure is documented below. 
*/ phase1?: outputs.compute.VPNTunnelCipherSuitePhase1; /** * Cipher configuration for phase 2 of the IKE protocol. * Structure is documented below. * * * The `phase1` block supports: */ phase2?: outputs.compute.VPNTunnelCipherSuitePhase2; } interface VPNTunnelCipherSuitePhase1 { /** * Diffie-Hellman groups. */ dhs?: string[]; /** * Encryption algorithms. */ encryptions?: string[]; /** * Integrity algorithms. */ integrities?: string[]; /** * Pseudo-random functions. */ prves?: string[]; } interface VPNTunnelCipherSuitePhase2 { /** * Encryption algorithms. */ encryptions?: string[]; /** * Integrity algorithms. */ integrities?: string[]; /** * Perfect forward secrecy groups. */ pfs?: string[]; } interface VPNTunnelParams { /** * Resource manager tags to be bound to the Vpn Tunnel. Tag keys and values have the * same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, * and values are in the format tagValues/456. */ resourceManagerTags?: { [key: string]: string; }; } interface WireGroupEndpoint { /** * The identifier for this object. Format specified above. */ endpoint: string; /** * Structure is documented below. */ interconnects?: outputs.compute.WireGroupEndpointInterconnect[]; } interface WireGroupEndpointInterconnect { /** * (Optional) */ interconnect?: string; /** * The identifier for this object. Format specified above. */ interconnectName: string; /** * VLAN tags for the interconnect. */ vlanTags?: number[]; } interface WireGroupTopology { /** * Endpoints grouped by location, each mapping to interconnect configurations. * Structure is documented below. */ endpoints: outputs.compute.WireGroupTopologyEndpoint[]; } interface WireGroupTopologyEndpoint { /** * (Output) */ city: string; /** * (Output) */ label: string; } interface WireGroupWire { /** * Indicates whether the wire group is administratively enabled. */ adminEnabled: boolean; /** * Endpoints grouped by location, each mapping to interconnect configurations. 
* Structure is documented below. */ endpoints: outputs.compute.WireGroupWireEndpoint[]; /** * (Output) */ label: string; /** * Default properties for wires within the group. * Structure is documented below. */ wireProperties: outputs.compute.WireGroupWireWireProperty[]; } interface WireGroupWireEndpoint { /** * (Output) */ interconnect: string; /** * (Output) */ vlanTag: number; } interface WireGroupWireGroupProperties { /** * Type of wire group (enum). * WIRE: a single pseudowire over two Interconnect connections with no redundancy. * REDUNDANT: two pseudowires over four Interconnect connections, with two connections in one metro and two connections in another metro. * BOX_AND_CROSS: four pseudowires over four Interconnect connections, with two connections in one metro and two connections in another metro. */ type?: string; } interface WireGroupWireProperties { /** * The configuration of a wire's bandwidth allocation. * ALLOCATE_PER_WIRE: configures a separate unmetered bandwidth allocation (and associated charges) for each wire in the group. * SHARED_WITH_WIRE_GROUP: this is the default behavior, which configures one unmetered bandwidth allocation for the wire group. The unmetered bandwidth is divided equally across each wire in the group, but dynamic * throttling reallocates unused unmetered bandwidth from unused or underused wires to other wires in the group. */ bandwidthAllocation: string; /** * The unmetered bandwidth setting. */ bandwidthUnmetered?: number; /** * Response when a fault is detected in a pseudowire: * NONE: default. * DISABLE_PORT: set the port line protocol down when inline probes detect a fault. This setting is only permitted on port mode pseudowires. */ faultResponse?: string; } interface WireGroupWireWireProperty { /** * The unmetered bandwidth setting. */ bandwidthUnmetered: number; /** * Response when a fault is detected in a pseudowire: * NONE: default. * DISABLE_PORT: set the port line protocol down when inline probes detect a fault. 
This setting is only permitted on port mode pseudowires. */ faultResponse: string; } } export declare namespace config { interface Batching { enableBatching?: boolean; sendAfter?: string; } interface ExternalCredentials { audience: string; identityToken: string; serviceAccountEmail: string; } } export declare namespace contactcenterinsights { interface AnalysisRuleAnnotatorSelector { /** * The issue model to run. If not provided, the most recently deployed topic * model will be used. The provided issue model will only be used for * inference if the issue model is deployed and if runIssueModelAnnotator * is set to true. If more than one issue model is provided, only the first * provided issue model will be used for inference. */ issueModels?: string[]; /** * The list of phrase matchers to run. If not provided, all active phrase * matchers will be used. If inactive phrase matchers are provided, they will * not be used. Phrase matchers will be run only if * runPhraseMatcherAnnotator is set to true. Format: * projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher} */ phraseMatchers?: string[]; /** * Configuration for the QA feature. * Structure is documented below. */ qaConfig?: outputs.contactcenterinsights.AnalysisRuleAnnotatorSelectorQaConfig; /** * Whether to run the entity annotator. */ runEntityAnnotator?: boolean; /** * Whether to run the intent annotator. */ runIntentAnnotator?: boolean; /** * Whether to run the interruption annotator. */ runInterruptionAnnotator?: boolean; /** * Whether to run the issue model annotator. A model should have already been * deployed for this to take effect. */ runIssueModelAnnotator?: boolean; /** * Whether to run the active phrase matcher annotator(s). */ runPhraseMatcherAnnotator?: boolean; /** * Whether to run the QA annotator. */ runQaAnnotator?: boolean; /** * Whether to run the sentiment annotator. */ runSentimentAnnotator?: boolean; /** * Whether to run the silence annotator. 
*/ runSilenceAnnotator?: boolean; /** * Whether to run the summarization annotator. */ runSummarizationAnnotator?: boolean; /** * Configuration for summarization. * Structure is documented below. */ summarizationConfig?: outputs.contactcenterinsights.AnalysisRuleAnnotatorSelectorSummarizationConfig; } interface AnalysisRuleAnnotatorSelectorQaConfig { /** * Container for a list of scorecards. * Structure is documented below. */ scorecardList?: outputs.contactcenterinsights.AnalysisRuleAnnotatorSelectorQaConfigScorecardList; } interface AnalysisRuleAnnotatorSelectorQaConfigScorecardList { /** * List of QaScorecardRevisions. */ qaScorecardRevisions?: string[]; } interface AnalysisRuleAnnotatorSelectorSummarizationConfig { /** * Resource name of the Dialogflow conversation profile. * Format: * projects/{project}/locations/{location}/conversationProfiles/{conversation_profile} */ conversationProfile?: string; /** * Default summarization model to be used. * Possible values: * SUMMARIZATION_MODEL_UNSPECIFIED * BASELINE_MODEL * BASELINE_MODEL_V2_0 * Possible values are: `BASELINE_MODEL`, `BASELINE_MODEL_V2_0`. */ summarizationModel?: string; } } export declare namespace container { interface AttachedClusterAuthorization { /** * Groups that can perform operations as a cluster admin. A managed * ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole * to the groups. Up to ten admin groups can be provided. * For more info on RBAC, see * https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles */ adminGroups?: string[]; /** * Users that can perform operations as a cluster admin. A managed * ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole * to the users. Up to ten admin users can be provided. 
* For more info on RBAC, see * https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles */ adminUsers?: string[]; } interface AttachedClusterBinaryAuthorization { /** * Configure Binary Authorization evaluation mode. * Possible values are: `DISABLED`, `PROJECT_SINGLETON_POLICY_ENFORCE`. */ evaluationMode?: string; } interface AttachedClusterError { /** * Human-friendly description of the error. */ message?: string; } interface AttachedClusterFleet { /** * (Output) * The name of the managed Hub Membership resource associated to this * cluster. Membership names are formatted as * projects//locations/global/membership/. */ membership: string; /** * The number of the Fleet host project where this cluster will be registered. */ project: string; } interface AttachedClusterLoggingConfig { /** * The configuration of the logging components * Structure is documented below. */ componentConfig?: outputs.container.AttachedClusterLoggingConfigComponentConfig; } interface AttachedClusterLoggingConfigComponentConfig { /** * The components to be enabled. * Each value may be one of: `SYSTEM_COMPONENTS`, `WORKLOADS`. */ enableComponents?: string[]; } interface AttachedClusterMonitoringConfig { /** * Enable Google Cloud Managed Service for Prometheus in the cluster. * Structure is documented below. */ managedPrometheusConfig?: outputs.container.AttachedClusterMonitoringConfigManagedPrometheusConfig; } interface AttachedClusterMonitoringConfigManagedPrometheusConfig { /** * Enable Managed Collection. */ enabled?: boolean; } interface AttachedClusterOidcConfig { /** * A JSON Web Token (JWT) issuer URI. `issuer` must start with `https://` */ issuerUrl: string; /** * OIDC verification keys in JWKS format (RFC 7517). */ jwks?: string; } interface AttachedClusterProxyConfig { /** * The Kubernetes Secret resource that contains the HTTP(S) proxy configuration. * Structure is documented below. 
*/ kubernetesSecret?: outputs.container.AttachedClusterProxyConfigKubernetesSecret; } interface AttachedClusterProxyConfigKubernetesSecret { /** * Name of the kubernetes secret containing the proxy config. */ name: string; /** * Namespace of the kubernetes secret containing the proxy config. */ namespace: string; } interface AttachedClusterSecurityPostureConfig { /** * Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. * Possible values are: `VULNERABILITY_DISABLED`, `VULNERABILITY_ENTERPRISE`. */ vulnerabilityMode: string; } interface AttachedClusterWorkloadIdentityConfig { /** * The ID of the OIDC Identity Provider (IdP) associated to * the Workload Identity Pool. */ identityProvider?: string; /** * The OIDC issuer URL for this cluster. */ issuerUri?: string; /** * The Workload Identity Pool associated to the cluster. */ workloadPool?: string; } interface AwsClusterAuthorization { /** * Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles */ adminGroups?: outputs.container.AwsClusterAuthorizationAdminGroup[]; /** * Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles */ adminUsers: outputs.container.AwsClusterAuthorizationAdminUser[]; } interface AwsClusterAuthorizationAdminGroup { /** * The name of the group, e.g. `my-group@domain.com`. */ group: string; } interface AwsClusterAuthorizationAdminUser { /** * The name of the user, e.g. `my-gcp-id@gmail.com`. 
*/ username: string; } interface AwsClusterBinaryAuthorization { /** * Mode of operation for Binary Authorization policy evaluation. Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE */ evaluationMode: string; } interface AwsClusterControlPlane { /** * Authentication configuration for management of AWS resources. */ awsServicesAuthentication: outputs.container.AwsClusterControlPlaneAwsServicesAuthentication; /** * The ARN of the AWS KMS key used to encrypt cluster configuration. */ configEncryption: outputs.container.AwsClusterControlPlaneConfigEncryption; /** * The ARN of the AWS KMS key used to encrypt cluster secrets. */ databaseEncryption: outputs.container.AwsClusterControlPlaneDatabaseEncryption; /** * The name of the AWS IAM instance profile to assign to each control plane replica. */ iamInstanceProfile: string; /** * Details of placement information for an instance. */ instancePlacement: outputs.container.AwsClusterControlPlaneInstancePlacement; /** * Optional. The AWS instance type. When unspecified, it defaults to `m5.large`. */ instanceType: string; /** * Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type. */ mainVolume: outputs.container.AwsClusterControlPlaneMainVolume; /** * Proxy configuration for outbound HTTP(S) traffic. */ proxyConfig?: outputs.container.AwsClusterControlPlaneProxyConfig; /** * Optional. Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type. */ rootVolume: outputs.container.AwsClusterControlPlaneRootVolume; /** * Optional. 
The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster. */ securityGroupIds?: string[]; /** * Optional. SSH configuration for how to access the underlying control plane machines. */ sshConfig?: outputs.container.AwsClusterControlPlaneSshConfig; /** * The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ). */ subnetIds: string[]; /** * Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. */ tags?: { [key: string]: string; }; /** * The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling . */ version: string; } interface AwsClusterControlPlaneAwsServicesAuthentication { /** * The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account. */ roleArn: string; /** * Optional. An identifier for the assumed role session. When unspecified, it defaults to `multicloud-service-agent`. */ roleSessionName: string; } interface AwsClusterControlPlaneConfigEncryption { /** * The ARN of the AWS KMS key used to encrypt cluster configuration. */ kmsKeyArn: string; } interface AwsClusterControlPlaneDatabaseEncryption { /** * The ARN of the AWS KMS key used to encrypt cluster secrets. */ kmsKeyArn: string; } interface AwsClusterControlPlaneInstancePlacement { /** * The tenancy for the instance. 
Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST */ tenancy: string; } interface AwsClusterControlPlaneMainVolume { /** * Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. */ iops: number; /** * Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. */ kmsKeyArn?: string; /** * Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. */ sizeGib: number; /** * Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125. */ throughput: number; /** * Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 */ volumeType: string; } interface AwsClusterControlPlaneProxyConfig { /** * The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. */ secretArn: string; /** * The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. */ secretVersion: string; } interface AwsClusterControlPlaneRootVolume { /** * Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. */ iops: number; /** * Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. */ kmsKeyArn?: string; /** * Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. */ sizeGib: number; /** * Optional. The throughput to provision for the volume, in MiB/s. 
Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will default to 125. */ throughput: number; /** * Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 */ volumeType: string; } interface AwsClusterControlPlaneSshConfig { /** * The name of the EC2 key pair used to log in to cluster machines. */ ec2KeyPair: string; } interface AwsClusterFleet { /** * The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/. */ membership: string; /** * The number of the Fleet host project where this cluster will be registered. */ project: string; } interface AwsClusterLoggingConfig { /** * Configuration of the logging components. */ componentConfig: outputs.container.AwsClusterLoggingConfigComponentConfig; } interface AwsClusterLoggingConfigComponentConfig { /** * Components of the logging configuration to be enabled. */ enableComponents: string[]; } interface AwsClusterNetworking { /** * Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. Failure to do so may result in unavailable node pools. */ perNodePoolSgRulesDisabled?: boolean; /** * All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. */ podAddressCidrBlocks: string[]; /** * All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. */ serviceAddressCidrBlocks: string[]; /** * The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. 
This field cannot be changed after creation. * * - - - */ vpcId: string; } interface AwsClusterWorkloadIdentityConfig { /** * The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool. */ identityProvider: string; /** * The OIDC issuer URL for this cluster. */ issuerUri: string; /** * The Workload Identity Pool associated to the cluster. */ workloadPool: string; } interface AwsNodePoolAutoscaling { /** * Maximum number of nodes in the NodePool. Must be >= min_node_count. */ maxNodeCount: number; /** * Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. */ minNodeCount: number; } interface AwsNodePoolConfig { /** * Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled. */ autoscalingMetricsCollection?: outputs.container.AwsNodePoolConfigAutoscalingMetricsCollection; /** * The ARN of the AWS KMS key used to encrypt node pool configuration. */ configEncryption: outputs.container.AwsNodePoolConfigConfigEncryption; /** * The name of the AWS IAM role assigned to nodes in the pool. */ iamInstanceProfile: string; /** * The OS image type to use on node pool instances. */ imageType: string; /** * Details of placement information for an instance. */ instancePlacement: outputs.container.AwsNodePoolConfigInstancePlacement; /** * Optional. The AWS instance type. When unspecified, it defaults to `m5.large`. */ instanceType: string; /** * Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * Proxy configuration for outbound HTTP(S) traffic. */ proxyConfig?: outputs.container.AwsNodePoolConfigProxyConfig; /** * Optional. Template for the root volume provisioned for node pool nodes. 
Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type. */ rootVolume: outputs.container.AwsNodePoolConfigRootVolume; /** * Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster. */ securityGroupIds?: string[]; /** * Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instanceType` */ spotConfig?: outputs.container.AwsNodePoolConfigSpotConfig; /** * Optional. The SSH configuration. */ sshConfig?: outputs.container.AwsNodePoolConfigSshConfig; /** * Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. */ tags?: { [key: string]: string; }; /** * Optional. The initial taints assigned to nodes of this node pool. */ taints?: outputs.container.AwsNodePoolConfigTaint[]; } interface AwsNodePoolConfigAutoscalingMetricsCollection { /** * The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is "1Minute". */ granularity: string; /** * The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. If you specify granularity and don't specify any metrics, all metrics are enabled. */ metrics?: string[]; } interface AwsNodePoolConfigConfigEncryption { /** * The ARN of the AWS KMS key used to encrypt node pool configuration. */ kmsKeyArn: string; } interface AwsNodePoolConfigInstancePlacement { /** * The tenancy for the instance. 
Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST */ tenancy: string; } interface AwsNodePoolConfigProxyConfig { /** * The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. */ secretArn: string; /** * The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. */ secretVersion: string; } interface AwsNodePoolConfigRootVolume { /** * Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. */ iops: number; /** * Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. */ kmsKeyArn?: string; /** * Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. */ sizeGib: number; /** * Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will default to 125. */ throughput: number; /** * Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 */ volumeType: string; } interface AwsNodePoolConfigSpotConfig { /** * List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory */ instanceTypes: string[]; } interface AwsNodePoolConfigSshConfig { /** * The name of the EC2 key pair used to log in to cluster machines. */ ec2KeyPair: string; } interface AwsNodePoolConfigTaint { /** * The taint effect. 
Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE */ effect: string; /** * Key for the taint. */ key: string; /** * Value for the taint. */ value: string; } interface AwsNodePoolKubeletConfig { /** * Whether or not to enable CPU CFS quota. Defaults to true. */ cpuCfsQuota: boolean; /** * Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". */ cpuCfsQuotaPeriod?: string; /** * The CpuManagerPolicy to use for the node. Defaults to "none". */ cpuManagerPolicy: string; /** * Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. */ podPidsLimit?: number; } interface AwsNodePoolManagement { /** * Optional. Whether or not the nodes will be automatically repaired. */ autoRepair: boolean; } interface AwsNodePoolMaxPodsConstraint { /** * The maximum number of pods to schedule on a single node. * * - - - */ maxPodsPerNode: number; } interface AwsNodePoolUpdateSettings { /** * Optional. Settings for surge update. */ surgeSettings: outputs.container.AwsNodePoolUpdateSettingsSurgeSettings; } interface AwsNodePoolUpdateSettingsSurgeSettings { /** * Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process. */ maxSurge: number; /** * Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. A node is considered unavailable if its status is not Ready. */ maxUnavailable: number; } interface AzureClusterAuthorization { /** * Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. 
For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles */ adminGroups?: outputs.container.AzureClusterAuthorizationAdminGroup[]; /** * Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles */ adminUsers: outputs.container.AzureClusterAuthorizationAdminUser[]; } interface AzureClusterAuthorizationAdminGroup { /** * The name of the group, e.g. `my-group@domain.com`. */ group: string; } interface AzureClusterAuthorizationAdminUser { /** * The name of the user, e.g. `my-gcp-id@gmail.com`. */ username: string; } interface AzureClusterAzureServicesAuthentication { /** * The Azure Active Directory Application ID for Authentication configuration. */ applicationId: string; /** * The Azure Active Directory Tenant ID for Authentication configuration. */ tenantId: string; } interface AzureClusterControlPlane { /** * Optional. Configuration related to application-layer secrets encryption. */ databaseEncryption?: outputs.container.AzureClusterControlPlaneDatabaseEncryption; /** * Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to a 8-GiB Azure Disk. */ mainVolume: outputs.container.AzureClusterControlPlaneMainVolume; /** * Proxy configuration for outbound HTTP(S) traffic. */ proxyConfig?: outputs.container.AzureClusterControlPlaneProxyConfig; /** * Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replicaPlacements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible. 
*/ replicaPlacements?: outputs.container.AzureClusterControlPlaneReplicaPlacement[]; /** * Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk. */ rootVolume: outputs.container.AzureClusterControlPlaneRootVolume; /** * SSH configuration for how to access the underlying control plane machines. */ sshConfig: outputs.container.AzureClusterControlPlaneSshConfig; /** * The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`. */ subnetId: string; /** * Optional. A set of tags to apply to all underlying control plane Azure resources. */ tags?: { [key: string]: string; }; /** * The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig. */ version: string; /** * Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`. */ vmSize: string; } interface AzureClusterControlPlaneDatabaseEncryption { /** * The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported. */ keyId: string; } interface AzureClusterControlPlaneMainVolume { /** * Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. */ sizeGib: number; } interface AzureClusterControlPlaneProxyConfig { /** * The ARM ID of the resource group containing proxy keyvault. 
Resource group ids are formatted as `/subscriptions//resourceGroups/` */ resourceGroupId: string; /** * The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`. */ secretId: string; } interface AzureClusterControlPlaneReplicaPlacement { /** * For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk. */ azureAvailabilityZone: string; /** * For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration. */ subnetId: string; } interface AzureClusterControlPlaneRootVolume { /** * Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. */ sizeGib: number; } interface AzureClusterControlPlaneSshConfig { /** * The SSH public key data for VMs managed by Anthos. This accepts the authorizedKeys file format used in OpenSSH according to the sshd(8) manual page. */ authorizedKey: string; } interface AzureClusterFleet { /** * The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/. */ membership: string; /** * The number of the Fleet host project where this cluster will be registered. */ project: string; } interface AzureClusterLoggingConfig { /** * Configuration of the logging components. */ componentConfig: outputs.container.AzureClusterLoggingConfigComponentConfig; } interface AzureClusterLoggingConfigComponentConfig { /** * Components of the logging configuration to be enabled. */ enableComponents: string[]; } interface AzureClusterNetworking { /** * The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. 
This field cannot be changed after creation. */ podAddressCidrBlocks: string[]; /** * The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster. */ serviceAddressCidrBlocks: string[]; /** * The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation. * * - - - */ virtualNetworkId: string; } interface AzureClusterWorkloadIdentityConfig { /** * The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool. */ identityProvider: string; /** * The OIDC issuer URL for this cluster. */ issuerUri: string; /** * The Workload Identity Pool associated to the cluster. */ workloadPool: string; } interface AzureNodePoolAutoscaling { /** * Maximum number of nodes in the node pool. Must be >= min_node_count. */ maxNodeCount: number; /** * Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count. */ minNodeCount: number; } interface AzureNodePoolConfig { /** * The OS image type to use on node pool instances. */ imageType: string; /** * Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * Proxy configuration for outbound HTTP(S) traffic. */ proxyConfig?: outputs.container.AzureNodePoolConfigProxyConfig; /** * Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk. 
*/ rootVolume: outputs.container.AzureNodePoolConfigRootVolume; /** * SSH configuration for how to access the node pool machines. */ sshConfig: outputs.container.AzureNodePoolConfigSshConfig; /** * Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. */ tags?: { [key: string]: string; }; /** * Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. When unspecified, it defaults to `Standard_DS2_v2`. */ vmSize: string; } interface AzureNodePoolConfigProxyConfig { /** * The ARM ID of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/` */ resourceGroupId: string; /** * The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`. */ secretId: string; } interface AzureNodePoolConfigRootVolume { /** * Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. */ sizeGib: number; } interface AzureNodePoolConfigSshConfig { /** * The SSH public key data for VMs managed by Anthos. This accepts the authorizedKeys file format used in OpenSSH according to the sshd(8) manual page. */ authorizedKey: string; } interface AzureNodePoolManagement { /** * Optional. Whether or not the nodes will be automatically repaired. */ autoRepair: boolean; } interface AzureNodePoolMaxPodsConstraint { /** * The maximum number of pods to schedule on a single node. * * - - - */ maxPodsPerNode: number; } interface ClusterAddonsConfig { /** * . Structure is documented below. */ cloudrunConfig: outputs.container.ClusterAddonsConfigCloudrunConfig; /** * . 
* The status of the ConfigConnector addon. It is disabled by default; Set `enabled = true` to enable. */ configConnectorConfig: outputs.container.ClusterAddonsConfigConfigConnectorConfig; /** * . * The status of the NodeLocal DNSCache addon. It is disabled by default. * Set `enabled = true` to enable. * * **Enabling/Disabling NodeLocal DNSCache in an existing cluster is a disruptive operation. * All cluster nodes running GKE 1.15 and higher are recreated.** */ dnsCacheConfig: outputs.container.ClusterAddonsConfigDnsCacheConfig; /** * . * Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Set `enabled = true` to enable. * * **Note:** The Compute Engine persistent disk CSI Driver is enabled by default on newly created clusters for the following versions: Linux clusters: GKE version 1.18.10-gke.2100 or later, or 1.19.3-gke.2100 or later. */ gcePersistentDiskCsiDriverConfig: outputs.container.ClusterAddonsConfigGcePersistentDiskCsiDriverConfig; /** * The status of the Filestore CSI driver addon, * which allows the usage of filestore instance as volumes. * It is disabled by default; set `enabled = true` to enable. */ gcpFilestoreCsiDriverConfig: outputs.container.ClusterAddonsConfigGcpFilestoreCsiDriverConfig; /** * The status of the GCSFuse CSI driver addon, * which allows the usage of a gcs bucket as volumes. * It is disabled by default for Standard clusters; set `enabled = true` to enable. * It is enabled by default for Autopilot clusters with version 1.24 or later; set `enabled = true` to enable it explicitly. * See [Enable the Cloud Storage FUSE CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/cloud-storage-fuse-csi-driver#enable) for more information. */ gcsFuseCsiDriverConfig: outputs.container.ClusterAddonsConfigGcsFuseCsiDriverConfig; /** * . * The status of the Backup for GKE agent addon. It is disabled by default; Set `enabled = true` to enable. 
*/ gkeBackupAgentConfig: outputs.container.ClusterAddonsConfigGkeBackupAgentConfig; /** * The status of the Horizontal Pod Autoscaling * addon, which increases or decreases the number of replica pods a replication controller * has based on the resource usage of the existing pods. * It is enabled by default; * set `disabled = true` to disable. */ horizontalPodAutoscaling: outputs.container.ClusterAddonsConfigHorizontalPodAutoscaling; /** * The status of the HTTP (L7) load balancing * controller addon, which makes it easy to set up HTTP load balancers for services in a * cluster. It is enabled by default; set `disabled = true` to disable. */ httpLoadBalancing: outputs.container.ClusterAddonsConfigHttpLoadBalancing; /** * ). * Structure is documented below. */ istioConfig: outputs.container.ClusterAddonsConfigIstioConfig; /** * ). * Configuration for the KALM addon, which manages the lifecycle of k8s. It is disabled by default; Set `enabled = true` to enable. */ kalmConfig: outputs.container.ClusterAddonsConfigKalmConfig; /** * The status of the Lustre CSI driver addon, * which allows the usage of Lustre instances as volumes. * It is disabled by default for Standard clusters; set `enabled = true` to enable. * It is disabled by default for Autopilot clusters; set `enabled = true` to enable. * Lustre CSI Driver Config has optional subfield * `enableLegacyLustrePort` which allows the Lustre CSI driver to initialize LNet (the virtual network layer for Lustre kernel module) using port 6988. * This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes. * See [Enable Lustre CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/lustre-csi-driver-new-volume) for more information. */ lustreCsiDriverConfig: outputs.container.ClusterAddonsConfigLustreCsiDriverConfig; /** * Whether we should enable the network policy addon * for the master. This must be enabled in order to enable network policy for the nodes. 
* To enable this, you must also define a `networkPolicy` block, * otherwise nothing will happen. * It can only be disabled if the nodes already do not have network policies enabled. * Defaults to disabled; set `disabled = false` to enable. */ networkPolicyConfig: outputs.container.ClusterAddonsConfigNetworkPolicyConfig; /** * The status of the Parallelstore CSI driver addon, * which allows the usage of Parallelstore instances as volumes. * It is disabled by default for Standard clusters; set `enabled = true` to enable. * It is enabled by default for Autopilot clusters with version 1.29 or later; set `enabled = true` to enable it explicitly. * See [Enable the Parallelstore CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/parallelstore-csi-new-volume#enable) for more information. */ parallelstoreCsiDriverConfig: outputs.container.ClusterAddonsConfigParallelstoreCsiDriverConfig; /** * ) The status of the Pod Snapshot addon. It is disabled by default. Set `enabled = true` to enable. * * This example `addonsConfig` disables two addons: */ podSnapshotConfig: outputs.container.ClusterAddonsConfigPodSnapshotConfig; /** * . The status of the [Ray Operator * addon](https://cloud.google.com/kubernetes-engine/docs/add-on/ray-on-gke/concepts/overview). * It is disabled by default. Set `enabled = true` to enable. The minimum * cluster version to enable Ray is 1.30.0-gke.1747000. * * Ray Operator config has optional subfields * `ray_cluster_logging_config.enabled` and * `ray_cluster_monitoring_config.enabled` which control Ray Cluster logging * and monitoring respectively. See [Collect and view logs and metrics for Ray * clusters on * GKE](https://cloud.google.com/kubernetes-engine/docs/add-on/ray-on-gke/how-to/collect-view-logs-metrics) * for more information. */ rayOperatorConfigs: outputs.container.ClusterAddonsConfigRayOperatorConfig[]; /** * The status of the Slice Controller addon. 
It is disabled by default; set enabled = true to enable. */ sliceControllerConfig: outputs.container.ClusterAddonsConfigSliceControllerConfig; /** * . * The status of the Stateful HA addon, which provides automatic configurable failover for stateful applications. * It is disabled by default for Standard clusters. Set `enabled = true` to enable. */ statefulHaConfig: outputs.container.ClusterAddonsConfigStatefulHaConfig; } interface ClusterAddonsConfigCloudrunConfig { /** * The status of the CloudRun addon. It is disabled by default. Set `disabled=false` to enable. */ disabled: boolean; /** * The load balancer type of CloudRun ingress service. It is external load balancer by default. * Set `load_balancer_type=LOAD_BALANCER_TYPE_INTERNAL` to configure it as internal load balancer. */ loadBalancerType?: string; } interface ClusterAddonsConfigConfigConnectorConfig { enabled: boolean; } interface ClusterAddonsConfigDnsCacheConfig { enabled: boolean; } interface ClusterAddonsConfigGcePersistentDiskCsiDriverConfig { enabled: boolean; } interface ClusterAddonsConfigGcpFilestoreCsiDriverConfig { enabled: boolean; } interface ClusterAddonsConfigGcsFuseCsiDriverConfig { enabled: boolean; } interface ClusterAddonsConfigGkeBackupAgentConfig { enabled: boolean; } interface ClusterAddonsConfigHorizontalPodAutoscaling { /** * Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic * * The `clusterTelemetry` block supports */ disabled: boolean; } interface ClusterAddonsConfigHttpLoadBalancing { /** * Whether the cluster disables default in-node sNAT rules. 
In-node sNAT rules will be disabled when defaultSnatStatus is disabled.When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic * * The `clusterTelemetry` block supports */ disabled: boolean; } interface ClusterAddonsConfigIstioConfig { /** * The authentication type between services in Istio. Available options include `AUTH_MUTUAL_TLS`. */ auth?: string; /** * The status of the Istio addon, which makes it easy to set up Istio for services in a * cluster. It is disabled by default. Set `disabled = false` to enable. */ disabled: boolean; } interface ClusterAddonsConfigKalmConfig { enabled: boolean; } interface ClusterAddonsConfigLustreCsiDriverConfig { /** * If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988. * This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes. */ enableLegacyLustrePort?: boolean; /** * Whether the Lustre CSI driver is enabled for this cluster. */ enabled: boolean; } interface ClusterAddonsConfigNetworkPolicyConfig { /** * Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic * * The `clusterTelemetry` block supports */ disabled: boolean; } interface ClusterAddonsConfigParallelstoreCsiDriverConfig { enabled: boolean; } interface ClusterAddonsConfigPodSnapshotConfig { /** * Whether the Pod Snapshot feature is enabled for this cluster. */ enabled: boolean; } interface ClusterAddonsConfigRayOperatorConfig { enabled: boolean; /** * The status of Ray Logging, which scrapes Ray cluster logs to Cloud Logging. Defaults to disabled; set enabled = true to enable. 
*/ rayClusterLoggingConfig: outputs.container.ClusterAddonsConfigRayOperatorConfigRayClusterLoggingConfig; /** * The status of Ray Cluster monitoring, which shows Ray cluster metrics in Cloud Console. Defaults to disabled; set enabled = true to enable. */ rayClusterMonitoringConfig: outputs.container.ClusterAddonsConfigRayOperatorConfigRayClusterMonitoringConfig; } interface ClusterAddonsConfigRayOperatorConfigRayClusterLoggingConfig { enabled: boolean; } interface ClusterAddonsConfigRayOperatorConfigRayClusterMonitoringConfig { enabled: boolean; } interface ClusterAddonsConfigSliceControllerConfig { enabled: boolean; } interface ClusterAddonsConfigStatefulHaConfig { enabled: boolean; } interface ClusterAnonymousAuthenticationConfig { /** * Sets or removes authentication restrictions. Available options include `LIMITED` and `ENABLED`. */ mode: string; } interface ClusterAuthenticatorGroupsConfig { /** * The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format `gke-security-groups@yourdomain.com`. */ securityGroup: string; } interface ClusterBinaryAuthorization { /** * Enable Binary Authorization for this cluster. * * @deprecated Deprecated in favor of evaluation_mode. */ enabled?: boolean; /** * Mode of operation for Binary Authorization policy evaluation. */ evaluationMode: string; } interface ClusterClusterAutoscaling { /** * Contains defaults for a node pool created by NAP. A subset of fields also apply to * GKE Autopilot clusters. * Structure is documented below. */ autoProvisioningDefaults: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaults; /** * The list of Google Compute Engine * [zones](https://cloud.google.com/compute/docs/zones#available) in which the * NodePool's nodes can be created by NAP. 
*/ autoProvisioningLocations: string[]; /** * Configuration * options for the [Autoscaling profile](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles) * feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability * when deciding to remove nodes from a cluster. Can be `BALANCED` or `OPTIMIZE_UTILIZATION`. Defaults to `BALANCED`. */ autoscalingProfile?: string; /** * Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overridden. */ defaultComputeClassEnabled?: boolean; /** * Whether node auto-provisioning is enabled. Must be supplied for GKE Standard clusters, `true` is implied * for autopilot clusters. Resource limits for `cpu` and `memory` must be defined to enable node auto-provisioning for GKE Standard. */ enabled: boolean; /** * Global constraints for machine resources in the * cluster. Configuring the `cpu` and `memory` types is required if node * auto-provisioning is enabled. These limits will apply to node pool autoscaling * in addition to node auto-provisioning. Limits can't be unset entirely, they can only be replaced. Structure is documented below. */ resourceLimits: outputs.container.ClusterClusterAutoscalingResourceLimit[]; } interface ClusterClusterAutoscalingAutoProvisioningDefaults { /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption */ bootDiskKmsKey?: string; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. 
Defaults to `100` */ diskSize?: number; /** * Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd', 'pd-balanced', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. */ diskType?: string; /** * The default image type used by NAP once a new node pool is being created. Please note that according to the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning#default-image-type) the value must be one of the [COS_CONTAINERD, COS, UBUNTU_CONTAINERD, UBUNTU]. __NOTE__ : COS AND UBUNTU are deprecated as of `GKE 1.24` */ imageType?: string; /** * NodeManagement configuration for this NodePool. Structure is documented below. */ management: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaultsManagement; /** * ) * Minimum CPU platform to be used for NAP created node pools. The instance may be scheduled on the * specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such * as "Intel Haswell" or "Intel Sandy Bridge". */ minCpuPlatform?: string; /** * Scopes that are used by NAP and GKE Autopilot when creating node pools. Use the "https://www.googleapis.com/auth/cloud-platform" scope to grant access to all APIs. It is recommended that you set `serviceAccount` to a non-default service account and grant IAM roles to that service account for only the resources that it needs. * * > `monitoring.write` is always enabled regardless of user input. `monitoring` and `logging.write` may also be enabled depending on the values for `monitoringService` and `loggingService`. */ oauthScopes: string[]; /** * The `email` of the Google Cloud Platform Service Account to be used by the node VMs created by GKE Autopilot or NAP. */ serviceAccount?: string; /** * Shielded Instance options. Structure is documented below. 
*/ shieldedInstanceConfig?: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaultsShieldedInstanceConfig; /** * Specifies the upgrade settings for NAP created node pools */ upgradeSettings: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaultsUpgradeSettings; } interface ClusterClusterAutoscalingAutoProvisioningDefaultsManagement { /** * Specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered. * * This block also contains several computed attributes, documented below. */ autoRepair: boolean; /** * Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes. */ autoUpgrade: boolean; /** * Specifies the Auto Upgrade knobs for the node pool. */ upgradeOptions: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOption[]; } interface ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOption { /** * This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format. */ autoUpgradeStartTime: string; /** * Description of the cluster. */ description: string; } interface ClusterClusterAutoscalingAutoProvisioningDefaultsShieldedInstanceConfig { /** * Defines if the instance has integrity monitoring enabled. * * Enables monitoring and attestation of the boot integrity of the instance. The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created. Defaults to `true`. */ enableIntegrityMonitoring?: boolean; /** * Defines if the instance has Secure Boot enabled. 
* * Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. Defaults to `false`. */ enableSecureBoot?: boolean; } interface ClusterClusterAutoscalingAutoProvisioningDefaultsUpgradeSettings { /** * Settings for blue-green upgrade strategy. To be specified when strategy is set to BLUE_GREEN. Structure is documented below. */ blueGreenSettings: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaultsUpgradeSettingsBlueGreenSettings; /** * The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process. To be used when strategy is set to SURGE. Default is 0. */ maxSurge?: number; /** * The maximum number of nodes that can be simultaneously unavailable during the upgrade process. To be used when strategy is set to SURGE. Default is 0. */ maxUnavailable?: number; /** * Strategy used for node pool update. Strategy can only be one of BLUE_GREEN or SURGE. The default is value is SURGE. */ strategy: string; } interface ClusterClusterAutoscalingAutoProvisioningDefaultsUpgradeSettingsBlueGreenSettings { /** * Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ nodePoolSoakDuration: string; /** * Standard policy for the blue-green upgrade. To be specified when strategy is set to BLUE_GREEN. Structure is documented below. */ standardRolloutPolicy: outputs.container.ClusterClusterAutoscalingAutoProvisioningDefaultsUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy; } interface ClusterClusterAutoscalingAutoProvisioningDefaultsUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy { /** * Number of blue nodes to drain in a batch. Only one of the batchPercentage or batchNodeCount can be specified. 
*/ batchNodeCount: number; /** * Percentage of the bool pool nodes to drain in a batch. The range of this field should be (0.0, 1.0). Only one of the batchPercentage or batchNodeCount can be specified. */ batchPercentage: number; /** * Soak time after each batch gets drained. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`. */ batchSoakDuration?: string; } interface ClusterClusterAutoscalingResourceLimit { /** * Maximum amount of the resource in the cluster. */ maximum: number; /** * Minimum amount of the resource in the cluster. */ minimum?: number; /** * The type of the resource. For example, `cpu` and * `memory`. See the [guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning) * for a list of types. */ resourceType: string; } interface ClusterClusterTelemetry { /** * Telemetry integration for the cluster. Supported values (`ENABLED, DISABLED, SYSTEM_ONLY`); * `SYSTEM_ONLY` (Only system components are monitored and logged) is only available in GKE versions 1.15 and later. */ type: string; } interface ClusterConfidentialNodes { /** * Defines the type of technology used * by the confidential node. */ confidentialInstanceType?: string; /** * Enable Confidential GKE Nodes for this cluster, to * enforce encryption of data in-use. */ enabled: boolean; } interface ClusterControlPlaneEndpointsConfig { /** * DNS endpoint configuration. */ dnsEndpointConfig: outputs.container.ClusterControlPlaneEndpointsConfigDnsEndpointConfig; /** * IP endpoint configuration. */ ipEndpointsConfig: outputs.container.ClusterControlPlaneEndpointsConfigIpEndpointsConfig; } interface ClusterControlPlaneEndpointsConfigDnsEndpointConfig { /** * Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false. */ allowExternalTraffic?: boolean; /** * Controls whether the k8s certs auth is allowed via Dns. 
*/ enableK8sCertsViaDns?: boolean; /** * Controls whether the k8s token auth is allowed via Dns. */ enableK8sTokensViaDns?: boolean; /** * The cluster's DNS endpoint. */ endpoint: string; } interface ClusterControlPlaneEndpointsConfigIpEndpointsConfig { /** * Controls whether to allow direct IP access. Defaults to `true`. */ enabled?: boolean; } interface ClusterCostManagementConfig { /** * Whether to enable the [cost allocation](https://cloud.google.com/kubernetes-engine/docs/how-to/cost-allocations) feature. */ enabled: boolean; } interface ClusterDatabaseEncryption { /** * the key to use to encrypt/decrypt secrets. See the [DatabaseEncryption definition](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.DatabaseEncryption) for more information. * * The `enableK8sBetaApis` block supports: */ keyName?: string; /** * `ENCRYPTED` or `DECRYPTED` */ state: string; } interface ClusterDefaultSnatStatus { /** * Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic * * The `clusterTelemetry` block supports */ disabled: boolean; } interface ClusterDnsConfig { /** * This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work `clusterDns = "CLOUD_DNS"` and `clusterDnsScope = "CLUSTER_SCOPE"` must both be set as well. */ additiveVpcScopeDnsDomain?: string; /** * Which in-cluster DNS provider should be used. `PROVIDER_UNSPECIFIED` (default) or `PLATFORM_DEFAULT` or `CLOUD_DNS` or `KUBE_DNS`. */ clusterDns?: string; /** * The suffix used for all cluster service records. */ clusterDnsDomain?: string; /** * The scope of access to cluster DNS records. `DNS_SCOPE_UNSPECIFIED` or `CLUSTER_SCOPE` or `VPC_SCOPE`. 
If the `clusterDns` field is set to `CLOUD_DNS`, `DNS_SCOPE_UNSPECIFIED` and empty/null behave like `CLUSTER_SCOPE`. */ clusterDnsScope?: string; } interface ClusterEnableK8sBetaApis { /** * Enabled Kubernetes Beta APIs. */ enabledApis: string[]; } interface ClusterEnterpriseConfig { /** * Indicates the effective cluster tier. Available options include STANDARD and ENTERPRISE. * * @deprecated GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release */ clusterTier: string; /** * (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change. * * @deprecated GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release */ desiredTier: string; } interface ClusterFleet { /** * Full resource name of the registered fleet membership of the cluster. */ membership: string; /** * Short name of the fleet membership, for example "member-1". */ membershipId: string; /** * Location of the fleet membership, for example "us-central1". */ membershipLocation: string; /** * Sets the membership type of the cluster. Available option is `LIGHTWEIGHT` to support only lightweight compatible features. If unspecified, the membershipType will be a regular membership that supports all features. */ membershipType?: string; /** * Whether the cluster has been registered via the fleet API. */ preRegistered: boolean; /** * The name of the Fleet host project where this cluster will be registered. */ project?: string; } interface ClusterGatewayApiConfig { /** * Which Gateway Api channel should be used. `CHANNEL_DISABLED`, `CHANNEL_EXPERIMENTAL` or `CHANNEL_STANDARD`. 
*/ channel: string; } interface ClusterGkeAutoUpgradeConfig { /** * The selected patch mode. * Accepted values are: * * ACCELERATED: Upgrades to the latest available patch version in a given minor and release channel. */ patchMode: string; } interface ClusterIdentityServiceConfig { /** * Whether to enable the Identity Service component. It is disabled by default. Set `enabled=true` to enable. */ enabled?: boolean; } interface ClusterIpAllocationPolicy { /** * The configuration for individual additional subnetworks attached to the cluster. * Structure is documented below. */ additionalIpRangesConfigs?: outputs.container.ClusterIpAllocationPolicyAdditionalIpRangesConfig[]; /** * The configuration for additional pod secondary ranges at * the cluster level. Used for Autopilot clusters and Standard clusters with which control of the * secondary Pod IP address assignment to node pools isn't needed. Structure is documented below. */ additionalPodRangesConfig?: outputs.container.ClusterIpAllocationPolicyAdditionalPodRangesConfig; /** * All the information related to Auto IPAM. Structure is documented below */ autoIpamConfig: outputs.container.ClusterIpAllocationPolicyAutoIpamConfig; /** * The IP address range for the cluster pod IPs. * Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) * to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) * from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to * pick a specific range to use. */ clusterIpv4CidrBlock: string; /** * The name of the existing secondary * range in the cluster's subnetwork to use for pod IP addresses. Alternatively, * `clusterIpv4CidrBlock` can be used to automatically create a GKE-managed one. */ clusterSecondaryRangeName: string; /** * Contains network tier information. 
Structure is documented below * * The auto ipam config supports: */ networkTierConfig: outputs.container.ClusterIpAllocationPolicyNetworkTierConfig; /** * Configuration for cluster level pod cidr overprovision. Default is disabled=false. */ podCidrOverprovisionConfig: outputs.container.ClusterIpAllocationPolicyPodCidrOverprovisionConfig; /** * The IP address range of the services IPs in this cluster. * Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) * to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) * from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to * pick a specific range to use. */ servicesIpv4CidrBlock: string; /** * The name of the existing * secondary range in the cluster's subnetwork to use for service `ClusterIP`s. * Alternatively, `servicesIpv4CidrBlock` can be used to automatically create a * GKE-managed one. */ servicesSecondaryRangeName: string; /** * The IP Stack Type of the cluster. * Default value is `IPV4`. * Possible values are `IPV4` and `IPV4_IPV6`. */ stackType?: string; } interface ClusterIpAllocationPolicyAdditionalIpRangesConfig { /** * List of secondary ranges names within this subnetwork that can be used for pod IPs. */ podIpv4RangeNames?: string[]; /** * Status of the subnetwork. Additional subnet with DRAINING status will not be selected during new node pool creation * Accepted values are: * * `ACTIVE`: ACTIVE status indicates that the subnet is available for new node pool creation. * * `DRAINING`: DRAINING status indicates that the subnet is not used for new node pool creation. */ status?: string; /** * Name of the subnetwork. This can be the full path of the subnetwork or just the name. */ subnetwork: string; } interface ClusterIpAllocationPolicyAdditionalPodRangesConfig { /** * The names of the Pod ranges to add to the cluster. 
*/ podRangeNames: string[]; } interface ClusterIpAllocationPolicyAutoIpamConfig { /** * The flag that enables Auto IPAM on this cluster. */ enabled: boolean; } interface ClusterIpAllocationPolicyNetworkTierConfig { /** * Network tier configuration. * Accepted values are: * * `NETWORK_TIER_DEFAULT`: (Default) Use project-level configuration. * * `NETWORK_TIER_PREMIUM`: Premium network tier. * * `NETWORK_TIER_STANDARD`: Standard network tier. */ networkTier: string; } interface ClusterIpAllocationPolicyPodCidrOverprovisionConfig { /** * Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic * * The `clusterTelemetry` block supports */ disabled: boolean; } interface ClusterLoggingConfig { /** * The GKE components exposing logs. Supported values include: * `SYSTEM_COMPONENTS`, `APISERVER`, `CONTROLLER_MANAGER`, `SCHEDULER`, and `WORKLOADS`. */ enableComponents: string[]; } interface ClusterMaintenancePolicy { /** * Time window specified for daily maintenance operations. * Specify `startTime` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MMā€, * where HH : \[00-23\] and MM : \[00-59\] GMT. For example: * * Examples: */ dailyMaintenanceWindow?: outputs.container.ClusterMaintenancePolicyDailyMaintenanceWindow; /** * Exceptions to maintenance window. Non-emergency maintenance should not occur in these windows. A cluster can have up to 20 maintenance exclusions at a time [Maintenance Window and Exclusions](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions) */ maintenanceExclusions?: outputs.container.ClusterMaintenancePolicyMaintenanceExclusion[]; /** * Time window for recurring maintenance operations. * * Specify `startTime` and `endTime` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) "Zulu" date format. 
The start time's date is * the initial date that the window starts, and the end time is used for calculating duration. Specify `recurrence` in * [RFC5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) RRULE format, to specify when this recurs. * Note that GKE may accept other formats, but will return values in UTC, causing a permanent diff. * * Examples: * ``` * maintenance_policy { * recurring_window { * start_time = "2019-08-01T02:00:00Z" * end_time = "2019-08-01T06:00:00Z" * recurrence = "FREQ=DAILY" * } * } * ``` * * ``` * maintenance_policy { * recurring_window { * start_time = "2019-01-01T09:00:00Z" * end_time = "2019-01-01T17:00:00Z" * recurrence = "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR" * } * } * ``` */ recurringWindow?: outputs.container.ClusterMaintenancePolicyRecurringWindow; } interface ClusterMaintenancePolicyDailyMaintenanceWindow { duration: string; startTime: string; } interface ClusterMaintenancePolicyMaintenanceExclusion { endTime?: string; exclusionName: string; /** * MaintenanceExclusionOptions provides maintenance exclusion related options. */ exclusionOptions?: outputs.container.ClusterMaintenancePolicyMaintenanceExclusionExclusionOptions; startTime: string; } interface ClusterMaintenancePolicyMaintenanceExclusionExclusionOptions { /** * The exclusion window end time behavior. One of: **UNTIL_END_OF_SUPPORT**. One and and one of `endTimeBehavior` and `endTime` should be specified. * * Specify `startTime` and `endTime` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) "Zulu" date format. The start time's date is * the initial date that the window starts, and the end time is used for calculating duration.Specify `recurrence` in * [RFC5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) RRULE format, to specify when this recurs. * Note that GKE may accept other formats, but will return values in UTC, causing a permanent diff. 
* * Examples: * * ``` * maintenance_policy { * recurring_window { * start_time = "2019-01-01T00:00:00Z" * end_time = "2019-01-02T00:00:00Z" * recurrence = "FREQ=DAILY" * } * maintenance_exclusion{ * exclusion_name = "batch job" * start_time = "2019-01-01T00:00:00Z" * end_time = "2019-01-02T00:00:00Z" * exclusion_options { * scope = "NO_UPGRADES" * } * } * maintenance_exclusion{ * exclusion_name = "holiday data load" * start_time = "2019-05-01T00:00:00Z" * exclusion_options { * scope = "NO_MINOR_UPGRADES" * end_time_behavior = "UNTIL_END_OF_SUPPORT" * } * } * } * ``` */ endTimeBehavior?: string; /** * The scope of automatic upgrades to restrict in the exclusion window. One of: **NO_UPGRADES | NO_MINOR_UPGRADES | NO_MINOR_OR_NODE_UPGRADES** */ scope: string; } interface ClusterMaintenancePolicyRecurringWindow { endTime: string; recurrence: string; startTime: string; } interface ClusterManagedOpentelemetryConfig { /** * The scope of the Managed OpenTelemetry pipeline. Supported values include: `SCOPE_UNSPECIFIED`, `NONE`, `COLLECTION_AND_INSTRUMENTATION_COMPONENTS`. */ scope?: string; } interface ClusterMasterAuth { /** * Base64 encoded public certificate used by clients to authenticate to the cluster endpoint. */ clientCertificate: string; /** * Whether client certificate authorization is enabled for this cluster. For example: */ clientCertificateConfig: outputs.container.ClusterMasterAuthClientCertificateConfig; /** * Base64 encoded private key used by clients to authenticate to the cluster endpoint. */ clientKey: string; /** * Base64 encoded public certificate that is the root of trust for the cluster. */ clusterCaCertificate: string; } interface ClusterMasterAuthClientCertificateConfig { /** * Whether client certificate authorization is enabled for this cluster. */ issueClientCertificate: boolean; } interface ClusterMasterAuthorizedNetworksConfig { /** * External networks that can access the * Kubernetes cluster master through HTTPS. 
*/ cidrBlocks?: outputs.container.ClusterMasterAuthorizedNetworksConfigCidrBlock[]; /** * Whether Kubernetes master is * accessible via Google Compute Engine Public IPs. */ gcpPublicCidrsAccessEnabled: boolean; /** * Whether authorized networks is enforced on the private endpoint or not. */ privateEndpointEnforcementEnabled: boolean; } interface ClusterMasterAuthorizedNetworksConfigCidrBlock { /** * External network that can access Kubernetes master through HTTPS. * Must be specified in CIDR notation. */ cidrBlock: string; /** * Field for users to identify CIDR blocks. */ displayName?: string; } interface ClusterMeshCertificates { /** * Controls the issuance of workload mTLS certificates. It is enabled by default. Workload Identity is required, see workload_config. */ enableCertificates: boolean; } interface ClusterMonitoringConfig { /** * Configuration for Advanced Datapath Monitoring. Structure is documented below. */ advancedDatapathObservabilityConfig: outputs.container.ClusterMonitoringConfigAdvancedDatapathObservabilityConfig; /** * The GKE components exposing metrics. Supported values include: `SYSTEM_COMPONENTS`, `APISERVER`, `SCHEDULER`, `CONTROLLER_MANAGER`, `STORAGE`, `HPA`, `POD`, `DAEMONSET`, `DEPLOYMENT`, `STATEFULSET`, `KUBELET`, `CADVISOR`, `DCGM` and `JOBSET`. In beta provider, `WORKLOADS` is supported on top of those 12 values. (`WORKLOADS` is deprecated and removed in GKE 1.24.) `KUBELET` and `CADVISOR` are only supported in GKE 1.29.3-gke.1093000 and above. `JOBSET` is only supported in GKE 1.32.1-gke.1357001 and above. */ enableComponents: string[]; /** * Configuration for Managed Service for Prometheus. Structure is documented below. */ managedPrometheus: outputs.container.ClusterMonitoringConfigManagedPrometheus; } interface ClusterMonitoringConfigAdvancedDatapathObservabilityConfig { /** * Whether or not to enable advanced datapath metrics. */ enableMetrics: boolean; /** * Whether or not Relay is enabled. 
*/ enableRelay: boolean; } interface ClusterMonitoringConfigManagedPrometheus { /** * Configuration options for GKE Auto-Monitoring. */ autoMonitoringConfig: outputs.container.ClusterMonitoringConfigManagedPrometheusAutoMonitoringConfig; /** * Whether or not the managed collection is enabled. */ enabled: boolean; } interface ClusterMonitoringConfigManagedPrometheusAutoMonitoringConfig { /** * Whether or not to enable GKE Auto-Monitoring. Supported values include: `ALL`, `NONE`. */ scope: string; } interface ClusterNetworkPerformanceConfig { /** * Specifies the total network bandwidth tier for NodePools in the cluster. */ totalEgressBandwidthTier: string; } interface ClusterNetworkPolicy { /** * Whether network policy is enabled on the cluster. */ enabled: boolean; /** * The selected network policy provider. Defaults to PROVIDER_UNSPECIFIED. */ provider?: string; } interface ClusterNodeConfig { /** * Specifies options for controlling * advanced machine features. Structure is documented below. */ advancedMachineFeatures?: outputs.container.ClusterNodeConfigAdvancedMachineFeatures; /** * Configuration of the node pool boot disk. Structure is documented below */ bootDisk: outputs.container.ClusterNodeConfigBootDisk; /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption */ bootDiskKmsKey?: string; /** * Configuration for Confidential Nodes feature. Structure is documented below. */ confidentialNodes: outputs.container.ClusterNodeConfigConfidentialNodes; /** * Parameters to customize containerd runtime. Structure is documented below. 
*/ containerdConfig: outputs.container.ClusterNodeConfigContainerdConfig; /** * Size of the disk attached to each node, specified * in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places. * Prefer configuring `bootDisk`. */ diskSizeGb: number; /** * Type of the disk attached to each node * (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `bootDisk`. */ diskType: string; /** * List of kubernetes taints applied to each node. */ effectiveTaints: outputs.container.ClusterNodeConfigEffectiveTaint[]; /** * Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default. */ enableConfidentialStorage?: boolean; /** * ) Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below. */ ephemeralStorageConfig?: outputs.container.ClusterNodeConfigEphemeralStorageConfig; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below. */ ephemeralStorageLocalSsdConfig?: outputs.container.ClusterNodeConfigEphemeralStorageLocalSsdConfig; /** * Parameters for the NCCL Fast Socket feature. If unspecified, NCCL Fast Socket will not be enabled on the node pool. * Node Pool must enable gvnic. * GKE version 1.25.2-gke.1700 or later. * Structure is documented below. */ fastSocket?: outputs.container.ClusterNodeConfigFastSocket; /** * Enables Flex Start provisioning model for the node pool. */ flexStart?: boolean; /** * Parameters for the Google Container Filesystem (GCFS). 
* If unspecified, GCFS will not be enabled on the node pool. When enabling this feature you must specify `imageType = "COS_CONTAINERD"` and `nodeVersion` from GKE versions 1.19 or later to use it. * For GKE versions 1.19, 1.20, and 1.21, the recommended minimum `nodeVersion` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively. * A `machineType` that has more than 16 GiB of memory is also recommended. * GCFS must be enabled in order to use [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming). * Structure is documented below. */ gcfsConfig: outputs.container.ClusterNodeConfigGcfsConfig; /** * List of the type and count of accelerator cards attached to the instance. * Structure documented below. */ guestAccelerators: outputs.container.ClusterNodeConfigGuestAccelerator[]; /** * Google Virtual NIC (gVNIC) is a virtual network interface. * Installing the gVNIC driver allows for more efficient traffic transmission across the Google network infrastructure. * gVNIC is an alternative to the virtIO-based ethernet driver. GKE nodes must use a Container-Optimized OS node image. * GKE node version 1.15.11-gke.15 or later * Structure is documented below. */ gvnic?: outputs.container.ClusterNodeConfigGvnic; /** * The maintenance policy for the hosts on which the GKE VMs run on. */ hostMaintenancePolicy?: outputs.container.ClusterNodeConfigHostMaintenancePolicy; /** * The image type to use for this node. Note that changing the image type * will delete and recreate all nodes in the node pool. */ imageType: string; /** * Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file). * Structure is documented below. 
* * ``` * kubelet_config { * cpu_manager_policy = "static" * cpu_cfs_quota = true * cpu_cfs_quota_period = "100us" * pod_pids_limit = 1024 * } * ``` */ kubeletConfig: outputs.container.ClusterNodeConfigKubeletConfig; /** * The Kubernetes labels (key/value pairs) to be applied to each node. The kubernetes.io/ and k8s.io/ prefixes are * reserved by Kubernetes Core components and cannot be specified. */ labels: { [key: string]: string; }; /** * Parameters that can be configured on Linux nodes. Structure is documented below. */ linuxNodeConfig: outputs.container.ClusterNodeConfigLinuxNodeConfig; /** * Parameters for the local NVMe SSDs. Structure is documented below. */ localNvmeSsdBlockConfig?: outputs.container.ClusterNodeConfigLocalNvmeSsdBlockConfig; /** * The amount of local SSD disks that will be * attached to each cluster node. Defaults to 0. */ localSsdCount: number; /** * Possible Local SSD encryption modes: * Accepted values are: * * `STANDARD_ENCRYPTION`: The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted. * * `EPHEMERAL_KEY_ENCRYPTION`: The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash. */ localSsdEncryptionMode?: string; /** * Parameter for specifying the type of logging agent used in a node pool. This will override any cluster-wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. */ loggingVariant: string; /** * The name of a Google Compute Engine machine type. * Defaults to `e2-medium`. To create a custom machine type, value should be set as specified * [here](https://cloud.google.com/compute/docs/reference/latest/instances#machineType). 
*/ machineType: string; /** * The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". */ maxRunDuration?: string; /** * The metadata key/value pairs assigned to instances in * the cluster. From GKE `1.12` onwards, `disable-legacy-endpoints` is set to * `true` by the API; if `metadata` is set but that default value is not * included, the provider will attempt to unset the value. To avoid this, set the * value in your config. */ metadata: { [key: string]: string; }; /** * Minimum CPU platform to be used by this instance. * The instance may be scheduled on the specified or newer CPU platform. Applicable * values are the friendly names of CPU platforms, such as `Intel Haswell`. See the * [official documentation](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) * for more information. */ minCpuPlatform: string; /** * Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). */ nodeGroup?: string; /** * The set of Google API scopes to be made available * on all of the node VMs under the "default" service account. * Use the "https://www.googleapis.com/auth/cloud-platform" scope to grant access to all APIs. It is recommended that you set `serviceAccount` to a non-default service account and grant IAM roles to that service account for only the resources that it needs. * * See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/access-scopes) for information on migrating off of legacy access scopes. */ oauthScopes: string[]; /** * A boolean that represents whether or not the underlying node VMs * are preemptible. See the [official documentation](https://cloud.google.com/container-engine/docs/preemptible-vm) * for more information. Defaults to false. 
*/ preemptible?: boolean; /** * The configuration of the desired reservation which instances could take capacity from. Structure is documented below. */ reservationAffinity?: outputs.container.ClusterNodeConfigReservationAffinity; /** * The GCP labels (key/value pairs) to be applied to each node. Refer [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels) * for how these labels are applied to clusters, node pools and nodes. */ resourceLabels?: { [key: string]: string; }; /** * A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. */ resourceManagerTags?: { [key: string]: string; }; /** * ) [GKE Sandbox](https://cloud.google.com/kubernetes-engine/docs/how-to/sandbox-pods) configuration. When enabling this feature you must specify `imageType = "COS_CONTAINERD"` and `nodeVersion = "1.12.7-gke.17"` or later to use it. * Structure is documented below. */ sandboxConfig?: outputs.container.ClusterNodeConfigSandboxConfig; /** * Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfsConfig` must be `enabled=true` for this feature to work. `minMasterVersion` must also be set to use GKE 1.28.3-gke.106700 or later versions. */ secondaryBootDisks?: outputs.container.ClusterNodeConfigSecondaryBootDisk[]; /** * The service account to be used by the Node VMs. * If not specified, the "default" service account is used. 
*/ serviceAccount: string; /** * Shielded Instance options. Structure is documented below. */ shieldedInstanceConfig: outputs.container.ClusterNodeConfigShieldedInstanceConfig; /** * Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below. */ soleTenantConfig?: outputs.container.ClusterNodeConfigSoleTenantConfig; /** * A boolean that represents whether the underlying node VMs are spot. * See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms) * for more information. Defaults to false. */ spot?: boolean; /** * The list of Storage Pools where boot disks are provisioned. */ storagePools?: string[]; /** * The list of instance tags applied to all nodes. Tags are used to identify * valid sources or targets for network firewalls. */ tags?: string[]; /** * A list of [Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) * to apply to nodes. GKE's API can only set this field on cluster creation. * However, GKE will add taints to your nodes if you enable certain features such * as GPUs. If this field is set, any diffs on this field will cause the provider to * recreate the underlying resource. Taint values can be updated safely in * Kubernetes (eg. through `kubectl`), and it's recommended that you do not use * this field to manage taints. If you do, `lifecycle.ignore_changes` is * recommended. Structure is documented below. */ taints?: outputs.container.ClusterNodeConfigTaint[]; /** * Windows node configuration, currently supporting OSVersion [attribute](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/NodeConfig#osversion). The value must be one of [OS_VERSION_UNSPECIFIED, OS_VERSION_LTSC2019, OS_VERSION_LTSC2022]. 
For example: */ windowsNodeConfig: outputs.container.ClusterNodeConfigWindowsNodeConfig; /** * Metadata configuration to expose to workloads on the node pool. * Structure is documented below. */ workloadMetadataConfig: outputs.container.ClusterNodeConfigWorkloadMetadataConfig; } interface ClusterNodeConfigAdvancedMachineFeatures { /** * Defines whether the instance should have nested virtualization enabled. Defaults to false. */ enableNestedVirtualization?: boolean; /** * Defines the performance monitoring unit [PMU](https://cloud.google.com/compute/docs/pmu-overview) level. Valid values are `ARCHITECTURAL`, `STANDARD`, or `ENHANCED`. Defaults to off. */ performanceMonitoringUnit?: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; } interface ClusterNodeConfigBootDisk { /** * Type of the disk attached to each node * (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field. */ diskType: string; /** * Configure disk IOPs. This is only valid if the `diskType` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values. */ provisionedIops: number; /** * Configure disk throughput. This is only valid if the `diskType` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values. */ provisionedThroughput: number; /** * Size of the disk attached to each node, specified * in GB. 
The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field. */ sizeGb: number; } interface ClusterNodeConfigConfidentialNodes { /** * Defines the type of technology used * by the confidential node. */ confidentialInstanceType?: string; /** * Enable Confidential GKE Nodes for this cluster, to * enforce encryption of data in-use. */ enabled: boolean; } interface ClusterNodeConfigContainerdConfig { /** * Configuration for private container registries. There are two fields in this config: */ privateRegistryAccessConfig?: outputs.container.ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfig; /** * Defines containerd registry host configuration. Each `registryHosts` entry represents a `hosts.toml` file. See [customize containerd configuration in GKE nodes](https://docs.cloud.google.com/kubernetes-engine/docs/how-to/customize-containerd-configuration#registryHosts) for more detail. Example: */ registryHosts?: outputs.container.ClusterNodeConfigContainerdConfigRegistryHost[]; /** * Configuration for writable cgroups. This allows containers to have a writable `/sys/fs/cgroup` directory, which is required for some workloads to create their own sub-cgroups. The `writableCgroups` block supports: */ writableCgroups?: outputs.container.ClusterNodeConfigContainerdConfigWritableCgroups; } interface ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfig { /** * List of configuration objects for CA and domains. Each object identifies a certificate and its assigned domains. See [how to configure for private container registries](https://cloud.google.com/kubernetes-engine/docs/how-to/access-private-registries-private-certificates) for more detail. Example: */ certificateAuthorityDomainConfigs?: outputs.container.ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Enables private registry config. 
If set to false, all other fields in this object must not be set. */ enabled: boolean; } interface ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. */ gcpSecretManagerCertificateConfig: outputs.container.ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig; } interface ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. */ secretUri: string; } interface ClusterNodeConfigContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts?: outputs.container.ClusterNodeConfigContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface ClusterNodeConfigContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities?: string[]; /** * Configures the registry host certificate. */ cas?: outputs.container.ClusterNodeConfigContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients?: outputs.container.ClusterNodeConfigContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout?: string; /** * Configures the registry host headers. */ headers?: outputs.container.ClusterNodeConfigContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. 
*/ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. */ overridePath?: boolean; } interface ClusterNodeConfigContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodeConfigContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ cert: outputs.container.ClusterNodeConfigContainerdConfigRegistryHostHostClientCert; /** * Configures the client private key. */ key?: outputs.container.ClusterNodeConfigContainerdConfigRegistryHostHostClientKey; } interface ClusterNodeConfigContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodeConfigContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodeConfigContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. */ values: string[]; } interface ClusterNodeConfigContainerdConfigWritableCgroups { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface ClusterNodeConfigEffectiveTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface ClusterNodeConfigEphemeralStorageConfig { /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. */ localSsdCount: number; } interface ClusterNodeConfigEphemeralStorageLocalSsdConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node utilized for GKE Data Cache. If zero, then GKE Data Cache will not be enabled in the nodes. 
*/ dataCacheCount?: number; /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. */ localSsdCount: number; } interface ClusterNodeConfigFastSocket { /** * Whether or not the NCCL Fast Socket is enabled */ enabled: boolean; } interface ClusterNodeConfigGcfsConfig { /** * Whether or not the Google Container Filesystem (GCFS) is enabled */ enabled: boolean; } interface ClusterNodeConfigGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * Configuration for auto installation of GPU driver. Structure is documented below. */ gpuDriverInstallationConfig: outputs.container.ClusterNodeConfigGuestAcceleratorGpuDriverInstallationConfig; /** * Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig [user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). */ gpuPartitionSize?: string; /** * Configuration for GPU sharing. Structure is documented below. */ gpuSharingConfig?: outputs.container.ClusterNodeConfigGuestAcceleratorGpuSharingConfig; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface ClusterNodeConfigGuestAcceleratorGpuDriverInstallationConfig { /** * Mode for how the GPU driver is installed. * Accepted values are: * * `"GPU_DRIVER_VERSION_UNSPECIFIED"`: Default value is to install the "Default" GPU driver. Before GKE `1.30.1-gke.1156000`, the default value is to not install any GPU driver. * * `"INSTALLATION_DISABLED"`: Disable GPU driver auto installation and needs manual installation. * * `"DEFAULT"`: "Default" GPU driver in COS and Ubuntu. * * `"LATEST"`: "Latest" GPU driver in COS. */ gpuDriverVersion: string; } interface ClusterNodeConfigGuestAcceleratorGpuSharingConfig { /** * The type of GPU sharing strategy to enable on the GPU node. 
* Accepted values are: * * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device. * * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus) */ gpuSharingStrategy: string; /** * The maximum number of containers that can share a GPU. */ maxSharedClientsPerGpu: number; } interface ClusterNodeConfigGvnic { /** * Whether or not the Google Virtual NIC (gVNIC) is enabled */ enabled: boolean; } interface ClusterNodeConfigHostMaintenancePolicy { /** * . */ maintenanceInterval: string; } interface ClusterNodeConfigKubeletConfig { /** * Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`. */ allowedUnsafeSysctls?: string[]; /** * Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive. */ containerLogMaxFiles?: number; /** * Defines the maximum size of the * container log file before it is rotated. Specified as a positive number and a * unit suffix, such as `"100Ki"`, `"10Mi"`. Valid units are "Ki", "Mi", "Gi". * The value must be between `"10Mi"` and `"500Mi"`, inclusive. And the total container log size * (`containerLogMaxSize` * `containerLogMaxFiles`) cannot exceed 1% of the total storage of the node. */ containerLogMaxSize?: string; /** * If true, enables CPU CFS quota enforcement for * containers that specify CPU limits. */ cpuCfsQuota: boolean; /** * The CPU CFS quota period value. Specified * as a sequence of decimal numbers, each with optional fraction and a unit suffix, * such as `"300ms"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", * "h". The value must be a positive duration. 
*/ cpuCfsQuotaPeriod?: string; /** * The CPU management policy on the node. See * [K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/). * One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none". * Prior to the 6.4.0 this field was marked as required. The workaround for the required field * is setting the empty string `""`, which will function identically to not setting this field. */ cpuManagerPolicy?: string; /** * Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300. */ evictionMaxPodGracePeriodSeconds?: number; /** * Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below. */ evictionMinimumReclaim?: outputs.container.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim; /** * Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below. */ evictionSoft?: outputs.container.ClusterNodeConfigKubeletConfigEvictionSoft; /** * Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below. */ evictionSoftGracePeriod?: outputs.container.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod; /** * Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive. */ imageGcHighThresholdPercent?: number; /** * Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive. 
*/ imageGcLowThresholdPercent?: number; /** * Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration. */ imageMaximumGcAge?: string; /** * Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m". */ imageMinimumGcAge?: string; /** * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive. */ maxParallelImagePulls: number; /** * Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node. * The memory manager optimizes memory and hugepages allocation for pods, especially * those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below. */ memoryManager?: outputs.container.ClusterNodeConfigKubeletConfigMemoryManager; /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. */ podPidsLimit?: number; /** * Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group. 
*/ singleProcessOomKill?: boolean; /** * These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below. */ topologyManager?: outputs.container.ClusterNodeConfigKubeletConfigTopologyManager; } interface ClusterNodeConfigKubeletConfigEvictionMinimumReclaim { /** * Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ imagefsAvailable?: string; /** * Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ imagefsInodesFree?: string; /** * Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ memoryAvailable?: string; /** * Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ nodefsAvailable?: string; /** * Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ nodefsInodesFree?: string; /** * Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ pidAvailable?: string; } interface ClusterNodeConfigKubeletConfigEvictionSoft { /** * Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`. */ imagefsAvailable?: string; /** * Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`. */ imagefsInodesFree?: string; /** * Defines quantity of soft eviction threshold for memory.available. 
The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory. */ memoryAvailable?: string; /** * Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`. */ nodefsAvailable?: string; /** * Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`. */ nodefsInodesFree?: string; /** * Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`. */ pidAvailable?: string; } interface ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod { /** * Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ imagefsAvailable?: string; /** * Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ imagefsInodesFree?: string; /** * Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". */ memoryAvailable?: string; /** * Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ nodefsAvailable?: string; /** * Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ nodefsInodesFree?: string; /** * Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`. 
*/ pidAvailable?: string; } interface ClusterNodeConfigKubeletConfigMemoryManager { /** * The [Memory * Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) * policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None". */ policy: string; } interface ClusterNodeConfigKubeletConfigTopologyManager { /** * The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none". */ policy: string; /** * The Topology Manager scope, defining the granularity at which * policy decisions are applied. Valid values are "container" (resources are aligned * per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container". */ scope: string; } interface ClusterNodeConfigLinuxNodeConfig { /** * Possible cgroup modes that can be used. * Accepted values are: * * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used. * * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image. * * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image. */ cgroupMode: string; /** * Amounts for 2M and 1G hugepages. Structure is documented below. */ hugepagesConfig?: outputs.container.ClusterNodeConfigLinuxNodeConfigHugepagesConfig; /** * Settings for kernel module loading. Structure is documented below. 
*/ nodeKernelModuleLoading?: outputs.container.ClusterNodeConfigLinuxNodeConfigNodeKernelModuleLoading; /** * The Linux kernel parameters to be applied to the nodes * and all pods running on the nodes. Specified as a map from the key, such as * `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file). * Note that validations happen all server side. All attributes are optional. */ sysctls?: { [key: string]: string; }; /** * The Linux kernel transparent hugepage defrag setting. */ transparentHugepageDefrag?: string; /** * The Linux kernel transparent hugepage setting. */ transparentHugepageEnabled: string; } interface ClusterNodeConfigLinuxNodeConfigHugepagesConfig { /** * Amount of 1G hugepages. */ hugepageSize1g?: number; /** * Amount of 2M hugepages. */ hugepageSize2m?: number; } interface ClusterNodeConfigLinuxNodeConfigNodeKernelModuleLoading { /** * Possible kernel module loading policies. * Accepted values are: * * `POLICY_UNSPECIFIED`: Default if unset. GKE selects the image based on node type. For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. * * `ENFORCE_SIGNED_MODULES`: Enforced signature verification: Node pools will use a Container-Optimized OS image configured to allow loading of *Google-signed* external kernel modules. Loadpin is enabled but configured to exclude modules, and kernel module signature checking is enforced. * * `DO_NOT_ENFORCE_SIGNED_MODULES`: Mirrors existing DEFAULT behavior: For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. */ policy?: string; } interface ClusterNodeConfigLocalNvmeSsdBlockConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node. 
Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node. * > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later. */ localSsdCount: number; } interface ClusterNodeConfigReservationAffinity { /** * The type of reservation consumption * Accepted values are: * * * `"UNSPECIFIED"`: Default value. This should not be used. * * `"NO_RESERVATION"`: Do not consume from any reserved capacity. * * `"ANY_RESERVATION"`: Consume any reservation available. * * `"SPECIFIC_RESERVATION"`: Must consume from a specific reservation. Must specify key value fields for specifying the reservations. */ consumeReservationType: string; /** * The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value. */ key?: string; /** * The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name" */ values?: string[]; } interface ClusterNodeConfigSandboxConfig { /** * Which sandbox to use for pods in the node pool. * Accepted values are: * * * `"gvisor"`: Pods run within a gVisor sandbox. */ sandboxType: string; } interface ClusterNodeConfigSecondaryBootDisk { /** * Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`. */ diskImage: string; /** * Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`. */ mode?: string; } interface ClusterNodeConfigShieldedInstanceConfig { /** * Defines if the instance has integrity monitoring enabled. * * Enables monitoring and attestation of the boot integrity of the instance. 
The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created. Defaults to `true`. */ enableIntegrityMonitoring?: boolean; /** * Defines if the instance has Secure Boot enabled. * * Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. Defaults to `false`. */ enableSecureBoot?: boolean; } interface ClusterNodeConfigSoleTenantConfig { /** * Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count. */ minNodeCpus?: number; /** * The node affinity settings for the sole tenant node pool. Structure is documented below. */ nodeAffinities: outputs.container.ClusterNodeConfigSoleTenantConfigNodeAffinity[]; } interface ClusterNodeConfigSoleTenantConfigNodeAffinity { /** * The default or custom node affinity label key name. */ key: string; /** * Specifies affinity or anti-affinity. Accepted values are `"IN"` or `"NOT_IN"` */ operator: string; /** * List of node affinity label values as strings. */ values: string[]; } interface ClusterNodeConfigTaint { /** * Effect for taint. Accepted values are `NO_SCHEDULE`, `PREFER_NO_SCHEDULE`, and `NO_EXECUTE`. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface ClusterNodeConfigWindowsNodeConfig { /** * The OS Version of the windows nodepool. Values are OS_VERSION_UNSPECIFIED, OS_VERSION_LTSC2019 and OS_VERSION_LTSC2022 */ osversion?: string; } interface ClusterNodeConfigWorkloadMetadataConfig { /** * How to expose the node metadata to the workload running on the node. 
* Accepted values are: * * UNSPECIFIED: Not Set * * GCE_METADATA: Expose all Compute Engine metadata to pods. * * GKE_METADATA: Run the GKE Metadata Server on this node. The GKE Metadata Server exposes a metadata API to workloads that is compatible with the V1 Compute Metadata APIs exposed by the Compute Engine and App Engine Metadata Servers. This feature can only be enabled if [workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) is enabled at the cluster level. */ mode: string; } interface ClusterNodePool { /** * Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. */ autoscaling?: outputs.container.ClusterNodePoolAutoscaling; /** * The number of nodes to create in this * cluster's default node pool. In regional or multi-zonal clusters, this is the * number of nodes per zone. Must be set if `nodePool` is not set. If you're using * `gcp.container.NodePool` objects with no default node pool, you'll need to * set this to a value of at least `1`, alongside setting * `removeDefaultNodePool` to `true`. */ initialNodeCount: number; /** * The resource URLs of the managed instance groups associated with this node pool. */ instanceGroupUrls: string[]; /** * List of instance group URLs which have been assigned to this node pool. */ managedInstanceGroupUrls: string[]; /** * Node management configuration, wherein auto-repair and auto-upgrade is configured. */ management: outputs.container.ClusterNodePoolManagement; /** * The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. */ maxPodsPerNode: number; /** * The name of the cluster, unique within the project and * location. * * - - - */ name: string; /** * Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name. 
*/ namePrefix: string; /** * Configuration for * [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr) to the node pool. Structure is documented below. */ networkConfig: outputs.container.ClusterNodePoolNetworkConfig; /** * Parameters used in creating the default node pool. * Generally, this field should not be used at the same time as a * `gcp.container.NodePool` or a `nodePool` block; this configuration * manages the default node pool, which isn't recommended to be used. * Structure is documented below. */ nodeConfig: outputs.container.ClusterNodePoolNodeConfig; /** * The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling. */ nodeCount: number; /** * Node drain configuration for this NodePool. */ nodeDrainConfigs: outputs.container.ClusterNodePoolNodeDrainConfig[]; /** * The list of zones in which the cluster's nodes * are located. Nodes must be in the region of their regional cluster or in the * same region as their cluster's zone for zonal clusters. If this is specified for * a zonal cluster, omit the cluster's zone. * * > A "multi-zonal" cluster is a zonal cluster with at least one additional zone * defined; in a multi-zonal cluster, the cluster master is only present in a * single zone while nodes are present in each of the primary zone and the node * locations. In contrast, in a regional cluster, cluster master nodes are present * in multiple zones in the region. For that reason, regional clusters should be * preferred. */ nodeLocations: string[]; /** * Specifies the node placement policy */ placementPolicy?: outputs.container.ClusterNodePoolPlacementPolicy; /** * Specifies the configuration of queued provisioning */ queuedProvisioning?: outputs.container.ClusterNodePoolQueuedProvisioning; /** * Specify node upgrade settings to change how many nodes GKE attempts to upgrade at once. 
The number of nodes upgraded simultaneously is the sum of maxSurge and max_unavailable. The maximum number of nodes upgraded simultaneously is limited to 20. */ upgradeSettings: outputs.container.ClusterNodePoolUpgradeSettings; /** * The Kubernetes version for the nodes in this pool. Note that if this field and autoUpgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's versionPrefix field to approximate fuzzy versions in a Terraform-compatible way. */ version: string; } interface ClusterNodePoolAutoConfig { /** * Linux system configuration for the cluster's automatically provisioned node pools. Only `cgroupMode` and `nodeKernelModuleLoading` fields are supported in `nodePoolAutoConfig`. Structure is documented below. */ linuxNodeConfig?: outputs.container.ClusterNodePoolAutoConfigLinuxNodeConfig; /** * The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. */ networkTags?: outputs.container.ClusterNodePoolAutoConfigNetworkTags; /** * Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. * Structure is documented below. */ nodeKubeletConfig: outputs.container.ClusterNodePoolAutoConfigNodeKubeletConfig; /** * A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. 
`tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. */ resourceManagerTags?: { [key: string]: string; }; } interface ClusterNodePoolAutoConfigLinuxNodeConfig { /** * Possible cgroup modes that can be used. * Accepted values are: * * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used. * * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image. * * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image. */ cgroupMode: string; /** * Settings for kernel module loading. Structure is documented below. */ nodeKernelModuleLoading?: outputs.container.ClusterNodePoolAutoConfigLinuxNodeConfigNodeKernelModuleLoading; } interface ClusterNodePoolAutoConfigLinuxNodeConfigNodeKernelModuleLoading { /** * Possible kernel module loading policies. * Accepted values are: * * `POLICY_UNSPECIFIED`: Default if unset. GKE selects the image based on node type. For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. * * `ENFORCE_SIGNED_MODULES`: Enforced signature verification: Node pools will use a Container-Optimized OS image configured to allow loading of *Google-signed* external kernel modules. Loadpin is enabled but configured to exclude modules, and kernel module signature checking is enforced. * * `DO_NOT_ENFORCE_SIGNED_MODULES`: Mirrors existing DEFAULT behavior: For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. */ policy?: string; } interface ClusterNodePoolAutoConfigNetworkTags { /** * List of network tags applied to auto-provisioned node pools. 
*/ tags?: string[]; } interface ClusterNodePoolAutoConfigNodeKubeletConfig { /** * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; } interface ClusterNodePoolAutoscaling { /** * Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. */ locationPolicy: string; /** * Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits. */ maxNodeCount?: number; /** * Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits. */ minNodeCount?: number; /** * Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits. */ totalMaxNodeCount?: number; /** * Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. */ totalMinNodeCount?: number; } interface ClusterNodePoolDefaults { /** * Subset of NodeConfig message that has defaults. */ nodeConfigDefaults?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaults; } interface ClusterNodePoolDefaultsNodeConfigDefaults { /** * Parameters for containerd configuration. */ containerdConfig: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfig; /** * The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. 
*/ gcfsConfig: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig; /** * Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. */ loggingVariant: string; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfig { /** * Configuration for private container registries. There are two fields in this config: */ privateRegistryAccessConfig?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfig; /** * Defines containerd registry host configuration. Each `registryHosts` entry represents a `hosts.toml` file. See [customize containerd configuration in GKE nodes](https://docs.cloud.google.com/kubernetes-engine/docs/how-to/customize-containerd-configuration#registryHosts) for more detail. Example: */ registryHosts?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHost[]; /** * Configuration for writable cgroups. This allows containers to have a writable `/sys/fs/cgroup` directory, which is required for some workloads to create their own sub-cgroups. The `writableCgroups` block supports: */ writableCgroups?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigWritableCgroups; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfig { /** * List of configuration objects for CA and domains. Each object identifies a certificate and its assigned domains. 
See [how to configure for private container registries](https://cloud.google.com/kubernetes-engine/docs/how-to/access-private-registries-private-certificates) for more detail. Example: */ certificateAuthorityDomainConfigs?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Enables private registry config. If set to false, all other fields in this object must not be set. */ enabled: boolean; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. */ gcpSecretManagerCertificateConfig: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. */ secretUri: string; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities?: string[]; /** * Configures the registry host certificate. 
*/ cas?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout?: string; /** * Configures the registry host headers. */ headers?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. */ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. */ overridePath?: boolean; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ cert: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostClientCert; /** * Configures the client private key. */ key?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostClientKey; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. 
*/ values: string[]; } interface ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigWritableCgroups { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig { /** * Whether or not the Google Container Filesystem (GCFS) is enabled */ enabled: boolean; } interface ClusterNodePoolManagement { /** * Specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered. * * This block also contains several computed attributes, documented below. */ autoRepair?: boolean; /** * Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes. */ autoUpgrade?: boolean; } interface ClusterNodePoolNetworkConfig { /** * The accelerator network profile to use for this node pool. */ acceleratorNetworkProfile?: string; /** * We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface */ additionalNodeNetworkConfigs: outputs.container.ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfig[]; /** * We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node */ additionalPodNetworkConfigs?: outputs.container.ClusterNodePoolNetworkConfigAdditionalPodNetworkConfig[]; /** * Whether to create a new range for pod IPs in this node pool. Defaults are provided for podRange and podIpv4CidrBlock if they are not specified. */ createPodRange?: boolean; /** * Whether nodes have internal IP addresses only. */ enablePrivateNodes: boolean; /** * Network bandwidth tier configuration. 
*/ networkPerformanceConfig?: outputs.container.ClusterNodePoolNetworkConfigNetworkPerformanceConfig; /** * Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited */ podCidrOverprovisionConfig: outputs.container.ClusterNodePoolNetworkConfigPodCidrOverprovisionConfig; /** * The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. */ podIpv4CidrBlock: string; /** * The ID of the secondary range for pod IPs. If createPodRange is true, this ID is used for the new range. If createPodRange is false, uses an existing secondary range with this ID. */ podRange: string; /** * The name or selfLink of the Google Compute Engine * subnetwork in which the cluster's instances are launched. */ subnetwork: string; } interface ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfig { /** * The name or selfLink of the Google Compute Engine * network to which the cluster is connected. For Shared VPC, set this to the self link of the * shared network. */ network: string; /** * The name or selfLink of the Google Compute Engine * subnetwork in which the cluster's instances are launched. */ subnetwork: string; } interface ClusterNodePoolNetworkConfigAdditionalPodNetworkConfig { /** * The maximum number of pods per node which use this pod network. */ maxPodsPerNode: number; /** * The name of the secondary range on the subnet which provides IP address for this pod range. */ secondaryPodRange?: string; /** * The name or selfLink of the Google Compute Engine * subnetwork in which the cluster's instances are launched. */ subnetwork?: string; } interface ClusterNodePoolNetworkConfigNetworkPerformanceConfig { /** * Specifies the total network bandwidth tier for NodePools in the cluster. 
*/ totalEgressBandwidthTier: string; } interface ClusterNodePoolNetworkConfigPodCidrOverprovisionConfig { /** * Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled. When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic * * The `clusterTelemetry` block supports */ disabled: boolean; } interface ClusterNodePoolNodeConfig { /** * Specifies options for controlling * advanced machine features. Structure is documented below. */ advancedMachineFeatures?: outputs.container.ClusterNodePoolNodeConfigAdvancedMachineFeatures; /** * Configuration of the node pool boot disk. Structure is documented below */ bootDisk: outputs.container.ClusterNodePoolNodeConfigBootDisk; /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption */ bootDiskKmsKey?: string; /** * Configuration for Confidential Nodes feature. Structure is documented below. */ confidentialNodes: outputs.container.ClusterNodePoolNodeConfigConfidentialNodes; /** * Parameters to customize containerd runtime. Structure is documented below. */ containerdConfig: outputs.container.ClusterNodePoolNodeConfigContainerdConfig; /** * Size of the disk attached to each node, specified * in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places. * Prefer configuring `bootDisk`. */ diskSizeGb: number; /** * Type of the disk attached to each node * (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). 
Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `bootDisk`. */ diskType: string; /** * List of kubernetes taints applied to each node. */ effectiveTaints: outputs.container.ClusterNodePoolNodeConfigEffectiveTaint[]; /** * Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default. */ enableConfidentialStorage?: boolean; /** * ) Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below. */ ephemeralStorageConfig?: outputs.container.ClusterNodePoolNodeConfigEphemeralStorageConfig; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below. */ ephemeralStorageLocalSsdConfig?: outputs.container.ClusterNodePoolNodeConfigEphemeralStorageLocalSsdConfig; /** * Parameters for the NCCL Fast Socket feature. If unspecified, NCCL Fast Socket will not be enabled on the node pool. * Node Pool must enable gvnic. * GKE version 1.25.2-gke.1700 or later. * Structure is documented below. */ fastSocket?: outputs.container.ClusterNodePoolNodeConfigFastSocket; /** * Enables Flex Start provisioning model for the node pool. */ flexStart?: boolean; /** * Parameters for the Google Container Filesystem (GCFS). * If unspecified, GCFS will not be enabled on the node pool. When enabling this feature you must specify `imageType = "COS_CONTAINERD"` and `nodeVersion` from GKE versions 1.19 or later to use it. * For GKE versions 1.19, 1.20, and 1.21, the recommended minimum `nodeVersion` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively. * A `machineType` that has more than 16 GiB of memory is also recommended. 
* GCFS must be enabled in order to use [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming). * Structure is documented below. */ gcfsConfig: outputs.container.ClusterNodePoolNodeConfigGcfsConfig; /** * List of the type and count of accelerator cards attached to the instance. * Structure documented below. */ guestAccelerators: outputs.container.ClusterNodePoolNodeConfigGuestAccelerator[]; /** * Google Virtual NIC (gVNIC) is a virtual network interface. * Installing the gVNIC driver allows for more efficient traffic transmission across the Google network infrastructure. * gVNIC is an alternative to the virtIO-based ethernet driver. GKE nodes must use a Container-Optimized OS node image. * GKE node version 1.15.11-gke.15 or later * Structure is documented below. */ gvnic?: outputs.container.ClusterNodePoolNodeConfigGvnic; /** * The maintenance policy for the hosts on which the GKE VMs run on. */ hostMaintenancePolicy?: outputs.container.ClusterNodePoolNodeConfigHostMaintenancePolicy; /** * The image type to use for this node. Note that changing the image type * will delete and recreate all nodes in the node pool. */ imageType: string; /** * Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file). * Structure is documented below. * * ``` * kubelet_config { * cpu_manager_policy = "static" * cpu_cfs_quota = true * cpu_cfs_quota_period = "100us" * pod_pids_limit = 1024 * } * ``` */ kubeletConfig: outputs.container.ClusterNodePoolNodeConfigKubeletConfig; /** * The Kubernetes labels (key/value pairs) to be applied to each node. The kubernetes.io/ and k8s.io/ prefixes are * reserved by Kubernetes Core components and cannot be specified. */ labels: { [key: string]: string; }; /** * Parameters that can be configured on Linux nodes. Structure is documented below. 
*/ linuxNodeConfig: outputs.container.ClusterNodePoolNodeConfigLinuxNodeConfig; /** * Parameters for the local NVMe SSDs. Structure is documented below. */ localNvmeSsdBlockConfig?: outputs.container.ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig; /** * The amount of local SSD disks that will be * attached to each cluster node. Defaults to 0. */ localSsdCount: number; /** * Possible Local SSD encryption modes: * Accepted values are: * * `STANDARD_ENCRYPTION`: The given node will be encrypted using keys managed by Google infrastructure and the keys will be deleted when the node is deleted. * * `EPHEMERAL_KEY_ENCRYPTION`: The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash. */ localSsdEncryptionMode?: string; /** * Parameter for specifying the type of logging agent used in a node pool. This will override any cluster-wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. */ loggingVariant: string; /** * The name of a Google Compute Engine machine type. * Defaults to `e2-medium`. To create a custom machine type, value should be set as specified * [here](https://cloud.google.com/compute/docs/reference/latest/instances#machineType). */ machineType: string; /** * The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". */ maxRunDuration?: string; /** * The metadata key/value pairs assigned to instances in * the cluster. From GKE `1.12` onwards, `disable-legacy-endpoints` is set to * `true` by the API; if `metadata` is set but that default value is not * included, the provider will attempt to unset the value. To avoid this, set the * value in your config. */ metadata: { [key: string]: string; }; /** * Minimum CPU platform to be used by this instance. 
* The instance may be scheduled on the specified or newer CPU platform. Applicable * values are the friendly names of CPU platforms, such as `Intel Haswell`. See the * [official documentation](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) * for more information. */ minCpuPlatform: string; /** * Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). */ nodeGroup?: string; /** * The set of Google API scopes to be made available * on all of the node VMs under the "default" service account. * Use the "https://www.googleapis.com/auth/cloud-platform" scope to grant access to all APIs. It is recommended that you set `serviceAccount` to a non-default service account and grant IAM roles to that service account for only the resources that it needs. * * See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/access-scopes) for information on migrating off of legacy access scopes. */ oauthScopes: string[]; /** * A boolean that represents whether or not the underlying node VMs * are preemptible. See the [official documentation](https://cloud.google.com/container-engine/docs/preemptible-vm) * for more information. Defaults to false. */ preemptible?: boolean; /** * The configuration of the desired reservation which instances could take capacity from. Structure is documented below. */ reservationAffinity?: outputs.container.ClusterNodePoolNodeConfigReservationAffinity; /** * The GCP labels (key/value pairs) to be applied to each node. Refer [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels) * for how these labels are applied to clusters, node pools and nodes. 
*/ resourceLabels?: { [key: string]: string; }; /** * A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. */ resourceManagerTags?: { [key: string]: string; }; /** * ) [GKE Sandbox](https://cloud.google.com/kubernetes-engine/docs/how-to/sandbox-pods) configuration. When enabling this feature you must specify `imageType = "COS_CONTAINERD"` and `nodeVersion = "1.12.7-gke.17"` or later to use it. * Structure is documented below. */ sandboxConfig?: outputs.container.ClusterNodePoolNodeConfigSandboxConfig; /** * Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfsConfig` must be `enabled=true` for this feature to work. `minMasterVersion` must also be set to use GKE 1.28.3-gke.106700 or later versions. */ secondaryBootDisks?: outputs.container.ClusterNodePoolNodeConfigSecondaryBootDisk[]; /** * The service account to be used by the Node VMs. * If not specified, the "default" service account is used. */ serviceAccount: string; /** * Shielded Instance options. Structure is documented below. */ shieldedInstanceConfig: outputs.container.ClusterNodePoolNodeConfigShieldedInstanceConfig; /** * Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below. 
*/ soleTenantConfig?: outputs.container.ClusterNodePoolNodeConfigSoleTenantConfig; /** * A boolean that represents whether the underlying node VMs are spot. * See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms) * for more information. Defaults to false. */ spot?: boolean; /** * The list of Storage Pools where boot disks are provisioned. */ storagePools?: string[]; /** * The list of instance tags applied to all nodes. Tags are used to identify * valid sources or targets for network firewalls. */ tags?: string[]; /** * A list of [Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) * to apply to nodes. GKE's API can only set this field on cluster creation. * However, GKE will add taints to your nodes if you enable certain features such * as GPUs. If this field is set, any diffs on this field will cause the provider to * recreate the underlying resource. Taint values can be updated safely in * Kubernetes (eg. through `kubectl`), and it's recommended that you do not use * this field to manage taints. If you do, `lifecycle.ignore_changes` is * recommended. Structure is documented below. */ taints?: outputs.container.ClusterNodePoolNodeConfigTaint[]; /** * Windows node configuration, currently supporting OSVersion [attribute](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/NodeConfig#osversion). The value must be one of [OS_VERSION_UNSPECIFIED, OS_VERSION_LTSC2019, OS_VERSION_LTSC2022]. For example: */ windowsNodeConfig: outputs.container.ClusterNodePoolNodeConfigWindowsNodeConfig; /** * Metadata configuration to expose to workloads on the node pool. * Structure is documented below. */ workloadMetadataConfig: outputs.container.ClusterNodePoolNodeConfigWorkloadMetadataConfig; } interface ClusterNodePoolNodeConfigAdvancedMachineFeatures { /** * Defines whether the instance should have nested virtualization enabled. Defaults to false. 
*/ enableNestedVirtualization?: boolean; /** * Defines the performance monitoring unit [PMU](https://cloud.google.com/compute/docs/pmu-overview) level. Valid values are `ARCHITECTURAL`, `STANDARD`, or `ENHANCED`. Defaults to off. */ performanceMonitoringUnit?: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; } interface ClusterNodePoolNodeConfigBootDisk { /** * Type of the disk attached to each node * (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field. */ diskType: string; /** * Configure disk IOPs. This is only valid if the `diskType` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values. */ provisionedIops: number; /** * Configure disk throughput. This is only valid if the `diskType` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values. */ provisionedThroughput: number; /** * Size of the disk attached to each node, specified * in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field. */ sizeGb: number; } interface ClusterNodePoolNodeConfigConfidentialNodes { /** * Defines the type of technology used * by the confidential node. 
*/ confidentialInstanceType?: string; /** * Enable Confidential GKE Nodes for this cluster, to * enforce encryption of data in-use. */ enabled: boolean; } interface ClusterNodePoolNodeConfigContainerdConfig { /** * Configuration for private container registries. There are two fields in this config: */ privateRegistryAccessConfig?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig; /** * Defines containerd registry host configuration. Each `registryHosts` entry represents a `hosts.toml` file. See [customize containerd configuration in GKE nodes](https://docs.cloud.google.com/kubernetes-engine/docs/how-to/customize-containerd-configuration#registryHosts) for more detail. Example: */ registryHosts?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHost[]; /** * Configuration for writable cgroups. This allows containers to have a writable `/sys/fs/cgroup` directory, which is required for some workloads to create their own sub-cgroups. The `writableCgroups` block supports: */ writableCgroups?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigWritableCgroups; } interface ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig { /** * List of configuration objects for CA and domains. Each object identifies a certificate and its assigned domains. See [how to configure for private container registries](https://cloud.google.com/kubernetes-engine/docs/how-to/access-private-registries-private-certificates) for more detail. Example: */ certificateAuthorityDomainConfigs?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Enables private registry config. If set to false, all other fields in this object must not be set. */ enabled: boolean; } interface ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. 
IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. */ gcpSecretManagerCertificateConfig: outputs.container.ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig; } interface ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. */ secretUri: string; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities?: string[]; /** * Configures the registry host certificate. */ cas?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout?: string; /** * Configures the registry host headers. */ headers?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. */ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. 
*/ overridePath?: boolean; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ cert: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientCert; /** * Configures the client private key. */ key?: outputs.container.ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientKey; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri?: string; } interface ClusterNodePoolNodeConfigContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. */ values: string[]; } interface ClusterNodePoolNodeConfigContainerdConfigWritableCgroups { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface ClusterNodePoolNodeConfigEffectiveTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface ClusterNodePoolNodeConfigEphemeralStorageConfig { /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. */ localSsdCount: number; } interface ClusterNodePoolNodeConfigEphemeralStorageLocalSsdConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node utilized for GKE Data Cache. If zero, then GKE Data Cache will not be enabled in the nodes. 
*/ dataCacheCount?: number; /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. */ localSsdCount: number; } interface ClusterNodePoolNodeConfigFastSocket { /** * Whether or not the NCCL Fast Socket is enabled */ enabled: boolean; } interface ClusterNodePoolNodeConfigGcfsConfig { /** * Whether or not the Google Container Filesystem (GCFS) is enabled */ enabled: boolean; } interface ClusterNodePoolNodeConfigGuestAccelerator { /** * The number of the guest accelerator cards exposed to this instance. */ count: number; /** * Configuration for auto installation of GPU driver. Structure is documented below. */ gpuDriverInstallationConfig: outputs.container.ClusterNodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig; /** * Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig [user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). */ gpuPartitionSize?: string; /** * Configuration for GPU sharing. Structure is documented below. */ gpuSharingConfig?: outputs.container.ClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfig; /** * The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. */ type: string; } interface ClusterNodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig { /** * Mode for how the GPU driver is installed. * Accepted values are: * * `"GPU_DRIVER_VERSION_UNSPECIFIED"`: Default value is to install the "Default" GPU driver. Before GKE `1.30.1-gke.1156000`, the default value is to not install any GPU driver. * * `"INSTALLATION_DISABLED"`: Disable GPU driver auto installation and needs manual installation. * * `"DEFAULT"`: "Default" GPU driver in COS and Ubuntu. * * `"LATEST"`: "Latest" GPU driver in COS. 
*/ gpuDriverVersion: string; } interface ClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfig { /** * The type of GPU sharing strategy to enable on the GPU node. * Accepted values are: * * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device. * * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus) */ gpuSharingStrategy: string; /** * The maximum number of containers that can share a GPU. */ maxSharedClientsPerGpu: number; } interface ClusterNodePoolNodeConfigGvnic { /** * Whether or not the Google Virtual NIC (gVNIC) is enabled */ enabled: boolean; } interface ClusterNodePoolNodeConfigHostMaintenancePolicy { /** * . */ maintenanceInterval: string; } interface ClusterNodePoolNodeConfigKubeletConfig { /** * Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`. */ allowedUnsafeSysctls?: string[]; /** * Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive. */ containerLogMaxFiles?: number; /** * Defines the maximum size of the * container log file before it is rotated. Specified as a positive number and a * unit suffix, such as `"100Ki"`, `"10Mi"`. Valid units are "Ki", "Mi", "Gi". * The value must be between `"10Mi"` and `"500Mi"`, inclusive. And the total container log size * (`containerLogMaxSize` * `containerLogMaxFiles`) cannot exceed 1% of the total storage of the node. */ containerLogMaxSize?: string; /** * If true, enables CPU CFS quota enforcement for * containers that specify CPU limits. */ cpuCfsQuota: boolean; /** * The CPU CFS quota period value. 
Specified * as a sequence of decimal numbers, each with optional fraction and a unit suffix, * such as `"300ms"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", * "h". The value must be a positive duration. */ cpuCfsQuotaPeriod?: string; /** * The CPU management policy on the node. See * [K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/). * One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none". * Prior to the 6.4.0 this field was marked as required. The workaround for the required field * is setting the empty string `""`, which will function identically to not setting this field. */ cpuManagerPolicy?: string; /** * Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300. */ evictionMaxPodGracePeriodSeconds?: number; /** * Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below. */ evictionMinimumReclaim?: outputs.container.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim; /** * Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below. */ evictionSoft?: outputs.container.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft; /** * Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below. */ evictionSoftGracePeriod?: outputs.container.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod; /** * Defines the percent of disk usage after which image garbage collection is always run. 
The integer must be between 10 and 85, inclusive. */ imageGcHighThresholdPercent?: number; /** * Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive. */ imageGcLowThresholdPercent?: number; /** * Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration. */ imageMaximumGcAge?: string; /** * Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m". */ imageMinimumGcAge?: string; /** * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive. */ maxParallelImagePulls: number; /** * Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node. * The memory manager optimizes memory and hugepages allocation for pods, especially * those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below. */ memoryManager?: outputs.container.ClusterNodePoolNodeConfigKubeletConfigMemoryManager; /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. */ podPidsLimit?: number; /** * Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group. 
*/ singleProcessOomKill?: boolean; /** * These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below. */ topologyManager?: outputs.container.ClusterNodePoolNodeConfigKubeletConfigTopologyManager; } interface ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim { /** * Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ imagefsAvailable?: string; /** * Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ imagefsInodesFree?: string; /** * Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ memoryAvailable?: string; /** * Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ nodefsAvailable?: string; /** * Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ nodefsInodesFree?: string; /** * Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`. */ pidAvailable?: string; } interface ClusterNodePoolNodeConfigKubeletConfigEvictionSoft { /** * Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`. */ imagefsAvailable?: string; /** * Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`. 
*/ imagefsInodesFree?: string; /** * Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory. */ memoryAvailable?: string; /** * Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`. */ nodefsAvailable?: string; /** * Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`. */ nodefsInodesFree?: string; /** * Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`. */ pidAvailable?: string; } interface ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod { /** * Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ imagefsAvailable?: string; /** * Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ imagefsInodesFree?: string; /** * Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". */ memoryAvailable?: string; /** * Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ nodefsAvailable?: string; /** * Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`. */ nodefsInodesFree?: string; /** * Defines grace period for the pid.available soft eviction threshold. 
The value must be a positive duration string no more than `"5m"`. */ pidAvailable?: string; } interface ClusterNodePoolNodeConfigKubeletConfigMemoryManager { /** * The [Memory * Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) * policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None". */ policy: string; } interface ClusterNodePoolNodeConfigKubeletConfigTopologyManager { /** * The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none". */ policy: string; /** * The Topology Manager scope, defining the granularity at which * policy decisions are applied. Valid values are "container" (resources are aligned * per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container". */ scope: string; } interface ClusterNodePoolNodeConfigLinuxNodeConfig { /** * Possible cgroup modes that can be used. * Accepted values are: * * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used. * * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image. * * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image. */ cgroupMode: string; /** * Amounts for 2M and 1G hugepages. Structure is documented below. */ hugepagesConfig?: outputs.container.ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig; /** * Settings for kernel module loading. Structure is documented below. 
*/ nodeKernelModuleLoading?: outputs.container.ClusterNodePoolNodeConfigLinuxNodeConfigNodeKernelModuleLoading; /** * The Linux kernel parameters to be applied to the nodes * and all pods running on the nodes. Specified as a map from the key, such as * `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file). * Note that validations happen all server side. All attributes are optional. */ sysctls?: { [key: string]: string; }; /** * The Linux kernel transparent hugepage defrag setting. */ transparentHugepageDefrag?: string; /** * The Linux kernel transparent hugepage setting. */ transparentHugepageEnabled: string; } interface ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig { /** * Amount of 1G hugepages. */ hugepageSize1g?: number; /** * Amount of 2M hugepages. */ hugepageSize2m?: number; } interface ClusterNodePoolNodeConfigLinuxNodeConfigNodeKernelModuleLoading { /** * Possible kernel module loading policies. * Accepted values are: * * `POLICY_UNSPECIFIED`: Default if unset. GKE selects the image based on node type. For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. * * `ENFORCE_SIGNED_MODULES`: Enforced signature verification: Node pools will use a Container-Optimized OS image configured to allow loading of *Google-signed* external kernel modules. Loadpin is enabled but configured to exclude modules, and kernel module signature checking is enforced. * * `DO_NOT_ENFORCE_SIGNED_MODULES`: Mirrors existing DEFAULT behavior: For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. 
*/ policy?: string; } interface ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node. * > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later. */ localSsdCount: number; } interface ClusterNodePoolNodeConfigReservationAffinity { /** * The type of reservation consumption * Accepted values are: * * * `"UNSPECIFIED"`: Default value. This should not be used. * * `"NO_RESERVATION"`: Do not consume from any reserved capacity. * * `"ANY_RESERVATION"`: Consume any reservation available. * * `"SPECIFIC_RESERVATION"`: Must consume from a specific reservation. Must specify key value fields for specifying the reservations. */ consumeReservationType: string; /** * The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value. */ key?: string; /** * The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name" */ values?: string[]; } interface ClusterNodePoolNodeConfigSandboxConfig { /** * Which sandbox to use for pods in the node pool. * Accepted values are: * * * `"gvisor"`: Pods run within a gVisor sandbox. */ sandboxType: string; } interface ClusterNodePoolNodeConfigSecondaryBootDisk { /** * Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`. */ diskImage: string; /** * Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`. 
*/ mode?: string; } interface ClusterNodePoolNodeConfigShieldedInstanceConfig { /** * Defines if the instance has integrity monitoring enabled. * * Enables monitoring and attestation of the boot integrity of the instance. The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created. Defaults to `true`. */ enableIntegrityMonitoring?: boolean; /** * Defines if the instance has Secure Boot enabled. * * Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. Defaults to `false`. */ enableSecureBoot?: boolean; } interface ClusterNodePoolNodeConfigSoleTenantConfig { /** * Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count. */ minNodeCpus?: number; /** * The node affinity settings for the sole tenant node pool. Structure is documented below. */ nodeAffinities: outputs.container.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity[]; } interface ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity { /** * The default or custom node affinity label key name. */ key: string; /** * Specifies affinity or anti-affinity. Accepted values are `"IN"` or `"NOT_IN"` */ operator: string; /** * List of node affinity label values as strings. */ values: string[]; } interface ClusterNodePoolNodeConfigTaint { /** * Effect for taint. Accepted values are `NO_SCHEDULE`, `PREFER_NO_SCHEDULE`, and `NO_EXECUTE`. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. 
*/ value: string; } interface ClusterNodePoolNodeConfigWindowsNodeConfig { /** * The OS Version of the windows nodepool. Values are OS_VERSION_UNSPECIFIED, OS_VERSION_LTSC2019 and OS_VERSION_LTSC2022 */ osversion?: string; } interface ClusterNodePoolNodeConfigWorkloadMetadataConfig { /** * How to expose the node metadata to the workload running on the node. * Accepted values are: * * UNSPECIFIED: Not Set * * GCE_METADATA: Expose all Compute Engine metadata to pods. * * GKE_METADATA: Run the GKE Metadata Server on this node. The GKE Metadata Server exposes a metadata API to workloads that is compatible with the V1 Compute Metadata APIs exposed by the Compute Engine and App Engine Metadata Servers. This feature can only be enabled if [workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) is enabled at the cluster level. */ mode: string; } interface ClusterNodePoolNodeDrainConfig { /** * Whether to respect PodDisruptionBudget policy during node pool deletion. */ respectPdbDuringNodePoolDeletion?: boolean; } interface ClusterNodePoolPlacementPolicy { /** * If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned. */ policyName?: string; /** * The TPU topology like "2x4" or "2x2x2". https://cloud.google.com/kubernetes-engine/docs/concepts/plan-tpus#topology */ tpuTopology?: string; /** * The type of the placement policy. Supports a single value: `COMPACT`. Specifying `COMPACT` placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes. */ type: string; } interface ClusterNodePoolQueuedProvisioning { /** * Whether nodes in this node pool are obtainable solely through the ProvisioningRequest API */ enabled: boolean; } interface ClusterNodePoolUpgradeSettings { /** * Settings for blue-green upgrade strategy. 
To be specified when strategy is set to BLUE_GREEN. Structure is documented below. */ blueGreenSettings: outputs.container.ClusterNodePoolUpgradeSettingsBlueGreenSettings; /** * The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process. To be used when strategy is set to SURGE. Default is 0. */ maxSurge: number; /** * The maximum number of nodes that can be simultaneously unavailable during the upgrade process. To be used when strategy is set to SURGE. Default is 0. */ maxUnavailable: number; /** * Strategy used for node pool update. Strategy can only be one of BLUE_GREEN or SURGE. The default value is SURGE. */ strategy?: string; } interface ClusterNodePoolUpgradeSettingsBlueGreenSettings { /** * Autoscaled rollout policy for blue-green upgrade. */ autoscaledRolloutPolicy?: outputs.container.ClusterNodePoolUpgradeSettingsBlueGreenSettingsAutoscaledRolloutPolicy; /** * Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ nodePoolSoakDuration: string; /** * Standard policy for the blue-green upgrade. To be specified when strategy is set to BLUE_GREEN. Structure is documented below. */ standardRolloutPolicy?: outputs.container.ClusterNodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy; } interface ClusterNodePoolUpgradeSettingsBlueGreenSettingsAutoscaledRolloutPolicy { /** * Time in seconds to wait after cordoning the blue pool before draining the nodes. */ waitForDrainDuration: string; } interface ClusterNodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy { /** * Number of blue nodes to drain in a batch. Only one of the batchPercentage or batchNodeCount can be specified. */ batchNodeCount: number; /** * Percentage of the blue pool nodes to drain in a batch. The range of this field should be (0.0, 1.0). 
Only one of the batchPercentage or batchNodeCount can be specified. */ batchPercentage: number; /** * Soak time after each batch gets drained. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ batchSoakDuration: string; } interface ClusterNotificationConfig { /** * The pubsub config for the cluster's upgrade notifications. */ pubsub: outputs.container.ClusterNotificationConfigPubsub; } interface ClusterNotificationConfigPubsub { /** * Whether or not the notification config is enabled */ enabled: boolean; /** * Choose what type of notifications you want to receive. If no filters are applied, you'll receive all notification types. Structure is documented below. */ filter?: outputs.container.ClusterNotificationConfigPubsubFilter; /** * The pubsub topic to push upgrade notifications to. Must be in the same project as the cluster. Must be in the format: `projects/{project}/topics/{topic}`. */ topic?: string; } interface ClusterNotificationConfigPubsubFilter { /** * Can be used to filter what notifications are sent. Accepted values are `UPGRADE_AVAILABLE_EVENT`, `UPGRADE_EVENT`, `SECURITY_BULLETIN_EVENT` and `UPGRADE_INFO_EVENT`. See [Filtering notifications](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-notifications#filtering) for more details. */ eventTypes: string[]; } interface ClusterPodAutoscaling { /** * Enable the Horizontal Pod Autoscaling profile for this cluster. * Acceptable values are: * * `"NONE"`: Customers explicitly opt-out of HPA profiles. * * `"PERFORMANCE"`: PERFORMANCE is used when customers opt-in to the performance HPA profile. In this profile we support a higher number of HPAs per cluster and faster metrics collection for workload autoscaling. * See [HPAProfile](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#hpaprofile) for more details. 
*/ hpaProfile: string; } interface ClusterPodSecurityPolicyConfig { /** * Enable the PodSecurityPolicy controller for this cluster. * If enabled, pods must be valid under a PodSecurityPolicy to be created. */ enabled: boolean; } interface ClusterPrivateClusterConfig { /** * When `true`, the cluster's private * endpoint is used as the cluster endpoint and access through the public endpoint * is disabled. When `false`, either endpoint can be used. This field only applies * to private clusters, when `enablePrivateNodes` is `true`. */ enablePrivateEndpoint?: boolean; /** * Enables the private cluster feature, * creating a private endpoint on the cluster. In a private cluster, nodes only * have RFC 1918 private addresses and communicate with the master's private * endpoint via private networking. */ enablePrivateNodes?: boolean; /** * Controls cluster master global * access settings. If unset, the provider will no longer manage this field and will * not modify the previously-set value. Structure is documented below. */ masterGlobalAccessConfig: outputs.container.ClusterPrivateClusterConfigMasterGlobalAccessConfig; /** * The IP range in CIDR notation to use for * the hosted master network. This range will be used for assigning private IP * addresses to the cluster master(s) and the ILB VIP. This range must not overlap * with any other ranges in use within the cluster's network, and it must be a /28 * subnet. See [Private Cluster Limitations](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#req_res_lim) * for more details. This field only applies to private clusters, when * `enablePrivateNodes` is `true`. */ masterIpv4CidrBlock: string; /** * The name of the peering between this cluster and the Google owned VPC. */ peeringName: string; /** * The internal IP address of this cluster's master endpoint. */ privateEndpoint: string; /** * Subnetwork in cluster's network where master's endpoint will be provisioned. 
*/ privateEndpointSubnetwork?: string; /** * The external IP address of this cluster's master endpoint. * * !> The Google provider is unable to validate certain configurations of * `privateClusterConfig` when `enablePrivateNodes` is `false`. It's * recommended that you omit the block entirely if the field is not set to `true`. */ publicEndpoint: string; } interface ClusterPrivateClusterConfigMasterGlobalAccessConfig { /** * Whether the cluster master is accessible globally or * not. */ enabled: boolean; } interface ClusterProtectConfig { /** * ) WorkloadConfig defines which actions are enabled for a cluster's workload configurations. Structure is documented below */ workloadConfig: outputs.container.ClusterProtectConfigWorkloadConfig; /** * ) Sets which mode to use for Protect workload vulnerability scanning feature. Accepted values are DISABLED, BASIC. */ workloadVulnerabilityMode: string; } interface ClusterProtectConfigWorkloadConfig { /** * ) Sets which mode of auditing should be used for the cluster's workloads. Accepted values are DISABLED, BASIC. */ auditMode: string; } interface ClusterRbacBindingConfig { /** * Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated. * * * ## Attributes Reference * * In addition to the arguments listed above, the following computed attributes are * exported: */ enableInsecureBindingSystemAuthenticated?: boolean; /** * Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated. */ enableInsecureBindingSystemUnauthenticated?: boolean; } interface ClusterReleaseChannel { /** * The selected release channel. * Accepted values are: * * UNSPECIFIED: Not set. * * RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. * * REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. 
* * STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky. * * EXTENDED: GKE provides extended support for Kubernetes minor versions through the Extended channel. With this channel, you can stay on a minor version for up to 24 months. */ channel: string; } interface ClusterResourceUsageExportConfig { /** * Parameters for using BigQuery as the destination of resource usage export. * * * `bigquery_destination.dataset_id` (Required) - The ID of a BigQuery Dataset. For Example: */ bigqueryDestination: outputs.container.ClusterResourceUsageExportConfigBigqueryDestination; /** * Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created * in the cluster to meter network egress traffic. */ enableNetworkEgressMetering?: boolean; /** * Whether to enable resource * consumption metering on this cluster. When enabled, a table will be created in * the resource export BigQuery dataset to store resource consumption data. The * resulting table can be joined with the resource usage table or with BigQuery * billing export. Defaults to `true`. */ enableResourceConsumptionMetering?: boolean; } interface ClusterResourceUsageExportConfigBigqueryDestination { /** * The ID of a BigQuery Dataset. */ datasetId: string; } interface ClusterSecretManagerConfig { /** * Enable the Secret Manager add-on for this cluster. */ enabled: boolean; /** * config for secret manager auto rotation. Structure is docuemented below */ rotationConfig: outputs.container.ClusterSecretManagerConfigRotationConfig; } interface ClusterSecretManagerConfigRotationConfig { /** * ) - Enable the roation in Sync as K8s secret feature for this cluster. */ enabled: boolean; /** * ) - The interval between two consecutive rotations. Default rotation interval is 2 minutes. */ rotationInterval: string; } interface ClusterSecretSyncConfig { /** * ) - Enable the Sync as K8s secret feature for this cluster. 
*/ enabled: boolean; /** * ) - config for secret sync auto rotation. Structure is docuemented below */ rotationConfig: outputs.container.ClusterSecretSyncConfigRotationConfig; } interface ClusterSecretSyncConfigRotationConfig { /** * ) - Enable the roation in Sync as K8s secret feature for this cluster. */ enabled: boolean; /** * ) - The interval between two consecutive rotations. Default rotation interval is 2 minutes. */ rotationInterval: string; } interface ClusterSecurityPostureConfig { /** * Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include `DISABLED`, `BASIC`, and `ENTERPRISE`. */ mode: string; /** * Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. Available options include `VULNERABILITY_DISABLED`, `VULNERABILITY_BASIC` and `VULNERABILITY_ENTERPRISE`. */ vulnerabilityMode: string; } interface ClusterServiceExternalIpsConfig { /** * Controls whether external ips specified by a service will be allowed. It is enabled by default. */ enabled: boolean; } interface ClusterTpuConfig { /** * Whether Cloud TPU integration is enabled or not */ enabled: boolean; /** * IPv4 CIDR block reserved for Cloud TPU in the VPC. */ ipv4CidrBlock: string; /** * Whether to use service networking for Cloud TPU or not */ useServiceNetworking?: boolean; } interface ClusterUserManagedKeysConfig { /** * The Certificate Authority Service caPool to use for the aggreation CA in this cluster. */ aggregationCa?: string; /** * The Certificate Authority Service caPool to use for the cluster CA in this cluster. */ clusterCa?: string; /** * The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. */ controlPlaneDiskEncryptionKey?: string; /** * The Certificate Authority Service caPool to use for the etcd API CA in this cluster. */ etcdApiCa?: string; /** * The Certificate Authority Service caPool to use for the etcd peer CA in this cluster. 
*/ etcdPeerCa?: string; /** * Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. */ gkeopsEtcdBackupEncryptionKey?: string; /** * The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. */ serviceAccountSigningKeys?: string[]; /** * The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. */ serviceAccountVerificationKeys?: string[]; } interface ClusterVerticalPodAutoscaling { /** * Enables vertical pod autoscaling */ enabled: boolean; } interface ClusterWorkloadAltsConfig { /** * Whether the alts handshaker should be enabled or not for direct-path. Requires Workload Identity (workloadPool) must be non-empty). */ enableAlts: boolean; } interface ClusterWorkloadIdentityConfig { /** * The workload pool to attach all Kubernetes service accounts to. */ workloadPool?: string; } interface GetClusterAddonsConfig { /** * The status of the CloudRun addon. It is disabled by default. Set disabled = false to enable. */ cloudrunConfigs: outputs.container.GetClusterAddonsConfigCloudrunConfig[]; /** * The of the Config Connector addon. */ configConnectorConfigs: outputs.container.GetClusterAddonsConfigConfigConnectorConfig[]; /** * The status of the NodeLocal DNSCache addon. It is disabled by default. Set enabled = true to enable. */ dnsCacheConfigs: outputs.container.GetClusterAddonsConfigDnsCacheConfig[]; /** * Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Set enabled = true to enable. The Compute Engine persistent disk CSI Driver is enabled by default on newly created clusters for the following versions: Linux clusters: GKE version 1.18.10-gke.2100 or later, or 1.19.3-gke.2100 or later. 
*/ gcePersistentDiskCsiDriverConfigs: outputs.container.GetClusterAddonsConfigGcePersistentDiskCsiDriverConfig[]; /** * The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled for Standard clusters; set enabled = true to enable. It is enabled by default for Autopilot clusters; set enabled = true to enable it explicitly. */ gcpFilestoreCsiDriverConfigs: outputs.container.GetClusterAddonsConfigGcpFilestoreCsiDriverConfig[]; /** * The status of the GCS Fuse CSI driver addon, which allows the usage of gcs bucket as volumes. Defaults to disabled; set enabled = true to enable. */ gcsFuseCsiDriverConfigs: outputs.container.GetClusterAddonsConfigGcsFuseCsiDriverConfig[]; /** * The status of the Backup for GKE Agent addon. It is disabled by default. Set enabled = true to enable. */ gkeBackupAgentConfigs: outputs.container.GetClusterAddonsConfigGkeBackupAgentConfig[]; /** * The status of the Horizontal Pod Autoscaling addon, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. It ensures that a Heapster pod is running in the cluster, which is also used by the Cloud Monitoring service. It is enabled by default; set disabled = true to disable. */ horizontalPodAutoscalings: outputs.container.GetClusterAddonsConfigHorizontalPodAutoscaling[]; /** * The status of the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster. It is enabled by default; set disabled = true to disable. */ httpLoadBalancings: outputs.container.GetClusterAddonsConfigHttpLoadBalancing[]; /** * The status of the Istio addon. */ istioConfigs: outputs.container.GetClusterAddonsConfigIstioConfig[]; /** * Configuration for the KALM addon, which manages the lifecycle of k8s. It is disabled by default; Set enabled = true to enable. 
*/ kalmConfigs: outputs.container.GetClusterAddonsConfigKalmConfig[]; /** * Configuration for the Lustre CSI driver. Defaults to disabled; set enabled = true to enable. */ lustreCsiDriverConfigs: outputs.container.GetClusterAddonsConfigLustreCsiDriverConfig[]; /** * Whether we should enable the network policy addon for the master. This must be enabled in order to enable network policy for the nodes. To enable this, you must also define a networkPolicy block, otherwise nothing will happen. It can only be disabled if the nodes already do not have network policies enabled. Defaults to disabled; set disabled = false to enable. */ networkPolicyConfigs: outputs.container.GetClusterAddonsConfigNetworkPolicyConfig[]; /** * The status of the Parallelstore CSI driver addon, which allows the usage of Parallelstore instances as volumes. Defaults to disabled; set enabled = true to enable. */ parallelstoreCsiDriverConfigs: outputs.container.GetClusterAddonsConfigParallelstoreCsiDriverConfig[]; /** * Configuration for the Pod Snapshot feature. */ podSnapshotConfigs: outputs.container.GetClusterAddonsConfigPodSnapshotConfig[]; /** * The status of the Ray Operator addon, which enabled management of Ray AI/ML jobs on GKE. Defaults to disabled; set enabled = true to enable. */ rayOperatorConfigs: outputs.container.GetClusterAddonsConfigRayOperatorConfig[]; /** * The status of the Slice Controller addon. It is disabled by default; set enabled = true to enable. */ sliceControllerConfigs: outputs.container.GetClusterAddonsConfigSliceControllerConfig[]; /** * The status of the Stateful HA addon, which provides automatic configurable failover for stateful applications. Defaults to disabled; set enabled = true to enable. 
*/ statefulHaConfigs: outputs.container.GetClusterAddonsConfigStatefulHaConfig[]; } interface GetClusterAddonsConfigCloudrunConfig { disabled: boolean; loadBalancerType: string; } interface GetClusterAddonsConfigConfigConnectorConfig { enabled: boolean; } interface GetClusterAddonsConfigDnsCacheConfig { enabled: boolean; } interface GetClusterAddonsConfigGcePersistentDiskCsiDriverConfig { enabled: boolean; } interface GetClusterAddonsConfigGcpFilestoreCsiDriverConfig { enabled: boolean; } interface GetClusterAddonsConfigGcsFuseCsiDriverConfig { enabled: boolean; } interface GetClusterAddonsConfigGkeBackupAgentConfig { enabled: boolean; } interface GetClusterAddonsConfigHorizontalPodAutoscaling { disabled: boolean; } interface GetClusterAddonsConfigHttpLoadBalancing { disabled: boolean; } interface GetClusterAddonsConfigIstioConfig { /** * The authentication type between services in Istio. Available options include AUTH_MUTUAL_TLS. */ auth: string; /** * The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. Set disabled = false to enable. */ disabled: boolean; } interface GetClusterAddonsConfigKalmConfig { enabled: boolean; } interface GetClusterAddonsConfigLustreCsiDriverConfig { /** * If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988. * This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes. */ enableLegacyLustrePort: boolean; /** * Whether the Lustre CSI driver is enabled for this cluster. */ enabled: boolean; } interface GetClusterAddonsConfigNetworkPolicyConfig { disabled: boolean; } interface GetClusterAddonsConfigParallelstoreCsiDriverConfig { enabled: boolean; } interface GetClusterAddonsConfigPodSnapshotConfig { /** * Whether the Pod Snapshot feature is enabled for this cluster. 
*/ enabled: boolean; } interface GetClusterAddonsConfigRayOperatorConfig { enabled: boolean; /** * The status of Ray Logging, which scrapes Ray cluster logs to Cloud Logging. Defaults to disabled; set enabled = true to enable. */ rayClusterLoggingConfigs: outputs.container.GetClusterAddonsConfigRayOperatorConfigRayClusterLoggingConfig[]; /** * The status of Ray Cluster monitoring, which shows Ray cluster metrics in Cloud Console. Defaults to disabled; set enabled = true to enable. */ rayClusterMonitoringConfigs: outputs.container.GetClusterAddonsConfigRayOperatorConfigRayClusterMonitoringConfig[]; } interface GetClusterAddonsConfigRayOperatorConfigRayClusterLoggingConfig { enabled: boolean; } interface GetClusterAddonsConfigRayOperatorConfigRayClusterMonitoringConfig { enabled: boolean; } interface GetClusterAddonsConfigSliceControllerConfig { enabled: boolean; } interface GetClusterAddonsConfigStatefulHaConfig { enabled: boolean; } interface GetClusterAnonymousAuthenticationConfig { /** * Setting this to LIMITED will restrict authentication of anonymous users to health check endpoints only. * Accepted values are: * * ENABLED: Authentication of anonymous users is enabled for all endpoints. * * LIMITED: Anonymous access is only allowed for health check endpoints. */ mode: string; } interface GetClusterAuthenticatorGroupsConfig { /** * The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com. */ securityGroup: string; } interface GetClusterBinaryAuthorization { /** * Enable Binary Authorization for this cluster. */ enabled: boolean; /** * Mode of operation for Binary Authorization policy evaluation. */ evaluationMode: string; } interface GetClusterClusterAutoscaling { /** * Contains defaults for a node pool created by NAP. 
*/ autoProvisioningDefaults: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefault[]; /** * The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP. */ autoProvisioningLocations: string[]; /** * Configuration options for the Autoscaling profile feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability when deciding to remove nodes from a cluster. Can be BALANCED or OPTIMIZE_UTILIZATION. Defaults to BALANCED. */ autoscalingProfile: string; /** * Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden. */ defaultComputeClassEnabled: boolean; /** * Whether node auto-provisioning is enabled. Resource limits for cpu and memory must be defined to enable node auto-provisioning. */ enabled: boolean; /** * Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. These limits will apply to node pool autoscaling in addition to node auto-provisioning. */ resourceLimits: outputs.container.GetClusterClusterAutoscalingResourceLimit[]; } interface GetClusterClusterAutoscalingAutoProvisioningDefault { /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. */ bootDiskKmsKey: string; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ diskSize: number; /** * Type of the disk attached to each node. */ diskType: string; /** * The default image type used by NAP once a new node pool is being created. */ imageType: string; /** * NodeManagement configuration for this NodePool. */ managements: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefaultManagement[]; /** * Minimum CPU platform to be used by this instance. 
The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell. */ minCpuPlatform: string; /** * Scopes that are used by NAP when creating node pools. */ oauthScopes: string[]; /** * The Google Cloud Platform Service Account to be used by the node VMs. */ serviceAccount: string; /** * Shielded Instance options. */ shieldedInstanceConfigs: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefaultShieldedInstanceConfig[]; /** * Specifies the upgrade settings for NAP created node pools */ upgradeSettings: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefaultUpgradeSetting[]; } interface GetClusterClusterAutoscalingAutoProvisioningDefaultManagement { /** * Specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered. */ autoRepair: boolean; /** * Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes. */ autoUpgrade: boolean; /** * Specifies the Auto Upgrade knobs for the node pool. */ upgradeOptions: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefaultManagementUpgradeOption[]; } interface GetClusterClusterAutoscalingAutoProvisioningDefaultManagementUpgradeOption { /** * This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format. */ autoUpgradeStartTime: string; /** * This field is set when upgrades are about to commence with the description of the upgrade. */ description: string; } interface GetClusterClusterAutoscalingAutoProvisioningDefaultShieldedInstanceConfig { /** * Defines whether the instance has integrity monitoring enabled. 
*/ enableIntegrityMonitoring: boolean; /** * Defines whether the instance has Secure Boot enabled. */ enableSecureBoot: boolean; } interface GetClusterClusterAutoscalingAutoProvisioningDefaultUpgradeSetting { /** * Settings for blue-green upgrade strategy. */ blueGreenSettings: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefaultUpgradeSettingBlueGreenSetting[]; /** * The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process. */ maxSurge: number; /** * The maximum number of nodes that can be simultaneously unavailable during the upgrade process. */ maxUnavailable: number; /** * Update strategy of the node pool. */ strategy: string; } interface GetClusterClusterAutoscalingAutoProvisioningDefaultUpgradeSettingBlueGreenSetting { /** * Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. * * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ nodePoolSoakDuration: string; /** * Standard policy for the blue-green upgrade. */ standardRolloutPolicies: outputs.container.GetClusterClusterAutoscalingAutoProvisioningDefaultUpgradeSettingBlueGreenSettingStandardRolloutPolicy[]; } interface GetClusterClusterAutoscalingAutoProvisioningDefaultUpgradeSettingBlueGreenSettingStandardRolloutPolicy { /** * Number of blue nodes to drain in a batch. */ batchNodeCount: number; /** * Percentage of the bool pool nodes to drain in a batch. The range of this field should be (0.0, 1.0]. */ batchPercentage: number; /** * Soak time after each batch gets drained. * * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ batchSoakDuration: string; } interface GetClusterClusterAutoscalingResourceLimit { /** * Maximum amount of the resource in the cluster. */ maximum: number; /** * Minimum amount of the resource in the cluster. */ minimum: number; /** * The type of the resource. 
For example, cpu and memory. See the guide to using Node Auto-Provisioning for a list of types. */ resourceType: string; } interface GetClusterClusterTelemetry { /** * Type of the integration. */ type: string; } interface GetClusterConfidentialNode { /** * Defines the type of technology used by the confidential node. */ confidentialInstanceType: string; /** * Whether Confidential Nodes feature is enabled for all nodes in this cluster. */ enabled: boolean; } interface GetClusterControlPlaneEndpointsConfig { /** * DNS endpoint configuration. */ dnsEndpointConfigs: outputs.container.GetClusterControlPlaneEndpointsConfigDnsEndpointConfig[]; /** * IP endpoint configuration. */ ipEndpointsConfigs: outputs.container.GetClusterControlPlaneEndpointsConfigIpEndpointsConfig[]; } interface GetClusterControlPlaneEndpointsConfigDnsEndpointConfig { /** * Controls whether user traffic is allowed over this endpoint. Note that GCP-managed services may still use the endpoint even if this is false. */ allowExternalTraffic: boolean; /** * Controls whether the k8s certs auth is allowed via dns. */ enableK8sCertsViaDns: boolean; /** * Controls whether the k8s token auth is allowed via dns. */ enableK8sTokensViaDns: boolean; /** * The cluster's DNS endpoint. */ endpoint: string; } interface GetClusterControlPlaneEndpointsConfigIpEndpointsConfig { /** * Controls whether to allow direct IP access. */ enabled: boolean; } interface GetClusterCostManagementConfig { /** * Whether to enable GKE cost allocation. When you enable GKE cost allocation, the cluster name and namespace of your GKE workloads appear in the labels field of the billing export to BigQuery. Defaults to false. */ enabled: boolean; } interface GetClusterDatabaseEncryption { /** * The key to use to encrypt/decrypt secrets. */ keyName: string; /** * ENCRYPTED or DECRYPTED. 
*/ state: string; } interface GetClusterDefaultSnatStatus { /** * When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic. */ disabled: boolean; } interface GetClusterDnsConfig { /** * Enable additive VPC scope DNS in a GKE cluster. */ additiveVpcScopeDnsDomain: string; /** * Which in-cluster DNS provider should be used. */ clusterDns: string; /** * The suffix used for all cluster service records. */ clusterDnsDomain: string; /** * The scope of access to cluster DNS records. */ clusterDnsScope: string; } interface GetClusterEnableK8sBetaApi { /** * Enabled Kubernetes Beta APIs. */ enabledApis: string[]; } interface GetClusterEnterpriseConfig { /** * Indicates the effective cluster tier. Available options include STANDARD and ENTERPRISE. */ clusterTier: string; /** * Indicates the desired cluster tier. Available options include STANDARD and ENTERPRISE. */ desiredTier: string; } interface GetClusterFleet { /** * Full resource name of the registered fleet membership of the cluster. */ membership: string; /** * Short name of the fleet membership, for example "member-1". */ membershipId: string; /** * Location of the fleet membership, for example "us-central1". */ membershipLocation: string; /** * The type of the cluster's fleet membership. */ membershipType: string; /** * Whether the cluster has been registered via the fleet API. */ preRegistered: boolean; /** * The project in which the resource belongs. If it * is not provided, the provider project is used. */ project: string; } interface GetClusterGatewayApiConfig { /** * The Gateway API release channel to use for Gateway API. */ channel: string; } interface GetClusterGkeAutoUpgradeConfig { /** * The selected auto-upgrade patch type. Accepted values are: * * ACCELERATED: Upgrades to the latest available patch version in a given minor and release channel. 
*/ patchMode: string; } interface GetClusterIdentityServiceConfig { /** * Whether to enable the Identity Service component. */ enabled: boolean; } interface GetClusterIpAllocationPolicy { /** * AdditionalIPRangesConfig is the configuration for individual additional subnetworks attached to the cluster */ additionalIpRangesConfigs: outputs.container.GetClusterIpAllocationPolicyAdditionalIpRangesConfig[]; /** * AdditionalPodRangesConfig is the configuration for additional pod secondary ranges supporting the ClusterUpdate message. */ additionalPodRangesConfigs: outputs.container.GetClusterIpAllocationPolicyAdditionalPodRangesConfig[]; /** * AutoIpamConfig contains all information related to Auto IPAM. */ autoIpamConfigs: outputs.container.GetClusterIpAllocationPolicyAutoIpamConfig[]; /** * The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. */ clusterIpv4CidrBlock: string; /** * The name of the existing secondary range in the cluster's subnetwork to use for pod IP addresses. Alternatively, clusterIpv4CidrBlock can be used to automatically create a GKE-managed one. */ clusterSecondaryRangeName: string; /** * Used to determine the default network tier for external IP addresses on cluster resources, such as node pools and load balancers. */ networkTierConfigs: outputs.container.GetClusterIpAllocationPolicyNetworkTierConfig[]; /** * Configuration for cluster level pod cidr overprovision. Default is disabled=false. */ podCidrOverprovisionConfigs: outputs.container.GetClusterIpAllocationPolicyPodCidrOverprovisionConfig[]; /** * The IP address range of the services IPs in this cluster. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. 
/14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. */ servicesIpv4CidrBlock: string; /** * The name of the existing secondary range in the cluster's subnetwork to use for service ClusterIPs. Alternatively, servicesIpv4CidrBlock can be used to automatically create a GKE-managed one. */ servicesSecondaryRangeName: string; /** * The IP Stack type of the cluster. Choose between IPV4 and IPV4_IPV6. Default type is IPV4 Only if not set */ stackType: string; } interface GetClusterIpAllocationPolicyAdditionalIpRangesConfig { /** * List of secondary ranges names within this subnetwork that can be used for pod IPs. */ podIpv4RangeNames: string[]; /** * Status of the subnetwork, If in draining status, subnet will not be selected for new node pools. */ status: string; /** * Name of the subnetwork. This can be the full path of the subnetwork or just the name. */ subnetwork: string; } interface GetClusterIpAllocationPolicyAdditionalPodRangesConfig { /** * Name for pod secondary ipv4 range which has the actual range defined ahead. */ podRangeNames: string[]; } interface GetClusterIpAllocationPolicyAutoIpamConfig { /** * The flag that enables Auto IPAM on this cluster. */ enabled: boolean; } interface GetClusterIpAllocationPolicyNetworkTierConfig { /** * Network tier configuration. */ networkTier: string; } interface GetClusterIpAllocationPolicyPodCidrOverprovisionConfig { disabled: boolean; } interface GetClusterLoggingConfig { /** * GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, KCP_CONNECTION, KCP_SSHD, KCP_HPA, SCHEDULER, and WORKLOADS. */ enableComponents: string[]; } interface GetClusterMaintenancePolicy { /** * Time window specified for daily maintenance operations. Specify startTime in RFC3339 format "HH:MMā€, where HH : [00-23] and MM : [00-59] GMT. 
*/ dailyMaintenanceWindows: outputs.container.GetClusterMaintenancePolicyDailyMaintenanceWindow[]; /** * Exceptions to maintenance window. Non-emergency maintenance should not occur in these windows. */ maintenanceExclusions: outputs.container.GetClusterMaintenancePolicyMaintenanceExclusion[]; /** * Time window for recurring maintenance operations. */ recurringWindows: outputs.container.GetClusterMaintenancePolicyRecurringWindow[]; } interface GetClusterMaintenancePolicyDailyMaintenanceWindow { duration: string; startTime: string; } interface GetClusterMaintenancePolicyMaintenanceExclusion { endTime: string; exclusionName: string; /** * Maintenance exclusion related options. */ exclusionOptions: outputs.container.GetClusterMaintenancePolicyMaintenanceExclusionExclusionOption[]; startTime: string; } interface GetClusterMaintenancePolicyMaintenanceExclusionExclusionOption { /** * The behavior of the exclusion end time. */ endTimeBehavior: string; /** * The scope of automatic upgrades to restrict in the exclusion window. */ scope: string; } interface GetClusterMaintenancePolicyRecurringWindow { endTime: string; recurrence: string; startTime: string; } interface GetClusterManagedOpentelemetryConfig { /** * The scope of the Managed OpenTelemetry pipeline. Available options include SCOPE_UNSPECIFIED, NONE, and COLLECTION_AND_INSTRUMENTATION_COMPONENTS. */ scope: string; } interface GetClusterMasterAuth { /** * Base64 encoded public certificate used by clients to authenticate to the cluster endpoint. */ clientCertificate: string; /** * Whether client certificate authorization is enabled for this cluster. */ clientCertificateConfigs: outputs.container.GetClusterMasterAuthClientCertificateConfig[]; /** * Base64 encoded private key used by clients to authenticate to the cluster endpoint. */ clientKey: string; /** * Base64 encoded public certificate that is the root of trust for the cluster. 
*/ clusterCaCertificate: string; } interface GetClusterMasterAuthClientCertificateConfig { /** * Whether client certificate authorization is enabled for this cluster. */ issueClientCertificate: boolean; } interface GetClusterMasterAuthorizedNetworksConfig { /** * External networks that can access the Kubernetes cluster master through HTTPS. */ cidrBlocks: outputs.container.GetClusterMasterAuthorizedNetworksConfigCidrBlock[]; /** * Whether Kubernetes master is accessible via Google Compute Engine Public IPs. */ gcpPublicCidrsAccessEnabled: boolean; /** * Whether authorized networks is enforced on the private endpoint or not. Defaults to false. */ privateEndpointEnforcementEnabled: boolean; } interface GetClusterMasterAuthorizedNetworksConfigCidrBlock { /** * External network that can access Kubernetes master through HTTPS. Must be specified in CIDR notation. */ cidrBlock: string; /** * Field for users to identify CIDR blocks. */ displayName: string; } interface GetClusterMeshCertificate { /** * When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster. */ enableCertificates: boolean; } interface GetClusterMonitoringConfig { /** * Configuration of Advanced Datapath Observability features. */ advancedDatapathObservabilityConfigs: outputs.container.GetClusterMonitoringConfigAdvancedDatapathObservabilityConfig[]; /** * GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS, KUBELET, CADVISOR, DCGM and JOBSET. */ enableComponents: string[]; /** * Configuration for Google Cloud Managed Services for Prometheus. */ managedPrometheuses: outputs.container.GetClusterMonitoringConfigManagedPrometheus[]; } interface GetClusterMonitoringConfigAdvancedDatapathObservabilityConfig { /** * Whether or not the advanced datapath metrics are enabled. 
*/ enableMetrics: boolean; /** * Whether or not Relay is enabled. */ enableRelay: boolean; } interface GetClusterMonitoringConfigManagedPrometheus { /** * Configuration for GKE Workload Auto-Monitoring. */ autoMonitoringConfigs: outputs.container.GetClusterMonitoringConfigManagedPrometheusAutoMonitoringConfig[]; /** * Whether or not the managed collection is enabled. */ enabled: boolean; } interface GetClusterMonitoringConfigManagedPrometheusAutoMonitoringConfig { /** * The scope of auto-monitoring. */ scope: string; } interface GetClusterNetworkPerformanceConfig { /** * Specifies the total network bandwidth tier for NodePools in the cluster. */ totalEgressBandwidthTier: string; } interface GetClusterNetworkPolicy { /** * Whether network policy is enabled on the cluster. */ enabled: boolean; /** * The selected network policy provider. */ provider: string; } interface GetClusterNodeConfig { /** * Specifies options for controlling advanced machine features. */ advancedMachineFeatures: outputs.container.GetClusterNodeConfigAdvancedMachineFeature[]; /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. */ bootDiskKmsKey: string; /** * Boot disk configuration for node pools nodes. */ bootDisks: outputs.container.GetClusterNodeConfigBootDisk[]; /** * Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. */ confidentialNodes: outputs.container.GetClusterNodeConfigConfidentialNode[]; /** * Parameters for containerd configuration. */ containerdConfigs: outputs.container.GetClusterNodeConfigContainerdConfig[]; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ diskSizeGb: number; /** * Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd */ diskType: string; /** * List of kubernetes taints applied to each node. 
*/ effectiveTaints: outputs.container.GetClusterNodeConfigEffectiveTaint[]; /** * If enabled boot disks are configured with confidential mode. */ enableConfidentialStorage: boolean; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. */ ephemeralStorageConfigs: outputs.container.GetClusterNodeConfigEphemeralStorageConfig[]; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. */ ephemeralStorageLocalSsdConfigs: outputs.container.GetClusterNodeConfigEphemeralStorageLocalSsdConfig[]; /** * Enable or disable NCCL Fast Socket in the node pool. */ fastSockets: outputs.container.GetClusterNodeConfigFastSocket[]; /** * Enables Flex Start provisioning model for the node pool */ flexStart: boolean; /** * GCFS configuration for this node. */ gcfsConfigs: outputs.container.GetClusterNodeConfigGcfsConfig[]; /** * List of the type and count of accelerator cards attached to the instance. */ guestAccelerators: outputs.container.GetClusterNodeConfigGuestAccelerator[]; /** * Enable or disable gvnic in the node pool. */ gvnics: outputs.container.GetClusterNodeConfigGvnic[]; /** * The maintenance policy for the hosts on which the GKE VMs run. */ hostMaintenancePolicies: outputs.container.GetClusterNodeConfigHostMaintenancePolicy[]; /** * The image type to use for this node. Note that for a given image type, the latest version of it will be used. */ imageType: string; /** * Node kubelet configs. */ kubeletConfigs: outputs.container.GetClusterNodeConfigKubeletConfig[]; /** * The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. */ labels: { [key: string]: string; }; /** * Parameters that can be configured on Linux nodes. */ linuxNodeConfigs: outputs.container.GetClusterNodeConfigLinuxNodeConfig[]; /** * Parameters for raw-block local NVMe SSDs. 
*/ localNvmeSsdBlockConfigs: outputs.container.GetClusterNodeConfigLocalNvmeSsdBlockConfig[]; /** * The number of local SSD disks to be attached to the node. */ localSsdCount: number; /** * LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. */ localSsdEncryptionMode: string; /** * Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. */ loggingVariant: string; /** * The name of a Google Compute Engine machine type. */ machineType: string; /** * The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". */ maxRunDuration: string; /** * The metadata key/value pairs assigned to instances in the cluster. */ metadata: { [key: string]: string; }; /** * Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. */ minCpuPlatform: string; /** * Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. */ nodeGroup: string; /** * The set of Google API scopes to be made available on all of the node VMs. */ oauthScopes: string[]; /** * Whether the nodes are created as preemptible VM instances. */ preemptible: boolean; /** * The reservation affinity configuration for the node pool. */ reservationAffinities: outputs.container.GetClusterNodeConfigReservationAffinity[]; /** * The GCE resource labels (a map of key/value pairs) to be applied to the node pool. */ resourceLabels: { [key: string]: string; }; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * Sandbox configuration for this node. 
*/ sandboxConfigs: outputs.container.GetClusterNodeConfigSandboxConfig[]; /** * Secondary boot disks for preloading data or container images. */ secondaryBootDisks: outputs.container.GetClusterNodeConfigSecondaryBootDisk[]; /** * The Google Cloud Platform Service Account to be used by the node VMs. */ serviceAccount: string; /** * Shielded Instance options. */ shieldedInstanceConfigs: outputs.container.GetClusterNodeConfigShieldedInstanceConfig[]; /** * Node affinity options for sole tenant node pools. */ soleTenantConfigs: outputs.container.GetClusterNodeConfigSoleTenantConfig[]; /** * Whether the nodes are created as spot VM instances. */ spot: boolean; /** * The list of Storage Pools where boot disks are provisioned. */ storagePools: string[]; /** * The list of instance tags applied to all nodes. */ tags: string[]; /** * List of Kubernetes taints to be applied to each node. */ taints: outputs.container.GetClusterNodeConfigTaint[]; /** * Parameters that can be configured on Windows nodes. */ windowsNodeConfigs: outputs.container.GetClusterNodeConfigWindowsNodeConfig[]; /** * The workload metadata configuration for this node. */ workloadMetadataConfigs: outputs.container.GetClusterNodeConfigWorkloadMetadataConfig[]; } interface GetClusterNodeConfigAdvancedMachineFeature { /** * Whether the node should have nested virtualization enabled. */ enableNestedVirtualization: boolean; /** * Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed. */ performanceMonitoringUnit: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; } interface GetClusterNodeConfigBootDisk { /** * Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd */ diskType: string; /** * Configured IOPs provisioning. 
Only valid with disk type hyperdisk-balanced. */ provisionedIops: number; /** * Configured throughput provisioning. Only valid with disk type hyperdisk-balanced. */ provisionedThroughput: number; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ sizeGb: number; } interface GetClusterNodeConfigConfidentialNode { /** * Defines the type of technology used by the confidential node. */ confidentialInstanceType: string; /** * Whether Confidential Nodes feature is enabled for all nodes in this pool. */ enabled: boolean; } interface GetClusterNodeConfigContainerdConfig { /** * Parameters for private container registries configuration. */ privateRegistryAccessConfigs: outputs.container.GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfig[]; /** * Configures containerd registry host configuration. Each registryHosts entry represents a hosts.toml file. */ registryHosts: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHost[]; /** * Parameters for writable cgroups configuration. */ writableCgroups: outputs.container.GetClusterNodeConfigContainerdConfigWritableCgroup[]; } interface GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfig { /** * Parameters for configuring CA certificate and domains. */ certificateAuthorityDomainConfigs: outputs.container.GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Whether or not private registries are configured. */ enabled: boolean; } interface GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. 
*/ gcpSecretManagerCertificateConfigs: outputs.container.GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig[]; } interface GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. */ secretUri: string; } interface GetClusterNodeConfigContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface GetClusterNodeConfigContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities: string[]; /** * Configures the registry host certificate. */ cas: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout: string; /** * Configures the registry host headers. */ headers: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. */ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. */ overridePath: boolean; } interface GetClusterNodeConfigContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. 
*/ gcpSecretManagerSecretUri: string; } interface GetClusterNodeConfigContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ certs: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHostHostClientCert[]; /** * Configures the client private key. */ keys: outputs.container.GetClusterNodeConfigContainerdConfigRegistryHostHostClientKey[]; } interface GetClusterNodeConfigContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodeConfigContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodeConfigContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. */ values: string[]; } interface GetClusterNodeConfigContainerdConfigWritableCgroup { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface GetClusterNodeConfigEffectiveTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface GetClusterNodeConfigEphemeralStorageConfig { /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size. */ localSsdCount: number; } interface GetClusterNodeConfigEphemeralStorageLocalSsdConfig { /** * Number of local SSDs to be utilized for GKE Data Cache. Uses NVMe interfaces. */ dataCacheCount: number; /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size. 
*/ localSsdCount: number; } interface GetClusterNodeConfigFastSocket { /** * Whether or not NCCL Fast Socket is enabled */ enabled: boolean; } interface GetClusterNodeConfigGcfsConfig { /** * Whether or not GCFS is enabled */ enabled: boolean; } interface GetClusterNodeConfigGuestAccelerator { /** * The number of the accelerator cards exposed to an instance. */ count: number; /** * Configuration for auto installation of GPU driver. */ gpuDriverInstallationConfigs: outputs.container.GetClusterNodeConfigGuestAcceleratorGpuDriverInstallationConfig[]; /** * Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning) */ gpuPartitionSize: string; /** * Configuration for GPU sharing. */ gpuSharingConfigs: outputs.container.GetClusterNodeConfigGuestAcceleratorGpuSharingConfig[]; /** * The accelerator type resource name. */ type: string; } interface GetClusterNodeConfigGuestAcceleratorGpuDriverInstallationConfig { /** * Mode for how the GPU driver is installed. */ gpuDriverVersion: string; } interface GetClusterNodeConfigGuestAcceleratorGpuSharingConfig { /** * The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig) */ gpuSharingStrategy: string; /** * The maximum number of containers that can share a GPU. */ maxSharedClientsPerGpu: number; } interface GetClusterNodeConfigGvnic { /** * Whether or not gvnic is enabled */ enabled: boolean; } interface GetClusterNodeConfigHostMaintenancePolicy { /** * The maintenance interval for the hosts. NOTE(review): no description is shipped in the upstream schema; presumably one of `AS_NEEDED` or `PERIODIC` — confirm against the GKE API documentation. */ maintenanceInterval: string; } interface GetClusterNodeConfigKubeletConfig { /** * Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. */ allowedUnsafeSysctls: string[]; /** * Defines the maximum number of container log files that can be present for a container. 
*/ containerLogMaxFiles: number; /** * Defines the maximum size of the container log file before it is rotated. */ containerLogMaxSize: string; /** * Enable CPU CFS quota enforcement for containers that specify CPU limits. */ cpuCfsQuota: boolean; /** * Set the CPU CFS quota period value 'cpu.cfs_period_us'. */ cpuCfsQuotaPeriod: string; /** * Control the CPU management policy on the node. */ cpuManagerPolicy: string; /** * Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. */ evictionMaxPodGracePeriodSeconds: number; /** * Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. */ evictionMinimumReclaims: outputs.container.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaim[]; /** * Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. */ evictionSoftGracePeriods: outputs.container.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriod[]; /** * Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. */ evictionSofts: outputs.container.GetClusterNodeConfigKubeletConfigEvictionSoft[]; /** * Defines the percent of disk usage after which image garbage collection is always run. */ imageGcHighThresholdPercent: number; /** * Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. */ imageGcLowThresholdPercent: number; /** * Defines the maximum age an image can be unused before it is garbage collected. */ imageMaximumGcAge: string; /** * Defines the minimum age for an unused image before it is garbage collected. */ imageMinimumGcAge: string; /** * Controls whether the kubelet read-only port is enabled. 
It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * Set the maximum number of image pulls in parallel. */ maxParallelImagePulls: number; /** * Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity. */ memoryManagers: outputs.container.GetClusterNodeConfigKubeletConfigMemoryManager[]; /** * Controls the maximum number of processes allowed to run in a pod. */ podPidsLimit: number; /** * Defines whether to enable single process OOM killer. */ singleProcessOomKill: boolean; /** * Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location. */ topologyManagers: outputs.container.GetClusterNodeConfigKubeletConfigTopologyManager[]; } interface GetClusterNodeConfigKubeletConfigEvictionMinimumReclaim { /** * Defines percentage of minimum reclaim for imagefs.available. */ imagefsAvailable: string; /** * Defines percentage of minimum reclaim for imagefs.inodesFree. */ imagefsInodesFree: string; /** * Defines percentage of minimum reclaim for memory.available. */ memoryAvailable: string; /** * Defines percentage of minimum reclaim for nodefs.available. */ nodefsAvailable: string; /** * Defines percentage of minimum reclaim for nodefs.inodesFree. */ nodefsInodesFree: string; /** * Defines percentage of minimum reclaim for pid.available. */ pidAvailable: string; } interface GetClusterNodeConfigKubeletConfigEvictionSoft { /** * Defines percentage of soft eviction threshold for imagefs.available. */ imagefsAvailable: string; /** * Defines percentage of soft eviction threshold for imagefs.inodesFree. */ imagefsInodesFree: string; /** * Defines quantity of soft eviction threshold for memory.available. 
*/ memoryAvailable: string; /** * Defines percentage of soft eviction threshold for nodefs.available. */ nodefsAvailable: string; /** * Defines percentage of soft eviction threshold for nodefs.inodesFree. */ nodefsInodesFree: string; /** * Defines percentage of soft eviction threshold for pid.available. */ pidAvailable: string; } interface GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriod { /** * Defines grace period for the imagefs.available soft eviction threshold */ imagefsAvailable: string; /** * Defines grace period for the imagefs.inodesFree soft eviction threshold. */ imagefsInodesFree: string; /** * Defines grace period for the memory.available soft eviction threshold. */ memoryAvailable: string; /** * Defines grace period for the nodefs.available soft eviction threshold. */ nodefsAvailable: string; /** * Defines grace period for the nodefs.inodesFree soft eviction threshold. */ nodefsInodesFree: string; /** * Defines grace period for the pid.available soft eviction threshold. */ pidAvailable: string; } interface GetClusterNodeConfigKubeletConfigMemoryManager { /** * The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity. */ policy: string; } interface GetClusterNodeConfigKubeletConfigTopologyManager { /** * The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node. */ policy: string; /** * The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod). */ scope: string; } interface GetClusterNodeConfigLinuxNodeConfig { /** * cgroupMode specifies the cgroup mode to be used on the node. */ cgroupMode: string; /** * Amounts for 2M and 1G hugepages. 
*/ hugepagesConfigs: outputs.container.GetClusterNodeConfigLinuxNodeConfigHugepagesConfig[]; /** * The settings for kernel module loading. */ nodeKernelModuleLoadings: outputs.container.GetClusterNodeConfigLinuxNodeConfigNodeKernelModuleLoading[]; /** * The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. */ sysctls: { [key: string]: string; }; /** * The Linux kernel transparent hugepage defrag setting. */ transparentHugepageDefrag: string; /** * The Linux kernel transparent hugepage setting. */ transparentHugepageEnabled: string; } interface GetClusterNodeConfigLinuxNodeConfigHugepagesConfig { /** * Amount of 1G hugepages. */ hugepageSize1g: number; /** * Amount of 2M hugepages. */ hugepageSize2m: number; } interface GetClusterNodeConfigLinuxNodeConfigNodeKernelModuleLoading { /** * The policy for kernel module loading. */ policy: string; } interface GetClusterNodeConfigLocalNvmeSsdBlockConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. */ localSsdCount: number; } interface GetClusterNodeConfigReservationAffinity { /** * Corresponds to the type of reservation consumption. */ consumeReservationType: string; /** * The label key of a reservation resource. */ key: string; /** * The label values of the reservation resource. */ values: string[]; } interface GetClusterNodeConfigSandboxConfig { /** * Type of the sandbox to use for the node (e.g. 'gvisor') */ sandboxType: string; } interface GetClusterNodeConfigSecondaryBootDisk { /** * Disk image to create the secondary boot disk from */ diskImage: string; /** * Mode for how the secondary boot disk is used. */ mode: string; } interface GetClusterNodeConfigShieldedInstanceConfig { /** * Defines whether the instance has integrity monitoring enabled. */ enableIntegrityMonitoring: boolean; /** * Defines whether the instance has Secure Boot enabled. 
*/ enableSecureBoot: boolean; } interface GetClusterNodeConfigSoleTenantConfig { /** * Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. */ minNodeCpus: number; /** * Node affinity options for sole tenant node pools. */ nodeAffinities: outputs.container.GetClusterNodeConfigSoleTenantConfigNodeAffinity[]; } interface GetClusterNodeConfigSoleTenantConfigNodeAffinity { /** * The key of the node affinity label. */ key: string; /** * The affinity operator. NOTE(review): no description is shipped in the upstream schema; presumably `IN` or `NOT_IN` — confirm against the GKE API documentation. */ operator: string; /** * The list of values for the node affinity label. */ values: string[]; } interface GetClusterNodeConfigTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface GetClusterNodeConfigWindowsNodeConfig { /** * The OS Version of the Windows nodepool. Values are OS_VERSION_UNSPECIFIED, OS_VERSION_LTSC2019 and OS_VERSION_LTSC2022 */ osversion: string; } interface GetClusterNodeConfigWorkloadMetadataConfig { /** * Mode is the configuration for how to expose metadata to workloads running on the node. */ mode: string; } interface GetClusterNodePool { /** * Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. */ autoscalings: outputs.container.GetClusterNodePoolAutoscaling[]; /** * The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. */ initialNodeCount: number; /** * The resource URLs of the managed instance groups associated with this node pool. */ instanceGroupUrls: string[]; /** * List of instance group URLs which have been assigned to this node pool. */ managedInstanceGroupUrls: string[]; /** * Node management configuration, wherein auto-repair and auto-upgrade is configured. */ managements: outputs.container.GetClusterNodePoolManagement[]; /** * The maximum number of pods per node in this node pool. 
Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. */ maxPodsPerNode: number; /** * The name of the cluster. */ name: string; /** * Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name. */ namePrefix: string; /** * Networking configuration for this NodePool. If specified, it overrides the cluster-level defaults. */ networkConfigs: outputs.container.GetClusterNodePoolNetworkConfig[]; /** * The configuration of the nodepool */ nodeConfigs: outputs.container.GetClusterNodePoolNodeConfig[]; /** * The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling. */ nodeCount: number; /** * Node drain configuration for this NodePool. */ nodeDrainConfigs: outputs.container.GetClusterNodePoolNodeDrainConfig[]; /** * The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level nodeLocations will be used. */ nodeLocations: string[]; /** * Specifies the node placement policy */ placementPolicies: outputs.container.GetClusterNodePoolPlacementPolicy[]; /** * Specifies the configuration of queued provisioning */ queuedProvisionings: outputs.container.GetClusterNodePoolQueuedProvisioning[]; /** * Specify node upgrade settings to change how many nodes GKE attempts to upgrade at once. The number of nodes upgraded simultaneously is the sum of maxSurge and max_unavailable. The maximum number of nodes upgraded simultaneously is limited to 20. */ upgradeSettings: outputs.container.GetClusterNodePoolUpgradeSetting[]; /** * The Kubernetes version for the nodes in this pool. 
Note that if this field and autoUpgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's versionPrefix field to approximate fuzzy versions in a Terraform-compatible way. */ version: string; } interface GetClusterNodePoolAutoConfig { /** * Linux node configuration options. */ linuxNodeConfigs: outputs.container.GetClusterNodePoolAutoConfigLinuxNodeConfig[]; /** * Collection of Compute Engine network tags that can be applied to a node's underlying VM instance. */ networkTags: outputs.container.GetClusterNodePoolAutoConfigNetworkTag[]; /** * Node kubelet configs. */ nodeKubeletConfigs: outputs.container.GetClusterNodePoolAutoConfigNodeKubeletConfig[]; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; } interface GetClusterNodePoolAutoConfigLinuxNodeConfig { /** * cgroupMode specifies the cgroup mode to be used on the node. */ cgroupMode: string; /** * The settings for kernel module loading. */ nodeKernelModuleLoadings: outputs.container.GetClusterNodePoolAutoConfigLinuxNodeConfigNodeKernelModuleLoading[]; } interface GetClusterNodePoolAutoConfigLinuxNodeConfigNodeKernelModuleLoading { /** * The policy for kernel module loading. */ policy: string; } interface GetClusterNodePoolAutoConfigNetworkTag { /** * List of network tags applied to auto-provisioned node pools. */ tags: string[]; } interface GetClusterNodePoolAutoConfigNodeKubeletConfig { /** * Controls whether the kubelet read-only port is enabled. 
It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; } interface GetClusterNodePoolAutoscaling { /** * Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. */ locationPolicy: string; /** * Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits. */ maxNodeCount: number; /** * Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits. */ minNodeCount: number; /** * Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits. */ totalMaxNodeCount: number; /** * Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. */ totalMinNodeCount: number; } interface GetClusterNodePoolDefault { /** * Subset of NodeConfig message that has defaults. */ nodeConfigDefaults: outputs.container.GetClusterNodePoolDefaultNodeConfigDefault[]; } interface GetClusterNodePoolDefaultNodeConfigDefault { /** * Parameters for containerd configuration. */ containerdConfigs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfig[]; /** * GCFS configuration for this node. */ gcfsConfigs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfig[]; /** * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. 
*/ loggingVariant: string; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfig { /** * Parameters for private container registries configuration. */ privateRegistryAccessConfigs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigPrivateRegistryAccessConfig[]; /** * Configures containerd registry host configuration. Each registryHosts entry represents a hosts.toml file. */ registryHosts: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHost[]; /** * Parameters for writable cgroups configuration. */ writableCgroups: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigWritableCgroup[]; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigPrivateRegistryAccessConfig { /** * Parameters for configuring CA certificate and domains. */ certificateAuthorityDomainConfigs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Whether or not private registries are configured. */ enabled: boolean; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. */ gcpSecretManagerCertificateConfigs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig[]; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. 
*/ secretUri: string; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities: string[]; /** * Configures the registry host certificate. */ cas: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout: string; /** * Configures the registry host headers. */ headers: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. */ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. */ overridePath: boolean; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ certs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostClientCert[]; /** * Configures the client private key. 
*/ keys: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostClientKey[]; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. */ values: string[]; } interface GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigWritableCgroup { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfig { /** * Whether or not GCFS is enabled */ enabled: boolean; } interface GetClusterNodePoolManagement { /** * Whether the nodes will be automatically repaired. Enabled by default. */ autoRepair: boolean; /** * Whether the nodes will be automatically upgraded. Enabled by default. */ autoUpgrade: boolean; } interface GetClusterNodePoolNetworkConfig { /** * The accelerator network profile to use for this node pool. */ acceleratorNetworkProfile: string; /** * We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface */ additionalNodeNetworkConfigs: outputs.container.GetClusterNodePoolNetworkConfigAdditionalNodeNetworkConfig[]; /** * We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node */ additionalPodNetworkConfigs: outputs.container.GetClusterNodePoolNetworkConfigAdditionalPodNetworkConfig[]; /** * Whether to create a new range for pod IPs in this node pool. 
Defaults are provided for podRange and podIpv4CidrBlock if they are not specified. */ createPodRange: boolean; /** * Whether nodes have internal IP addresses only. */ enablePrivateNodes: boolean; /** * Network bandwidth tier configuration. */ networkPerformanceConfigs: outputs.container.GetClusterNodePoolNetworkConfigNetworkPerformanceConfig[]; /** * Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited */ podCidrOverprovisionConfigs: outputs.container.GetClusterNodePoolNetworkConfigPodCidrOverprovisionConfig[]; /** * The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. */ podIpv4CidrBlock: string; /** * The ID of the secondary range for pod IPs. If createPodRange is true, this ID is used for the new range. If createPodRange is false, uses an existing secondary range with this ID. */ podRange: string; /** * The subnetwork path for the node pool. Format: projects/{project}/regions/{region}/subnetworks/{subnetwork} . If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable. */ subnetwork: string; } interface GetClusterNodePoolNetworkConfigAdditionalNodeNetworkConfig { /** * Name of the VPC where the additional interface belongs. */ network: string; /** * Name of the subnetwork where the additional interface belongs. */ subnetwork: string; } interface GetClusterNodePoolNetworkConfigAdditionalPodNetworkConfig { /** * The maximum number of pods per node which use this pod network. */ maxPodsPerNode: number; /** * The name of the secondary range on the subnet which provides IP address for this pod range. 
*/ secondaryPodRange: string; /** * Name of the subnetwork where the additional pod network belongs. */ subnetwork: string; } interface GetClusterNodePoolNetworkConfigNetworkPerformanceConfig { /** * Specifies the total network bandwidth tier for the NodePool. [Valid values](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.Tier) include: "TIER_1" and "TIER_UNSPECIFIED". */ totalEgressBandwidthTier: string; } interface GetClusterNodePoolNetworkConfigPodCidrOverprovisionConfig { disabled: boolean; } interface GetClusterNodePoolNodeConfig { /** * Specifies options for controlling advanced machine features. */ advancedMachineFeatures: outputs.container.GetClusterNodePoolNodeConfigAdvancedMachineFeature[]; /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. */ bootDiskKmsKey: string; /** * Boot disk configuration for node pools nodes. */ bootDisks: outputs.container.GetClusterNodePoolNodeConfigBootDisk[]; /** * Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. */ confidentialNodes: outputs.container.GetClusterNodePoolNodeConfigConfidentialNode[]; /** * Parameters for containerd configuration. */ containerdConfigs: outputs.container.GetClusterNodePoolNodeConfigContainerdConfig[]; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ diskSizeGb: number; /** * Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd */ diskType: string; /** * List of kubernetes taints applied to each node. */ effectiveTaints: outputs.container.GetClusterNodePoolNodeConfigEffectiveTaint[]; /** * If enabled boot disks are configured with confidential mode. */ enableConfidentialStorage: boolean; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. 
*/ ephemeralStorageConfigs: outputs.container.GetClusterNodePoolNodeConfigEphemeralStorageConfig[]; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. */ ephemeralStorageLocalSsdConfigs: outputs.container.GetClusterNodePoolNodeConfigEphemeralStorageLocalSsdConfig[]; /** * Enable or disable NCCL Fast Socket in the node pool. */ fastSockets: outputs.container.GetClusterNodePoolNodeConfigFastSocket[]; /** * Enables Flex Start provisioning model for the node pool */ flexStart: boolean; /** * GCFS configuration for this node. */ gcfsConfigs: outputs.container.GetClusterNodePoolNodeConfigGcfsConfig[]; /** * List of the type and count of accelerator cards attached to the instance. */ guestAccelerators: outputs.container.GetClusterNodePoolNodeConfigGuestAccelerator[]; /** * Enable or disable gvnic in the node pool. */ gvnics: outputs.container.GetClusterNodePoolNodeConfigGvnic[]; /** * The maintenance policy for the hosts on which the GKE VMs run on. */ hostMaintenancePolicies: outputs.container.GetClusterNodePoolNodeConfigHostMaintenancePolicy[]; /** * The image type to use for this node. Note that for a given image type, the latest version of it will be used. */ imageType: string; /** * Node kubelet configs. */ kubeletConfigs: outputs.container.GetClusterNodePoolNodeConfigKubeletConfig[]; /** * The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. */ labels: { [key: string]: string; }; /** * Parameters that can be configured on Linux nodes. */ linuxNodeConfigs: outputs.container.GetClusterNodePoolNodeConfigLinuxNodeConfig[]; /** * Parameters for raw-block local NVMe SSDs. */ localNvmeSsdBlockConfigs: outputs.container.GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig[]; /** * The number of local SSD disks to be attached to the node. 
*/ localSsdCount: number; /** * LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node. */ localSsdEncryptionMode: string; /** * Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. */ loggingVariant: string; /** * The name of a Google Compute Engine machine type. */ machineType: string; /** * The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". */ maxRunDuration: string; /** * The metadata key/value pairs assigned to instances in the cluster. */ metadata: { [key: string]: string; }; /** * Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. */ minCpuPlatform: string; /** * Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. */ nodeGroup: string; /** * The set of Google API scopes to be made available on all of the node VMs. */ oauthScopes: string[]; /** * Whether the nodes are created as preemptible VM instances. */ preemptible: boolean; /** * The reservation affinity configuration for the node pool. */ reservationAffinities: outputs.container.GetClusterNodePoolNodeConfigReservationAffinity[]; /** * The GCE resource labels (a map of key/value pairs) to be applied to the node pool. */ resourceLabels: { [key: string]: string; }; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags: { [key: string]: string; }; /** * Sandbox configuration for this node. */ sandboxConfigs: outputs.container.GetClusterNodePoolNodeConfigSandboxConfig[]; /** * Secondary boot disks for preloading data or container images. 
*/ secondaryBootDisks: outputs.container.GetClusterNodePoolNodeConfigSecondaryBootDisk[]; /** * The Google Cloud Platform Service Account to be used by the node VMs. */ serviceAccount: string; /** * Shielded Instance options. */ shieldedInstanceConfigs: outputs.container.GetClusterNodePoolNodeConfigShieldedInstanceConfig[]; /** * Node affinity options for sole tenant node pools. */ soleTenantConfigs: outputs.container.GetClusterNodePoolNodeConfigSoleTenantConfig[]; /** * Whether the nodes are created as spot VM instances. */ spot: boolean; /** * The list of Storage Pools where boot disks are provisioned. */ storagePools: string[]; /** * The list of instance tags applied to all nodes. */ tags: string[]; /** * List of Kubernetes taints to be applied to each node. */ taints: outputs.container.GetClusterNodePoolNodeConfigTaint[]; /** * Parameters that can be configured on Windows nodes. */ windowsNodeConfigs: outputs.container.GetClusterNodePoolNodeConfigWindowsNodeConfig[]; /** * The workload metadata configuration for this node. */ workloadMetadataConfigs: outputs.container.GetClusterNodePoolNodeConfigWorkloadMetadataConfig[]; } interface GetClusterNodePoolNodeConfigAdvancedMachineFeature { /** * Whether the node should have nested virtualization enabled. */ enableNestedVirtualization: boolean; /** * Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed. */ performanceMonitoringUnit: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; } interface GetClusterNodePoolNodeConfigBootDisk { /** * Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd */ diskType: string; /** * Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced. 
*/ provisionedIops: number; /** * Configured throughput provisioning. Only valid with disk type hyperdisk-balanced. */ provisionedThroughput: number; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ sizeGb: number; } interface GetClusterNodePoolNodeConfigConfidentialNode { /** * Defines the type of technology used by the confidential node. */ confidentialInstanceType: string; /** * Whether Confidential Nodes feature is enabled for all nodes in this pool. */ enabled: boolean; } interface GetClusterNodePoolNodeConfigContainerdConfig { /** * Parameters for private container registries configuration. */ privateRegistryAccessConfigs: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig[]; /** * Configures containerd registry host configuration. Each registryHosts entry represents a hosts.toml file. */ registryHosts: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHost[]; /** * Parameters for writable cgroups configuration. */ writableCgroups: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigWritableCgroup[]; } interface GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig { /** * Parameters for configuring CA certificate and domains. */ certificateAuthorityDomainConfigs: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Whether or not private registries are configured. */ enabled: boolean; } interface GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. 
*/ gcpSecretManagerCertificateConfigs: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig[]; } interface GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. */ secretUri: string; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities: string[]; /** * Configures the registry host certificate. */ cas: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout: string; /** * Configures the registry host headers. */ headers: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. */ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. */ overridePath: boolean; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. 
*/ gcpSecretManagerSecretUri: string; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ certs: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientCert[]; /** * Configures the client private key. */ keys: outputs.container.GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientKey[]; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri: string; } interface GetClusterNodePoolNodeConfigContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. */ values: string[]; } interface GetClusterNodePoolNodeConfigContainerdConfigWritableCgroup { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface GetClusterNodePoolNodeConfigEffectiveTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface GetClusterNodePoolNodeConfigEphemeralStorageConfig { /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size. */ localSsdCount: number; } interface GetClusterNodePoolNodeConfigEphemeralStorageLocalSsdConfig { /** * Number of local SSDs to be utilized for GKE Data Cache. Uses NVMe interfaces. */ dataCacheCount: number; /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size. 
*/ localSsdCount: number; } interface GetClusterNodePoolNodeConfigFastSocket { /** * Whether or not NCCL Fast Socket is enabled */ enabled: boolean; } interface GetClusterNodePoolNodeConfigGcfsConfig { /** * Whether or not GCFS is enabled */ enabled: boolean; } interface GetClusterNodePoolNodeConfigGuestAccelerator { /** * The number of the accelerator cards exposed to an instance. */ count: number; /** * Configuration for auto installation of GPU driver. */ gpuDriverInstallationConfigs: outputs.container.GetClusterNodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig[]; /** * Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning) */ gpuPartitionSize: string; /** * Configuration for GPU sharing. */ gpuSharingConfigs: outputs.container.GetClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfig[]; /** * The accelerator type resource name. */ type: string; } interface GetClusterNodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig { /** * Mode for how the GPU driver is installed. */ gpuDriverVersion: string; } interface GetClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfig { /** * The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig) */ gpuSharingStrategy: string; /** * The maximum number of containers that can share a GPU. */ maxSharedClientsPerGpu: number; } interface GetClusterNodePoolNodeConfigGvnic { /** * Whether or not gvnic is enabled */ enabled: boolean; } interface GetClusterNodePoolNodeConfigHostMaintenancePolicy { /** * . */ maintenanceInterval: string; } interface GetClusterNodePoolNodeConfigKubeletConfig { /** * Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. 
*/ allowedUnsafeSysctls: string[]; /** * Defines the maximum number of container log files that can be present for a container. */ containerLogMaxFiles: number; /** * Defines the maximum size of the container log file before it is rotated. */ containerLogMaxSize: string; /** * Enable CPU CFS quota enforcement for containers that specify CPU limits. */ cpuCfsQuota: boolean; /** * Set the CPU CFS quota period value 'cpu.cfs_period_us'. */ cpuCfsQuotaPeriod: string; /** * Control the CPU management policy on the node. */ cpuManagerPolicy: string; /** * Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. */ evictionMaxPodGracePeriodSeconds: number; /** * Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. */ evictionMinimumReclaims: outputs.container.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim[]; /** * Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. */ evictionSoftGracePeriods: outputs.container.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod[]; /** * Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. */ evictionSofts: outputs.container.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoft[]; /** * Defines the percent of disk usage after which image garbage collection is always run. */ imageGcHighThresholdPercent: number; /** * Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. */ imageGcLowThresholdPercent: number; /** * Defines the maximum age an image can be unused before it is garbage collected. 
*/ imageMaximumGcAge: string; /** * Defines the minimum age for an unused image before it is garbage collected. */ imageMinimumGcAge: string; /** * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * Set the maximum number of image pulls in parallel. */ maxParallelImagePulls: number; /** * Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity. */ memoryManagers: outputs.container.GetClusterNodePoolNodeConfigKubeletConfigMemoryManager[]; /** * Controls the maximum number of processes allowed to run in a pod. */ podPidsLimit: number; /** * Defines whether to enable single process OOM killer. */ singleProcessOomKill: boolean; /** * Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location. */ topologyManagers: outputs.container.GetClusterNodePoolNodeConfigKubeletConfigTopologyManager[]; } interface GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim { /** * Defines percentage of minimum reclaim for imagefs.available. */ imagefsAvailable: string; /** * Defines percentage of minimum reclaim for imagefs.inodesFree. */ imagefsInodesFree: string; /** * Defines percentage of minimum reclaim for memory.available. */ memoryAvailable: string; /** * Defines percentage of minimum reclaim for nodefs.available. */ nodefsAvailable: string; /** * Defines percentage of minimum reclaim for nodefs.inodesFree. */ nodefsInodesFree: string; /** * Defines percentage of minimum reclaim for pid.available. 
*/ pidAvailable: string; } interface GetClusterNodePoolNodeConfigKubeletConfigEvictionSoft { /** * Defines percentage of soft eviction threshold for imagefs.available. */ imagefsAvailable: string; /** * Defines percentage of soft eviction threshold for imagefs.inodesFree. */ imagefsInodesFree: string; /** * Defines quantity of soft eviction threshold for memory.available. */ memoryAvailable: string; /** * Defines percentage of soft eviction threshold for nodefs.available. */ nodefsAvailable: string; /** * Defines percentage of soft eviction threshold for nodefs.inodesFree. */ nodefsInodesFree: string; /** * Defines percentage of soft eviction threshold for pid.available. */ pidAvailable: string; } interface GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod { /** * Defines grace period for the imagefs.available soft eviction threshold */ imagefsAvailable: string; /** * Defines grace period for the imagefs.inodesFree soft eviction threshold. */ imagefsInodesFree: string; /** * Defines grace period for the memory.available soft eviction threshold. */ memoryAvailable: string; /** * Defines grace period for the nodefs.available soft eviction threshold. */ nodefsAvailable: string; /** * Defines grace period for the nodefs.inodesFree soft eviction threshold. */ nodefsInodesFree: string; /** * Defines grace period for the pid.available soft eviction threshold. */ pidAvailable: string; } interface GetClusterNodePoolNodeConfigKubeletConfigMemoryManager { /** * The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity. */ policy: string; } interface GetClusterNodePoolNodeConfigKubeletConfigTopologyManager { /** * The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node. */ policy: string; /** * The Topology Manager scope, defining the granularity at which policy decisions are applied. 
Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod). */ scope: string; } interface GetClusterNodePoolNodeConfigLinuxNodeConfig { /** * cgroupMode specifies the cgroup mode to be used on the node. */ cgroupMode: string; /** * Amounts for 2M and 1G hugepages. */ hugepagesConfigs: outputs.container.GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig[]; /** * The settings for kernel module loading. */ nodeKernelModuleLoadings: outputs.container.GetClusterNodePoolNodeConfigLinuxNodeConfigNodeKernelModuleLoading[]; /** * The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. */ sysctls: { [key: string]: string; }; /** * The Linux kernel transparent hugepage defrag setting. */ transparentHugepageDefrag: string; /** * The Linux kernel transparent hugepage setting. */ transparentHugepageEnabled: string; } interface GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig { /** * Amount of 1G hugepages. */ hugepageSize1g: number; /** * Amount of 2M hugepages. */ hugepageSize2m: number; } interface GetClusterNodePoolNodeConfigLinuxNodeConfigNodeKernelModuleLoading { /** * The policy for kernel module loading. */ policy: string; } interface GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. */ localSsdCount: number; } interface GetClusterNodePoolNodeConfigReservationAffinity { /** * Corresponds to the type of reservation consumption. */ consumeReservationType: string; /** * The label key of a reservation resource. */ key: string; /** * The label values of the reservation resource. */ values: string[]; } interface GetClusterNodePoolNodeConfigSandboxConfig { /** * Type of the sandbox to use for the node (e.g. 
'gvisor') */ sandboxType: string; } interface GetClusterNodePoolNodeConfigSecondaryBootDisk { /** * Disk image to create the secondary boot disk from */ diskImage: string; /** * Mode for how the secondary boot disk is used. */ mode: string; } interface GetClusterNodePoolNodeConfigShieldedInstanceConfig { /** * Defines whether the instance has integrity monitoring enabled. */ enableIntegrityMonitoring: boolean; /** * Defines whether the instance has Secure Boot enabled. */ enableSecureBoot: boolean; } interface GetClusterNodePoolNodeConfigSoleTenantConfig { /** * Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. */ minNodeCpus: number; /** * . */ nodeAffinities: outputs.container.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity[]; } interface GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity { /** * . */ key: string; /** * . */ operator: string; /** * . */ values: string[]; } interface GetClusterNodePoolNodeConfigTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface GetClusterNodePoolNodeConfigWindowsNodeConfig { /** * The OS Version of the windows nodepool. Values are OS_VERSION_UNSPECIFIED, OS_VERSION_LTSC2019 and OS_VERSION_LTSC2022 */ osversion: string; } interface GetClusterNodePoolNodeConfigWorkloadMetadataConfig { /** * Mode is the configuration for how to expose metadata to workloads running on the node. */ mode: string; } interface GetClusterNodePoolNodeDrainConfig { /** * Whether to respect PodDisruptionBudget policy during node pool deletion. */ respectPdbDuringNodePoolDeletion: boolean; } interface GetClusterNodePoolPlacementPolicy { /** * If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned. 
*/ policyName: string; /** * The TPU topology like "2x4" or "2x2x2". https://cloud.google.com/kubernetes-engine/docs/concepts/plan-tpus#topology */ tpuTopology: string; /** * Type defines the type of placement policy */ type: string; } interface GetClusterNodePoolQueuedProvisioning { /** * Whether nodes in this node pool are obtainable solely through the ProvisioningRequest API */ enabled: boolean; } interface GetClusterNodePoolUpgradeSetting { /** * Settings for BlueGreen node pool upgrade. */ blueGreenSettings: outputs.container.GetClusterNodePoolUpgradeSettingBlueGreenSetting[]; /** * The number of additional nodes that can be added to the node pool during an upgrade. Increasing maxSurge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. */ maxSurge: number; /** * The number of nodes that can be simultaneously unavailable during an upgrade. Increasing maxUnavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. */ maxUnavailable: number; /** * Update strategy for the given nodepool. */ strategy: string; } interface GetClusterNodePoolUpgradeSettingBlueGreenSetting { /** * Autoscaled rollout policy for blue-green upgrade. */ autoscaledRolloutPolicies: outputs.container.GetClusterNodePoolUpgradeSettingBlueGreenSettingAutoscaledRolloutPolicy[]; /** * Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. */ nodePoolSoakDuration: string; /** * Standard rollout policy is the default policy for blue-green. */ standardRolloutPolicies: outputs.container.GetClusterNodePoolUpgradeSettingBlueGreenSettingStandardRolloutPolicy[]; } interface GetClusterNodePoolUpgradeSettingBlueGreenSettingAutoscaledRolloutPolicy { /** * Time in seconds to wait after cordoning the blue pool before draining the nodes. 
*/ waitForDrainDuration: string; } interface GetClusterNodePoolUpgradeSettingBlueGreenSettingStandardRolloutPolicy { /** * Number of blue nodes to drain in a batch. */ batchNodeCount: number; /** * Percentage of the blue pool nodes to drain in a batch. */ batchPercentage: number; /** * Soak time after each batch gets drained. */ batchSoakDuration: string; } interface GetClusterNotificationConfig { /** * Notification config for Cloud Pub/Sub */ pubsubs: outputs.container.GetClusterNotificationConfigPubsub[]; } interface GetClusterNotificationConfigPubsub { /** * Whether or not the notification config is enabled */ enabled: boolean; /** * Allows filtering to one or more specific event types. If event types are present, those and only those event types will be transmitted to the cluster. Other types will be skipped. If no filter is specified, or no event types are present, all event types will be sent */ filters: outputs.container.GetClusterNotificationConfigPubsubFilter[]; /** * The pubsub topic to push upgrade notifications to. Must be in the same project as the cluster. Must be in the format: projects/{project}/topics/{topic}. */ topic: string; } interface GetClusterNotificationConfigPubsubFilter { /** * Can be used to filter what notifications are sent. Valid values include UPGRADE_AVAILABLE_EVENT, UPGRADE_EVENT, SECURITY_BULLETIN_EVENT, and UPGRADE_INFO_EVENT */ eventTypes: string[]; } interface GetClusterPodAutoscaling { /** * HPA Profile is used to configure the Horizontal Pod Autoscaler (HPA) profile for the cluster. * Available options include: * - NONE: Customers explicitly opt-out of HPA profiles. * - PERFORMANCE: PERFORMANCE is used when customers opt-in to the performance HPA profile. In this profile we support a higher number of HPAs per cluster and faster metrics collection for workload autoscaling. */ hpaProfile: string; } interface GetClusterPodSecurityPolicyConfig { /** * Enable the PodSecurityPolicy controller for this cluster. 
If enabled, pods must be valid under a PodSecurityPolicy to be created. */ enabled: boolean; } interface GetClusterPrivateClusterConfig { /** * When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. */ enablePrivateEndpoint: boolean; /** * Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking. */ enablePrivateNodes: boolean; /** * Controls cluster master global access settings. */ masterGlobalAccessConfigs: outputs.container.GetClusterPrivateClusterConfigMasterGlobalAccessConfig[]; /** * The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning private IP addresses to the cluster master(s) and the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network, and it must be a /28 subnet. See Private Cluster Limitations for more details. This field only applies to private clusters, when enablePrivateNodes is true. */ masterIpv4CidrBlock: string; /** * The name of the peering between this cluster and the Google owned VPC. */ peeringName: string; /** * The internal IP address of this cluster's master endpoint. */ privateEndpoint: string; /** * Subnetwork in cluster's network where master's endpoint will be provisioned. */ privateEndpointSubnetwork: string; /** * The external IP address of this cluster's master endpoint. */ publicEndpoint: string; } interface GetClusterPrivateClusterConfigMasterGlobalAccessConfig { /** * Whether the cluster master is accessible globally or not. */ enabled: boolean; } interface GetClusterProtectConfig { /** * WorkloadConfig defines which actions are enabled for a cluster's workload configurations. 
*/ workloadConfigs: outputs.container.GetClusterProtectConfigWorkloadConfig[]; /** * Sets which mode to use for Protect workload vulnerability scanning feature. Accepted values are DISABLED, BASIC. */ workloadVulnerabilityMode: string; } interface GetClusterProtectConfigWorkloadConfig { /** * Sets which mode of auditing should be used for the cluster's workloads. Accepted values are DISABLED, BASIC. */ auditMode: string; } interface GetClusterRbacBindingConfig { /** * Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated. */ enableInsecureBindingSystemAuthenticated: boolean; /** * Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated. */ enableInsecureBindingSystemUnauthenticated: boolean; } interface GetClusterReleaseChannel { /** * The selected release channel. Accepted values are: * * UNSPECIFIED: Not set. * * RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. * * REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. * * STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky. * * EXTENDED: GKE provides extended support for Kubernetes minor versions through the Extended channel. With this channel, you can stay on a minor version for up to 24 months. */ channel: string; } interface GetClusterResourceUsageExportConfig { /** * Parameters for using BigQuery as the destination of resource usage export. */ bigqueryDestinations: outputs.container.GetClusterResourceUsageExportConfigBigqueryDestination[]; /** * Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created in the cluster to meter network egress traffic. */ enableNetworkEgressMetering: boolean; /** * Whether to enable resource consumption metering on this cluster. 
When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. Defaults to true. */ enableResourceConsumptionMetering: boolean; } interface GetClusterResourceUsageExportConfigBigqueryDestination { /** * The ID of a BigQuery Dataset. */ datasetId: string; } interface GetClusterSecretManagerConfig { /** * Enable the Secret manager csi component. */ enabled: boolean; /** * Configuration for Secret Manager auto rotation. */ rotationConfigs: outputs.container.GetClusterSecretManagerConfigRotationConfig[]; } interface GetClusterSecretManagerConfigRotationConfig { /** * Enable the Secret manager auto rotation. */ enabled: boolean; /** * The interval between two consecutive rotations. Default rotation interval is 2 minutes */ rotationInterval: string; } interface GetClusterSecretSyncConfig { /** * Enable the Sync as k8s secret add-on. */ enabled: boolean; /** * Configuration for Secret Sync auto rotation. */ rotationConfigs: outputs.container.GetClusterSecretSyncConfigRotationConfig[]; } interface GetClusterSecretSyncConfigRotationConfig { /** * Enable the Secret sync auto rotation. */ enabled: boolean; /** * The interval between two consecutive rotations. Default rotation interval is 2 minutes */ rotationInterval: string; } interface GetClusterSecurityPostureConfig { /** * Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED, BASIC, and ENTERPRISE. */ mode: string; /** * Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. Available options include VULNERABILITY_DISABLED, VULNERABILITY_BASIC and VULNERABILITY_ENTERPRISE. */ vulnerabilityMode: string; } interface GetClusterServiceExternalIpsConfig { /** * When enabled, services with external ips specified will be allowed. 
*/ enabled: boolean; } interface GetClusterTpuConfig { /** * Whether Cloud TPU integration is enabled or not */ enabled: boolean; /** * IPv4 CIDR block reserved for Cloud TPU in the VPC. */ ipv4CidrBlock: string; /** * Whether to use service networking for Cloud TPU or not */ useServiceNetworking: boolean; } interface GetClusterUserManagedKeysConfig { /** * The Certificate Authority Service caPool to use for the aggreation CA in this cluster. */ aggregationCa: string; /** * The Certificate Authority Service caPool to use for the cluster CA in this cluster. */ clusterCa: string; /** * The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. */ controlPlaneDiskEncryptionKey: string; /** * The Certificate Authority Service caPool to use for the etcd API CA in this cluster. */ etcdApiCa: string; /** * The Certificate Authority Service caPool to use for the etcd peer CA in this cluster. */ etcdPeerCa: string; /** * Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. */ gkeopsEtcdBackupEncryptionKey: string; /** * The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. */ serviceAccountSigningKeys: string[]; /** * The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. */ serviceAccountVerificationKeys: string[]; } interface GetClusterVerticalPodAutoscaling { /** * Enables vertical pod autoscaling. */ enabled: boolean; } interface GetClusterWorkloadAltsConfig { /** * Whether the alts handshaker should be enabled or not for direct-path. Requires Workload Identity (workloadPool must be non-empty). */ enableAlts: boolean; } interface GetClusterWorkloadIdentityConfig { /** * The workload pool to attach all Kubernetes service accounts to. */ workloadPool: string; } interface NodePoolAutoscaling { /** * Location policy specifies the algorithm used when * scaling-up the node pool. 
Location policy is supported only in 1.24.1+ clusters. * * "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. * * "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, * and reduce preemption risk for Spot VMs. */ locationPolicy: string; /** * Maximum number of nodes per zone in the NodePool. * Must be >= min_node_count. Cannot be used with total limits. */ maxNodeCount?: number; /** * Minimum number of nodes per zone in the NodePool. * Must be >=0 and <= `maxNodeCount`. Cannot be used with total limits. */ minNodeCount?: number; /** * Total maximum number of nodes in the NodePool. * Must be >= total_min_node_count. Cannot be used with per zone limits. * Total size limits are supported only in 1.24.1+ clusters. */ totalMaxNodeCount?: number; /** * Total minimum number of nodes in the NodePool. * Must be >=0 and <= `totalMaxNodeCount`. Cannot be used with per zone limits. * Total size limits are supported only in 1.24.1+ clusters. */ totalMinNodeCount?: number; } interface NodePoolManagement { /** * Whether the nodes will be automatically repaired. Enabled by default. */ autoRepair?: boolean; /** * Whether the nodes will be automatically upgraded. Enabled by default. */ autoUpgrade?: boolean; } interface NodePoolNetworkConfig { /** * ) - Specifies the accelerator network profile for nodes in this node pool. Setting to `"auto"` enables GKE to automatically configure high-performance networking settings for nodes with accelerators (like GPUs). GKE manages the underlying resources (like VPCs and subnets) for this configuration. */ acceleratorNetworkProfile?: string; /** * We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. 
* Structure is documented below */ additionalNodeNetworkConfigs: outputs.container.NodePoolNetworkConfigAdditionalNodeNetworkConfig[]; /** * We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. * Structure is documented below */ additionalPodNetworkConfigs?: outputs.container.NodePoolNetworkConfigAdditionalPodNetworkConfig[]; /** * Whether to create a new range for pod IPs in this node pool. Defaults are provided for `podRange` and `podIpv4CidrBlock` if they are not specified. */ createPodRange?: boolean; /** * Whether nodes have internal IP addresses only. */ enablePrivateNodes: boolean; /** * Network bandwidth tier configuration. Structure is documented below. */ networkPerformanceConfig?: outputs.container.NodePoolNetworkConfigNetworkPerformanceConfig; /** * Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below. */ podCidrOverprovisionConfig: outputs.container.NodePoolNetworkConfigPodCidrOverprovisionConfig; /** * The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. */ podIpv4CidrBlock: string; /** * The ID of the secondary range for pod IPs. If `createPodRange` is true, this ID is used for the new range. If `createPodRange` is false, uses an existing secondary range with this ID. */ podRange: string; /** * The subnetwork path for the node pool. Format: `projects/{project}/regions/{region}/subnetworks/{subnetwork}`. 
If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable */ subnetwork: string; } interface NodePoolNetworkConfigAdditionalNodeNetworkConfig { /** * Name of the VPC where the additional interface belongs. */ network: string; /** * Name of the subnetwork where the additional interface belongs. */ subnetwork: string; } interface NodePoolNetworkConfigAdditionalPodNetworkConfig { /** * The maximum number of pods per node which use this pod network. */ maxPodsPerNode: number; /** * The name of the secondary range on the subnet which provides IP address for this pod range. */ secondaryPodRange?: string; /** * Name of the subnetwork where the additional pod network belongs. */ subnetwork?: string; } interface NodePoolNetworkConfigNetworkPerformanceConfig { /** * Specifies the total network bandwidth tier for the NodePool. [Valid values](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.Tier) include: "TIER_1" and "TIER_UNSPECIFIED". */ totalEgressBandwidthTier: string; } interface NodePoolNetworkConfigPodCidrOverprovisionConfig { /** * Whether pod cidr overprovision is disabled. */ disabled: boolean; } interface NodePoolNodeConfig { /** * Specifies options for controlling advanced machine features. */ advancedMachineFeatures?: outputs.container.NodePoolNodeConfigAdvancedMachineFeatures; /** * Boot disk configuration for node pools nodes. */ bootDisk: outputs.container.NodePoolNodeConfigBootDisk; /** * The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. */ bootDiskKmsKey?: string; /** * Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. */ confidentialNodes: outputs.container.NodePoolNodeConfigConfidentialNodes; /** * Parameters for containerd configuration. 
*/ containerdConfig: outputs.container.NodePoolNodeConfigContainerdConfig; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ diskSizeGb: number; /** * Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd */ diskType: string; /** * List of kubernetes taints applied to each node. */ effectiveTaints: outputs.container.NodePoolNodeConfigEffectiveTaint[]; /** * If enabled boot disks are configured with confidential mode. */ enableConfidentialStorage?: boolean; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. */ ephemeralStorageConfig?: outputs.container.NodePoolNodeConfigEphemeralStorageConfig; /** * Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. */ ephemeralStorageLocalSsdConfig?: outputs.container.NodePoolNodeConfigEphemeralStorageLocalSsdConfig; /** * Enable or disable NCCL Fast Socket in the node pool. */ fastSocket?: outputs.container.NodePoolNodeConfigFastSocket; /** * Enables Flex Start provisioning model for the node pool */ flexStart?: boolean; /** * GCFS configuration for this node. */ gcfsConfig: outputs.container.NodePoolNodeConfigGcfsConfig; /** * List of the type and count of accelerator cards attached to the instance. */ guestAccelerators: outputs.container.NodePoolNodeConfigGuestAccelerator[]; /** * Enable or disable gvnic in the node pool. */ gvnic?: outputs.container.NodePoolNodeConfigGvnic; /** * The maintenance policy for the hosts on which the GKE VMs run on. */ hostMaintenancePolicy?: outputs.container.NodePoolNodeConfigHostMaintenancePolicy; /** * The image type to use for this node. Note that for a given image type, the latest version of it will be used. */ imageType: string; /** * Node kubelet configs. 
*/ kubeletConfig: outputs.container.NodePoolNodeConfigKubeletConfig; /** * The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. */ labels: { [key: string]: string; }; /** * Parameters that can be configured on Linux nodes. */ linuxNodeConfig: outputs.container.NodePoolNodeConfigLinuxNodeConfig; /** * Parameters for raw-block local NVMe SSDs. */ localNvmeSsdBlockConfig?: outputs.container.NodePoolNodeConfigLocalNvmeSsdBlockConfig; /** * The number of local SSD disks to be attached to the node. */ localSsdCount: number; /** * LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node. */ localSsdEncryptionMode?: string; /** * Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. */ loggingVariant: string; /** * The name of a Google Compute Engine machine type. */ machineType: string; /** * The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". */ maxRunDuration?: string; /** * The metadata key/value pairs assigned to instances in the cluster. */ metadata: { [key: string]: string; }; /** * Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. */ minCpuPlatform: string; /** * Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. */ nodeGroup?: string; /** * The set of Google API scopes to be made available on all of the node VMs. */ oauthScopes: string[]; /** * Whether the nodes are created as preemptible VM instances. */ preemptible?: boolean; /** * The reservation affinity configuration for the node pool. 
*/ reservationAffinity?: outputs.container.NodePoolNodeConfigReservationAffinity; /** * The GCE resource labels (a map of key/value pairs) to be applied to the node pool. */ resourceLabels?: { [key: string]: string; }; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ resourceManagerTags?: { [key: string]: string; }; /** * Sandbox configuration for this node. */ sandboxConfig?: outputs.container.NodePoolNodeConfigSandboxConfig; /** * Secondary boot disks for preloading data or container images. */ secondaryBootDisks?: outputs.container.NodePoolNodeConfigSecondaryBootDisk[]; /** * The Google Cloud Platform Service Account to be used by the node VMs. */ serviceAccount: string; /** * Shielded Instance options. */ shieldedInstanceConfig: outputs.container.NodePoolNodeConfigShieldedInstanceConfig; /** * Node affinity options for sole tenant node pools. */ soleTenantConfig?: outputs.container.NodePoolNodeConfigSoleTenantConfig; /** * Whether the nodes are created as spot VM instances. */ spot?: boolean; /** * The list of Storage Pools where boot disks are provisioned. */ storagePools?: string[]; /** * The list of instance tags applied to all nodes. */ tags?: string[]; /** * List of Kubernetes taints to be applied to each node. */ taints?: outputs.container.NodePoolNodeConfigTaint[]; /** * Parameters that can be configured on Windows nodes. */ windowsNodeConfig: outputs.container.NodePoolNodeConfigWindowsNodeConfig; /** * The workload metadata configuration for this node. */ workloadMetadataConfig: outputs.container.NodePoolNodeConfigWorkloadMetadataConfig; } interface NodePoolNodeConfigAdvancedMachineFeatures { /** * Whether the node should have nested virtualization enabled. 
*/ enableNestedVirtualization?: boolean; /** * Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed. */ performanceMonitoringUnit?: string; /** * The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. */ threadsPerCore: number; } interface NodePoolNodeConfigBootDisk { /** * Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd */ diskType: string; /** * Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced. */ provisionedIops: number; /** * Configured throughput provisioning. Only valid with disk type hyperdisk-balanced. */ provisionedThroughput: number; /** * Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. */ sizeGb: number; } interface NodePoolNodeConfigConfidentialNodes { /** * Defines the type of technology used by the confidential node. */ confidentialInstanceType?: string; /** * Whether Confidential Nodes feature is enabled for all nodes in this pool. */ enabled: boolean; } interface NodePoolNodeConfigContainerdConfig { /** * Parameters for private container registries configuration. */ privateRegistryAccessConfig?: outputs.container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig; /** * Configures containerd registry host configuration. Each registryHosts entry represents a hosts.toml file. */ registryHosts?: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHost[]; /** * Parameters for writable cgroups configuration. */ writableCgroups?: outputs.container.NodePoolNodeConfigContainerdConfigWritableCgroups; } interface NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig { /** * Parameters for configuring CA certificate and domains. 
*/ certificateAuthorityDomainConfigs?: outputs.container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]; /** * Whether or not private registries are configured. */ enabled: boolean; } interface NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig { /** * List of fully-qualified-domain-names. IPv4s and port specification are supported. */ fqdns: string[]; /** * Parameters for configuring a certificate hosted in GCP SecretManager. */ gcpSecretManagerCertificateConfig: outputs.container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig; } interface NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig { /** * URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'. */ secretUri: string; } interface NodePoolNodeConfigContainerdConfigRegistryHost { /** * Configures a list of host-specific configurations for the server. */ hosts?: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHostHost[]; /** * Defines the host name of the registry server. */ server: string; } interface NodePoolNodeConfigContainerdConfigRegistryHostHost { /** * Represent the capabilities of the registry host, specifying what operations a host is capable of performing. */ capabilities?: string[]; /** * Configures the registry host certificate. */ cas?: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHostHostCa[]; /** * Configures the registry host client certificate and key. */ clients?: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHostHostClient[]; /** * Specifies the maximum duration allowed for a connection attempt to complete. */ dialTimeout?: string; /** * Configures the registry host headers. 
*/ headers?: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHostHostHeader[]; /** * Configures the registry host/mirror. */ host: string; /** * Indicate the host's API root endpoint is defined in the URL path rather than by the API specification. */ overridePath?: boolean; } interface NodePoolNodeConfigContainerdConfigRegistryHostHostCa { /** * URI for the Secret Manager secret that hosts the certificate. */ gcpSecretManagerSecretUri?: string; } interface NodePoolNodeConfigContainerdConfigRegistryHostHostClient { /** * Configures the client certificate. */ cert: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHostHostClientCert; /** * Configures the client private key. */ key?: outputs.container.NodePoolNodeConfigContainerdConfigRegistryHostHostClientKey; } interface NodePoolNodeConfigContainerdConfigRegistryHostHostClientCert { /** * URI for the Secret Manager secret that hosts the client certificate. */ gcpSecretManagerSecretUri?: string; } interface NodePoolNodeConfigContainerdConfigRegistryHostHostClientKey { /** * URI for the Secret Manager secret that hosts the private key. */ gcpSecretManagerSecretUri?: string; } interface NodePoolNodeConfigContainerdConfigRegistryHostHostHeader { /** * Configures the header key. */ key: string; /** * Configures the header value. */ values: string[]; } interface NodePoolNodeConfigContainerdConfigWritableCgroups { /** * Whether writable cgroups are enabled. */ enabled: boolean; } interface NodePoolNodeConfigEffectiveTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. */ value: string; } interface NodePoolNodeConfigEphemeralStorageConfig { /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size. 
*/ localSsdCount: number; } interface NodePoolNodeConfigEphemeralStorageLocalSsdConfig { /** * Number of local SSDs to be utilized for GKE Data Cache. Uses NVMe interfaces. */ dataCacheCount?: number; /** * Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size. */ localSsdCount: number; } interface NodePoolNodeConfigFastSocket { /** * Whether or not NCCL Fast Socket is enabled */ enabled: boolean; } interface NodePoolNodeConfigGcfsConfig { /** * Whether or not GCFS is enabled */ enabled: boolean; } interface NodePoolNodeConfigGuestAccelerator { /** * The number of the accelerator cards exposed to an instance. */ count: number; /** * Configuration for auto installation of GPU driver. */ gpuDriverInstallationConfig: outputs.container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig; /** * Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning) */ gpuPartitionSize?: string; /** * Configuration for GPU sharing. */ gpuSharingConfig?: outputs.container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfig; /** * The accelerator type resource name. */ type: string; } interface NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig { /** * Mode for how the GPU driver is installed. */ gpuDriverVersion: string; } interface NodePoolNodeConfigGuestAcceleratorGpuSharingConfig { /** * The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig) */ gpuSharingStrategy: string; /** * The maximum number of containers that can share a GPU. 
*/ maxSharedClientsPerGpu: number; } interface NodePoolNodeConfigGvnic { /** * Whether or not gvnic is enabled */ enabled: boolean; } interface NodePoolNodeConfigHostMaintenancePolicy { /** * . */ maintenanceInterval: string; } interface NodePoolNodeConfigKubeletConfig { /** * Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. */ allowedUnsafeSysctls?: string[]; /** * Defines the maximum number of container log files that can be present for a container. */ containerLogMaxFiles?: number; /** * Defines the maximum size of the container log file before it is rotated. */ containerLogMaxSize?: string; /** * Enable CPU CFS quota enforcement for containers that specify CPU limits. */ cpuCfsQuota: boolean; /** * Set the CPU CFS quota period value 'cpu.cfs_period_us'. */ cpuCfsQuotaPeriod?: string; /** * Control the CPU management policy on the node. */ cpuManagerPolicy?: string; /** * Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. */ evictionMaxPodGracePeriodSeconds?: number; /** * Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. */ evictionMinimumReclaim?: outputs.container.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim; /** * Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. */ evictionSoft?: outputs.container.NodePoolNodeConfigKubeletConfigEvictionSoft; /** * Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. */ evictionSoftGracePeriod?: outputs.container.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod; /** * Defines the percent of disk usage after which image garbage collection is always run. 
*/ imageGcHighThresholdPercent?: number; /** * Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. */ imageGcLowThresholdPercent?: number; /** * Defines the maximum age an image can be unused before it is garbage collected. */ imageMaximumGcAge?: string; /** * Defines the minimum age for an unused image before it is garbage collected. */ imageMinimumGcAge?: string; /** * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. */ insecureKubeletReadonlyPortEnabled: string; /** * Set the maximum number of image pulls in parallel. */ maxParallelImagePulls: number; /** * Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity. */ memoryManager?: outputs.container.NodePoolNodeConfigKubeletConfigMemoryManager; /** * Controls the maximum number of processes allowed to run in a pod. */ podPidsLimit?: number; /** * Defines whether to enable single process OOM killer. */ singleProcessOomKill?: boolean; /** * Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location. */ topologyManager?: outputs.container.NodePoolNodeConfigKubeletConfigTopologyManager; } interface NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim { /** * Defines percentage of minimum reclaim for imagefs.available. */ imagefsAvailable?: string; /** * Defines percentage of minimum reclaim for imagefs.inodesFree. */ imagefsInodesFree?: string; /** * Defines percentage of minimum reclaim for memory.available. */ memoryAvailable?: string; /** * Defines percentage of minimum reclaim for nodefs.available. 
*/ nodefsAvailable?: string; /** * Defines percentage of minimum reclaim for nodefs.inodesFree. */ nodefsInodesFree?: string; /** * Defines percentage of minimum reclaim for pid.available. */ pidAvailable?: string; } interface NodePoolNodeConfigKubeletConfigEvictionSoft { /** * Defines percentage of soft eviction threshold for imagefs.available. */ imagefsAvailable?: string; /** * Defines percentage of soft eviction threshold for imagefs.inodesFree. */ imagefsInodesFree?: string; /** * Defines quantity of soft eviction threshold for memory.available. */ memoryAvailable?: string; /** * Defines percentage of soft eviction threshold for nodefs.available. */ nodefsAvailable?: string; /** * Defines percentage of soft eviction threshold for nodefs.inodesFree. */ nodefsInodesFree?: string; /** * Defines percentage of soft eviction threshold for pid.available. */ pidAvailable?: string; } interface NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod { /** * Defines grace period for the imagefs.available soft eviction threshold */ imagefsAvailable?: string; /** * Defines grace period for the imagefs.inodesFree soft eviction threshold. */ imagefsInodesFree?: string; /** * Defines grace period for the memory.available soft eviction threshold. */ memoryAvailable?: string; /** * Defines grace period for the nodefs.available soft eviction threshold. */ nodefsAvailable?: string; /** * Defines grace period for the nodefs.inodesFree soft eviction threshold. */ nodefsInodesFree?: string; /** * Defines grace period for the pid.available soft eviction threshold. */ pidAvailable?: string; } interface NodePoolNodeConfigKubeletConfigMemoryManager { /** * The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity. */ policy: string; } interface NodePoolNodeConfigKubeletConfigTopologyManager { /** * The Topology Manager policy to use. 
This policy dictates how resource alignment is handled on the node. */ policy: string; /** * The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod). */ scope: string; } interface NodePoolNodeConfigLinuxNodeConfig { /** * cgroupMode specifies the cgroup mode to be used on the node. */ cgroupMode: string; /** * Amounts for 2M and 1G hugepages. */ hugepagesConfig?: outputs.container.NodePoolNodeConfigLinuxNodeConfigHugepagesConfig; /** * The settings for kernel module loading. */ nodeKernelModuleLoading?: outputs.container.NodePoolNodeConfigLinuxNodeConfigNodeKernelModuleLoading; /** * The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. */ sysctls?: { [key: string]: string; }; /** * The Linux kernel transparent hugepage defrag setting. */ transparentHugepageDefrag?: string; /** * The Linux kernel transparent hugepage setting. */ transparentHugepageEnabled: string; } interface NodePoolNodeConfigLinuxNodeConfigHugepagesConfig { /** * Amount of 1G hugepages. */ hugepageSize1g?: number; /** * Amount of 2M hugepages. */ hugepageSize2m?: number; } interface NodePoolNodeConfigLinuxNodeConfigNodeKernelModuleLoading { /** * The policy for kernel module loading. */ policy?: string; } interface NodePoolNodeConfigLocalNvmeSsdBlockConfig { /** * Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. */ localSsdCount: number; } interface NodePoolNodeConfigReservationAffinity { /** * The type of reservation consumption * Accepted values are: * * * `"UNSPECIFIED"`: Default value. This should not be used. * * `"NO_RESERVATION"`: Do not consume from any reserved capacity. * * `"ANY_RESERVATION"`: Consume any reservation available. * * `"SPECIFIC_RESERVATION"`: Must consume from a specific reservation. 
Must specify key value fields for specifying the reservations. */ consumeReservationType: string; /** * The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value. */ key?: string; /** * The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name" */ values?: string[]; } interface NodePoolNodeConfigSandboxConfig { /** * Type of the sandbox to use for the node (e.g. 'gvisor') */ sandboxType: string; } interface NodePoolNodeConfigSecondaryBootDisk { /** * Disk image to create the secondary boot disk from */ diskImage: string; /** * Mode for how the secondary boot disk is used. */ mode?: string; } interface NodePoolNodeConfigShieldedInstanceConfig { /** * Defines whether the instance has integrity monitoring enabled. */ enableIntegrityMonitoring?: boolean; /** * Defines whether the instance has Secure Boot enabled. */ enableSecureBoot?: boolean; } interface NodePoolNodeConfigSoleTenantConfig { /** * Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. */ minNodeCpus?: number; /** * . */ nodeAffinities: outputs.container.NodePoolNodeConfigSoleTenantConfigNodeAffinity[]; } interface NodePoolNodeConfigSoleTenantConfigNodeAffinity { /** * . */ key: string; /** * . */ operator: string; /** * . */ values: string[]; } interface NodePoolNodeConfigTaint { /** * Effect for taint. */ effect: string; /** * Key for taint. */ key: string; /** * Value for taint. 
*/ value: string; } interface NodePoolNodeConfigWindowsNodeConfig { /** * The OS Version of the windows nodepool.Values are OS_VERSION_UNSPECIFIED,OS_VERSION_LTSC2019 and OS_VERSION_LTSC2022 */ osversion?: string; } interface NodePoolNodeConfigWorkloadMetadataConfig { /** * Mode is the configuration for how to expose metadata to workloads running on the node. */ mode: string; } interface NodePoolNodeDrainConfig { /** * Whether to respect PodDisruptionBudget policy during node pool deletion. */ respectPdbDuringNodePoolDeletion?: boolean; } interface NodePoolPlacementPolicy { /** * If set, refers to the name of a custom resource policy supplied by the user. * The resource policy must be in the same project and region as the node pool. * If not found, InvalidArgument error is returned. */ policyName?: string; /** * The [TPU topology](https://cloud.google.com/kubernetes-engine/docs/concepts/plan-tpus#topology) like `"2x4"` or `"2x2x2"`. */ tpuTopology?: string; /** * The type of the policy. Supports a single value: COMPACT. * Specifying COMPACT placement policy type places node pool's nodes in a closer * physical proximity in order to reduce network latency between nodes. */ type: string; } interface NodePoolQueuedProvisioning { /** * Makes nodes obtainable through the [ProvisioningRequest API](https://cloud.google.com/kubernetes-engine/docs/how-to/provisioningrequest) exclusively. */ enabled: boolean; } interface NodePoolUpgradeSettings { /** * The settings to adjust [blue green upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pool-upgrade-strategies#blue-green-upgrade-strategy). * Structure is documented below */ blueGreenSettings: outputs.container.NodePoolUpgradeSettingsBlueGreenSettings; /** * The number of additional nodes that can be added to the node pool during * an upgrade. Increasing `maxSurge` raises the number of nodes that can be upgraded simultaneously. * Can be set to 0 or greater. 
*/ maxSurge: number; /** * The number of nodes that can be simultaneously unavailable during * an upgrade. Increasing `maxUnavailable` raises the number of nodes that can be upgraded in * parallel. Can be set to 0 or greater. * * `maxSurge` and `maxUnavailable` must not be negative and at least one of them must be greater than zero. */ maxUnavailable: number; /** * The upgrade strategy to be used for upgrading the nodes. */ strategy?: string; } interface NodePoolUpgradeSettingsBlueGreenSettings { /** * ) Autoscaled rollout policy for blue-green upgrade. */ autoscaledRolloutPolicy?: outputs.container.NodePoolUpgradeSettingsBlueGreenSettingsAutoscaledRolloutPolicy; /** * Time needed after draining the entire blue pool. * After this period, the blue pool will be cleaned up. */ nodePoolSoakDuration: string; /** * Specifies the standard policy settings for blue-green upgrades. */ standardRolloutPolicy?: outputs.container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy; } interface NodePoolUpgradeSettingsBlueGreenSettingsAutoscaledRolloutPolicy { /** * Time in seconds to wait after cordoning the blue pool before draining the nodes. */ waitForDrainDuration: string; } interface NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy { /** * Number of blue nodes to drain in a batch. */ batchNodeCount: number; /** * Percentage of the blue pool nodes to drain in a batch. */ batchPercentage: number; /** * Soak time after each batch gets drained. */ batchSoakDuration: string; } } export declare namespace containeranalysis { interface NoteAttestationAuthority { /** * This submessage provides human-readable hints about the purpose of * the AttestationAuthority. Because the name of a Note acts as its * resource reference, it is important to disambiguate the canonical * name of the Note (which might be a UUID for security purposes) * from "readable" names more suitable for debug output. 
Note that * these hints should NOT be used to look up AttestationAuthorities * in security sensitive contexts, such as when looking up * Attestations to verify. * Structure is documented below. */ hint: outputs.containeranalysis.NoteAttestationAuthorityHint; } interface NoteAttestationAuthorityHint { /** * The human readable name of this Attestation Authority, for * example "qa". */ humanReadableName: string; } interface NoteIamBindingCondition { description?: string; expression: string; title: string; } interface NoteIamMemberCondition { description?: string; expression: string; title: string; } interface NoteRelatedUrl { /** * Label to describe usage of the URL */ label?: string; /** * Specific URL associated with the resource. */ url: string; } interface OccurenceAttestation { /** * The serialized payload that is verified by one or * more signatures. A base64-encoded string. */ serializedPayload: string; /** * One or more signatures over serializedPayload. * Verifier implementations should consider this attestation * message verified if at least one signature verifies * serializedPayload. See Signature in common.proto for more * details on signature structure and verification. * Structure is documented below. */ signatures: outputs.containeranalysis.OccurenceAttestationSignature[]; } interface OccurenceAttestationSignature { /** * The identifier for the public key that verifies this * signature. MUST be an RFC3986 conformant * URI. * When possible, the key id should be an * immutable reference, such as a cryptographic digest. * Examples of valid values: * * OpenPGP V4 public key fingerprint. See https://www.iana.org/assignments/uri-schemes/prov/openpgp4fpr * for more details on this scheme. 
* * `openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA` * * RFC6920 digest-named SubjectPublicKeyInfo (digest of the DER serialization): * * "ni:///sha-256;cD9o9Cq6LG3jD0iKXqEi_vdjJGecm_iXkbqVoScViaU" */ publicKeyId: string; /** * The content of the signature, an opaque bytestring. * The payload that this signature verifies MUST be * unambiguously provided with the Signature during * verification. A wrapper message might provide the * payload explicitly. Alternatively, a message might * have a canonical serialization that can always be * unambiguously computed to derive the payload. */ signature?: string; } } export declare namespace databasemigrationservice { interface ConnectionProfileAlloydb { /** * Required. The AlloyDB cluster ID that this connection profile is associated with. */ clusterId: string; /** * Immutable. Metadata used to create the destination AlloyDB cluster. * Structure is documented below. */ settings?: outputs.databasemigrationservice.ConnectionProfileAlloydbSettings; } interface ConnectionProfileAlloydbSettings { /** * Required. Input only. Initial user to setup during cluster creation. * Structure is documented below. */ initialUser: outputs.databasemigrationservice.ConnectionProfileAlloydbSettingsInitialUser; /** * Labels for the AlloyDB cluster created by DMS. */ labels?: { [key: string]: string; }; /** * Settings for the cluster's primary instance * Structure is documented below. */ primaryInstanceSettings?: outputs.databasemigrationservice.ConnectionProfileAlloydbSettingsPrimaryInstanceSettings; /** * Required. The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. * It is specified in the form: 'projects/{project_number}/global/networks/{network_id}'. This is required to create a cluster. 
*/ vpcNetwork: string; } interface ConnectionProfileAlloydbSettingsInitialUser { /** * The initial password for the user. * **Note**: This property is sensitive and will not be displayed in the plan. */ password: string; /** * (Output) * Output only. Indicates if the initialUser.password field has been set. */ passwordSet: boolean; /** * The database username. */ user: string; } interface ConnectionProfileAlloydbSettingsPrimaryInstanceSettings { /** * Database flags to pass to AlloyDB when DMS is creating the AlloyDB cluster and instances. See the AlloyDB documentation for how these can be used. */ databaseFlags?: { [key: string]: string; }; /** * The database username. */ id: string; /** * Labels for the AlloyDB primary instance created by DMS. */ labels?: { [key: string]: string; }; /** * Configuration for the machines that host the underlying database engine. * Structure is documented below. */ machineConfig: outputs.databasemigrationservice.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfig; /** * (Output) * Output only. The private IP address for the Instance. This is the connection endpoint for an end-user application. */ privateIp: string; } interface ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfig { /** * The number of CPU's in the VM instance. */ cpuCount: number; } interface ConnectionProfileCloudsql { /** * (Output) * Output only. The Cloud SQL instance ID that this connection profile is associated with. */ cloudSqlId: string; /** * (Output) * Output only. The Cloud SQL database instance's private IP. */ privateIp: string; /** * (Output) * Output only. The Cloud SQL database instance's public IP. */ publicIp: string; /** * Immutable. Metadata used to create the destination Cloud SQL database. * Structure is documented below. 
*/ settings?: outputs.databasemigrationservice.ConnectionProfileCloudsqlSettings; } interface ConnectionProfileCloudsqlSettings { /** * The activation policy specifies when the instance is activated; it is applicable only when the instance state is 'RUNNABLE'. * Possible values are: `ALWAYS`, `NEVER`. */ activationPolicy?: string; /** * If you enable this setting, Cloud SQL checks your available storage every 30 seconds. If the available storage falls below a threshold size, Cloud SQL automatically adds additional storage capacity. * If the available storage repeatedly falls below the threshold size, Cloud SQL continues to add storage until it reaches the maximum of 30 TB. */ autoStorageIncrease?: boolean; /** * The KMS key name used for the csql instance. */ cmekKeyName?: string; /** * The Cloud SQL default instance level collation. */ collation?: string; /** * The storage capacity available to the database, in GB. The minimum (and default) size is 10GB. */ dataDiskSizeGb?: string; /** * The type of storage. * Possible values are: `PD_SSD`, `PD_HDD`. */ dataDiskType?: string; /** * The database flags passed to the Cloud SQL instance at startup. */ databaseFlags?: { [key: string]: string; }; /** * The database engine type and version. * Currently supported values located at https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.connectionProfiles#sqldatabaseversion */ databaseVersion?: string; /** * The edition of the given Cloud SQL instance. * Possible values are: `ENTERPRISE`, `ENTERPRISE_PLUS`. */ edition?: string; /** * The settings for IP Management. This allows to enable or disable the instance IP and manage which external networks can connect to the instance. The IPv4 address cannot be disabled. * Structure is documented below. */ ipConfig?: outputs.databasemigrationservice.ConnectionProfileCloudsqlSettingsIpConfig; /** * Input only. Initial root password. 
* **Note**: This property is sensitive and will not be displayed in the plan. */ rootPassword?: string; /** * (Output) * Output only. Indicates If this connection profile root password is stored. */ rootPasswordSet: boolean; /** * The Database Migration Service source connection profile ID, in the format: projects/my_project_name/locations/us-central1/connectionProfiles/connection_profile_ID */ sourceId: string; /** * The maximum size to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit. */ storageAutoResizeLimit?: string; /** * The tier (or machine type) for this instance, for example: db-n1-standard-1 (MySQL instances) or db-custom-1-3840 (PostgreSQL instances). * For more information, see https://cloud.google.com/sql/docs/mysql/instance-settings */ tier?: string; /** * The resource labels for a Cloud SQL instance to use to annotate any related underlying resources such as Compute Engine VMs. */ userLabels?: { [key: string]: string; }; /** * The Google Cloud Platform zone where your Cloud SQL database instance is located. */ zone?: string; } interface ConnectionProfileCloudsqlSettingsIpConfig { /** * The list of external networks that are allowed to connect to the instance using the IP. * Structure is documented below. */ authorizedNetworks?: outputs.databasemigrationservice.ConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetwork[]; /** * Whether the instance should be assigned an IPv4 address or not. */ enableIpv4?: boolean; /** * The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. * This setting can be updated, but it cannot be removed after it is set. */ privateNetwork?: string; /** * Whether SSL connections over IP should be enforced or not. 
*/ requireSsl?: boolean; } interface ConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetwork { /** * The time when this access control entry expires in RFC 3339 format. */ expireTime?: string; /** * A label to identify this entry. */ label?: string; /** * Input only. The time-to-live of this access control entry. */ ttl?: string; /** * The allowlisted value for the access control list. */ value: string; } interface ConnectionProfileError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: { [key: string]: string; }[]; /** * (Output) * Human readable message indicating details about the current status. */ message: string; } interface ConnectionProfileMysql { /** * If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. */ cloudSqlId?: string; /** * The IP or hostname of the source MySQL database. */ host?: string; /** * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet: boolean; /** * The network port of the source MySQL database. */ port?: number; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. */ ssl?: outputs.databasemigrationservice.ConnectionProfileMysqlSsl; /** * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ username?: string; } interface ConnectionProfileMysqlSsl { /** * Input only. 
The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. * The replica will use this certificate to verify it's connecting to the right host. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate?: string; /** * Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server. * If this field is used then the 'clientKey' field is mandatory * **Note**: This property is sensitive and will not be displayed in the plan. */ clientCertificate?: string; /** * Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. * If this field is used then the 'clientCertificate' field is mandatory. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientKey?: string; /** * (Output) * The current connection profile state. */ type?: string; } interface ConnectionProfileOracle { /** * Required. Database service for the Oracle connection. */ databaseService: string; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. */ forwardSshConnectivity?: outputs.databasemigrationservice.ConnectionProfileOracleForwardSshConnectivity; /** * Required. The IP or hostname of the source Oracle database. */ host: string; /** * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ password: string; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet: boolean; /** * Required. The network port of the source Oracle database. 
*/ port: number; /** * Configuration for using a private network to communicate with the source database * Structure is documented below. */ privateConnectivity?: outputs.databasemigrationservice.ConnectionProfileOraclePrivateConnectivity; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. */ ssl?: outputs.databasemigrationservice.ConnectionProfileOracleSsl; /** * This object has no nested fields. * Static IP address connectivity configured on service project. */ staticServiceIpConnectivity?: outputs.databasemigrationservice.ConnectionProfileOracleStaticServiceIpConnectivity; /** * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ username: string; } interface ConnectionProfileOracleForwardSshConnectivity { /** * Required. Hostname for the SSH tunnel. */ hostname: string; /** * Input only. SSH password. Only one of `password` and `privateKey` can be configured. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Port for the SSH tunnel, default value is 22. */ port: number; /** * Input only. SSH private key. Only one of `password` and `privateKey` can be configured. * **Note**: This property is sensitive and will not be displayed in the plan. */ privateKey?: string; /** * Required. Username for the SSH tunnel. */ username: string; } interface ConnectionProfileOraclePrivateConnectivity { /** * Required. The resource name (URI) of the private connection. */ privateConnection: string; } interface ConnectionProfileOracleSsl { /** * Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. * The replica will use this certificate to verify it's connecting to the right host. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate?: string; /** * Input only. 
The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server. * If this field is used then the 'clientKey' field is mandatory * **Note**: This property is sensitive and will not be displayed in the plan. */ clientCertificate?: string; /** * Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. * If this field is used then the 'clientCertificate' field is mandatory. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientKey?: string; /** * (Output) * The current connection profile state. */ type: string; } interface ConnectionProfileOracleStaticServiceIpConnectivity { } interface ConnectionProfilePostgresql { /** * If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. */ alloydbClusterId?: string; /** * If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. */ cloudSqlId?: string; /** * The IP or hostname of the source MySQL database. */ host?: string; /** * (Output) * Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. */ networkArchitecture: string; /** * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet: boolean; /** * The network port of the source MySQL database. */ port?: number; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. 
*/ ssl?: outputs.databasemigrationservice.ConnectionProfilePostgresqlSsl; /** * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ username?: string; } interface ConnectionProfilePostgresqlSsl { /** * Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. * The replica will use this certificate to verify it's connecting to the right host. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate?: string; /** * Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server. * If this field is used then the 'clientKey' field is mandatory * **Note**: This property is sensitive and will not be displayed in the plan. */ clientCertificate?: string; /** * Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. * If this field is used then the 'clientCertificate' field is mandatory. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientKey?: string; /** * (Output) * The current connection profile state. */ type?: string; } interface MigrationJobDumpFlags { /** * A list of dump flags * Structure is documented below. */ dumpFlags?: outputs.databasemigrationservice.MigrationJobDumpFlagsDumpFlag[]; } interface MigrationJobDumpFlagsDumpFlag { /** * The name of the flag */ name?: string; /** * The value of the flag */ value?: string; } interface MigrationJobError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: { [key: string]: string; }[]; /** * (Output) * Human readable message indicating details about the current status. 
*/ message: string; } interface MigrationJobPerformanceConfig { /** * Initial dump parallelism level. * Possible values are: `MIN`, `OPTIMAL`, `MAX`. */ dumpParallelLevel?: string; } interface MigrationJobReverseSshConnectivity { /** * The name of the virtual machine (Compute Engine) used as the bastion server * for the SSH tunnel. */ vm?: string; /** * The IP of the virtual machine (Compute Engine) used as the bastion server * for the SSH tunnel. */ vmIp?: string; /** * The forwarding port of the virtual machine (Compute Engine) used as the * bastion server for the SSH tunnel. */ vmPort?: number; /** * The name of the VPC to peer with the Cloud SQL private network. */ vpc?: string; } interface MigrationJobStaticIpConnectivity { } interface MigrationJobVpcPeeringConnectivity { /** * The name of the VPC network to peer with the Cloud SQL private network. */ vpc?: string; } interface PrivateConnectionError { /** * A list of messages that carry the error details. */ details?: { [key: string]: string; }; /** * A message containing more information about the error that occurred. */ message?: string; } interface PrivateConnectionVpcPeeringConfig { /** * A free subnet for peering. (CIDR of /29) */ subnet: string; /** * Fully qualified name of the VPC that Database Migration Service will peer to. * Format: projects/{project}/global/{networks}/{name} */ vpcName: string; } } export declare namespace datacatalog { interface EntryBigqueryDateShardedSpec { /** * (Output) * The Data Catalog resource name of the dataset entry the current table belongs to, for example, * projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId} */ dataset: string; /** * (Output) * Total number of shards. */ shardCount: number; /** * (Output) * The table name prefix of the shards. The name of any given shard is [tablePrefix]YYYYMMDD, * for example, for shard MyTable20180101, the tablePrefix is MyTable. 
*/ tablePrefix: string; } interface EntryBigqueryTableSpec { /** * (Output) * The table source type. */ tableSourceType: string; /** * (Output) * Spec of a BigQuery table. This field should only be populated if tableSourceType is BIGQUERY_TABLE. * Structure is documented below. */ tableSpecs: outputs.datacatalog.EntryBigqueryTableSpecTableSpec[]; /** * (Output) * Table view specification. This field should only be populated if tableSourceType is BIGQUERY_VIEW. * Structure is documented below. */ viewSpecs: outputs.datacatalog.EntryBigqueryTableSpecViewSpec[]; } interface EntryBigqueryTableSpecTableSpec { /** * (Output) * If the table is a dated shard, i.e., with name pattern [prefix]YYYYMMDD, groupedEntry is the * Data Catalog resource name of the date sharded grouped entry, for example, * projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId}. * Otherwise, groupedEntry is empty. */ groupedEntry: string; } interface EntryBigqueryTableSpecViewSpec { /** * (Output) * The query that defines the table view. */ viewQuery: string; } interface EntryGcsFilesetSpec { /** * Patterns to identify a set of files in Google Cloud Storage. * See [Cloud Storage documentation](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames) * for more information. Note that bucket wildcards are currently not supported. Examples of valid filePatterns: * * gs://bucket_name/dir/*: matches all files within bucket_name/dir directory. * * gs://bucket_name/dir/**: matches all files in bucket_name/dir spanning all subdirectories. * * gs://bucket_name/file*: matches files prefixed by file in bucketName * * gs://bucket_name/??.txt: matches files with two characters followed by .txt in bucketName * * gs://bucket_name/[aeiou].txt: matches files that contain a single vowel character followed by .txt in bucketName * * gs://bucket_name/[a-m].txt: matches files that contain a, b, ... 
or m followed by .txt in bucketName * * gs://bucket_name/a/*/b: matches all files in bucketName that match a/*/b pattern, such as a/c/b, a/d/b * * gs://another_bucket/a.txt: matches gs://another_bucket/a.txt */ filePatterns: string[]; /** * (Output) * Sample files contained in this fileset, not all files contained in this fileset are represented here. * Structure is documented below. * * * The `sampleGcsFileSpecs` block contains: */ sampleGcsFileSpecs: outputs.datacatalog.EntryGcsFilesetSpecSampleGcsFileSpec[]; } interface EntryGcsFilesetSpecSampleGcsFileSpec { /** * The full file path */ filePath: string; /** * The size of the file, in bytes. */ sizeBytes: number; } interface EntryGroupIamBindingCondition { description?: string; expression: string; title: string; } interface EntryGroupIamMemberCondition { description?: string; expression: string; title: string; } interface PolicyTagIamBindingCondition { description?: string; expression: string; title: string; } interface PolicyTagIamMemberCondition { description?: string; expression: string; title: string; } interface TagField { /** * Holds the value for a tag field with boolean type. */ boolValue?: boolean; /** * (Output) * The display name of this field */ displayName: string; /** * Holds the value for a tag field with double type. */ doubleValue?: number; /** * Holds the value for a tag field with enum type. This value must be one of the allowed values in the definition of this enum. */ enumValue?: string; /** * The identifier for this object. Format specified above. */ fieldName: string; /** * (Output) * The order of this field with respect to other fields in this tag. For example, a higher value can indicate * a more important field. The value can be negative. Multiple fields can have the same order, and field orders * within a tag do not have to be sequential. */ order: number; /** * Holds the value for a tag field with string type. 
*/ stringValue?: string; /** * Holds the value for a tag field with timestamp type. */ timestampValue?: string; } interface TagTemplateField { /** * A description for this field. */ description: string; /** * The display name for this field. */ displayName: string; /** * The identifier for this object. Format specified above. */ fieldId: string; /** * Whether this is a required field. Defaults to false. */ isRequired: boolean; /** * (Output) * The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field} */ name: string; /** * The order of this field with respect to other fields in this tag template. * A higher value indicates a more important field. The value can be negative. * Multiple fields can have the same order, and field orders within a tag do not have to be sequential. */ order: number; /** * The type of value this tag field can contain. * Structure is documented below. */ type: outputs.datacatalog.TagTemplateFieldType; } interface TagTemplateFieldType { /** * Represents an enum type. * Exactly one of `primitiveType` or `enumType` must be set * Structure is documented below. */ enumType?: outputs.datacatalog.TagTemplateFieldTypeEnumType; /** * Represents primitive types - string, bool etc. * Exactly one of `primitiveType` or `enumType` must be set * Possible values are: `DOUBLE`, `STRING`, `BOOL`, `TIMESTAMP`. */ primitiveType: string; } interface TagTemplateFieldTypeEnumType { /** * The set of allowed values for this enum. The display names of the * values must be case-insensitively unique within this set. Currently, * enum values can only be added to the list of allowed values. Deletion * and renaming of enum values are not supported. * Can have up to 500 allowed values. * Structure is documented below. 
*/ allowedValues: outputs.datacatalog.TagTemplateFieldTypeEnumTypeAllowedValue[]; } interface TagTemplateFieldTypeEnumTypeAllowedValue { /** * The display name of the enum value. */ displayName: string; } interface TagTemplateIamBindingCondition { description?: string; expression: string; title: string; } interface TagTemplateIamMemberCondition { description?: string; expression: string; title: string; } interface TaxonomyIamBindingCondition { description?: string; expression: string; title: string; } interface TaxonomyIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace dataflow { interface PipelineScheduleInfo { /** * (Output) * When the next Scheduler job is going to run. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ nextJobTime: string; /** * Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler. */ schedule?: string; /** * Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed. */ timeZone?: string; } interface PipelineWorkload { /** * Template information and additional parameters needed to launch a Dataflow job using the flex launch API. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest * Structure is documented below. */ dataflowFlexTemplateRequest?: outputs.dataflow.PipelineWorkloadDataflowFlexTemplateRequest; /** * Template information and additional parameters needed to launch a Dataflow job using the standard launch API. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest * Structure is documented below. 
*/ dataflowLaunchTemplateRequest?: outputs.dataflow.PipelineWorkloadDataflowLaunchTemplateRequest; } interface PipelineWorkloadDataflowFlexTemplateRequest { /** * Parameter to launch a job from a Flex Template. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter * Structure is documented below. */ launchParameter: outputs.dataflow.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter; /** * The regional endpoint to which to direct the request. For example, us-central1, us-west1. */ location: string; /** * The ID of the Cloud Platform project that the job belongs to. */ projectId: string; /** * If true, the request is validated but not actually executed. Defaults to false. */ validateOnly?: boolean; } interface PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter { /** * Cloud Storage path to a file with a JSON-serialized ContainerSpec as content. */ containerSpecGcsPath?: string; /** * The runtime environment for the Flex Template job. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment * Structure is documented below. */ environment?: outputs.dataflow.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment; /** * The job name to use for the created job. For an update job request, the job name should be the same as the existing running job. */ jobName: string; /** * Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' */ launchOptions?: { [key: string]: string; }; /** * 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' 
*/ parameters?: { [key: string]: string; }; /** * 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' */ transformNameMappings?: { [key: string]: string; }; /** * Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job. */ update?: boolean; } interface PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment { /** * Additional experiment flags for the job. */ additionalExperiments?: string[]; /** * Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. * 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' */ additionalUserLabels?: { [key: string]: string; }; /** * Whether to enable Streaming Engine for the job. */ enableStreamingEngine?: boolean; /** * Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal * Possible values are: `FLEXRS_UNSPECIFIED`, `FLEXRS_SPEED_OPTIMIZED`, `FLEXRS_COST_OPTIMIZED`. */ flexrsGoal?: string; /** * Configuration for VM IPs. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration * Possible values are: `WORKER_IP_UNSPECIFIED`, `WORKER_IP_PUBLIC`, `WORKER_IP_PRIVATE`. */ ipConfiguration?: string; /** * 'Name for the Cloud KMS key for the job. 
The key format is: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key}' */ kmsKeyName?: string; /** * The machine type to use for the job. Defaults to the value from the template if not specified. */ machineType?: string; /** * The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000. */ maxWorkers?: number; /** * Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". */ network?: string; /** * The initial number of Compute Engine instances for the job. */ numWorkers?: number; /** * The email address of the service account to run the job as. */ serviceAccountEmail?: string; /** * Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL. */ subnetwork?: string; /** * The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://. */ tempLocation?: string; /** * The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region. */ workerRegion?: string; /** * The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence. 
*/ workerZone?: string; /** * The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence. */ zone?: string; } interface PipelineWorkloadDataflowLaunchTemplateRequest { /** * A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'. */ gcsPath?: string; /** * The parameters of the template to launch. This should be part of the body of the POST request. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters * Structure is documented below. */ launchParameters?: outputs.dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters; /** * The regional endpoint to which to direct the request. */ location?: string; /** * The ID of the Cloud Platform project that the job belongs to. */ projectId: string; /** * (Optional) */ validateOnly?: boolean; } interface PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters { /** * The runtime environment for the job. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment * Structure is documented below. */ environment?: outputs.dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment; /** * The job name to use for the created job. */ jobName: string; /** * The runtime parameters to pass to the job. * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' */ parameters?: { [key: string]: string; }; /** * Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' 
*/ transformNameMapping?: { [key: string]: string; }; /** * If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state. */ update?: boolean; } interface PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment { /** * Additional experiment flags for the job. */ additionalExperiments?: string[]; /** * Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. * 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' * 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' */ additionalUserLabels?: { [key: string]: string; }; /** * Whether to bypass the safety checks for the job's temporary directory. Use with caution. */ bypassTempDirValidation?: boolean; /** * Whether to enable Streaming Engine for the job. */ enableStreamingEngine?: boolean; /** * Configuration for VM IPs. * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration * Possible values are: `WORKER_IP_UNSPECIFIED`, `WORKER_IP_PUBLIC`, `WORKER_IP_PRIVATE`. */ ipConfiguration?: string; /** * 'Name for the Cloud KMS key for the job. The key format is: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key}' */ kmsKeyName?: string; /** * The machine type to use for the job. Defaults to the value from the template if not specified. */ machineType?: string; /** * The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000. */ maxWorkers?: number; /** * Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". */ network: string; /** * The initial number of Compute Engine instances for the job. */ numWorkers?: number; /** * The email address of the service account to run the job as. 
*/ serviceAccountEmail?: string; /** * Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL. */ subnetwork?: string; /** * The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://. */ tempLocation?: string; /** * The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region. */ workerRegion?: string; /** * The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence. */ workerZone?: string; /** * The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence. */ zone?: string; } } export declare namespace dataform { interface RepositoryGitRemoteSettings { /** * The name of the Secret Manager secret version to use as an authentication token for Git operations. This secret is for assigning with HTTPS only(for SSH use `sshAuthenticationConfig`). Must be in the format projects/*/secrets/*/versions/*. */ authenticationTokenSecretVersion?: string; /** * The Git remote's default branch name. */ defaultBranch: string; /** * Authentication fields for remote uris using SSH protocol. 
* Structure is documented below. */ sshAuthenticationConfig?: outputs.dataform.RepositoryGitRemoteSettingsSshAuthenticationConfig; /** * (Output) * Indicates the status of the Git access token. https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories#TokenStatus */ tokenStatus: string; /** * The Git remote's URL. */ url: string; } interface RepositoryGitRemoteSettingsSshAuthenticationConfig { /** * Content of a public SSH key to verify an identity of a remote Git host. */ hostPublicKey: string; /** * The name of the Secret Manager secret version to use as a ssh private key for Git operations. Must be in the format projects/*/secrets/*/versions/*. */ userPrivateKeySecretVersion: string; } interface RepositoryIamBindingCondition { description?: string; expression: string; title: string; } interface RepositoryIamMemberCondition { description?: string; expression: string; title: string; } interface RepositoryReleaseConfigCodeCompilationConfig { /** * Optional. The default schema (BigQuery dataset ID) for assertions. */ assertionSchema?: string; /** * Optional. The suffix that should be appended to all database (Google Cloud project ID) names. */ databaseSuffix?: string; /** * Optional. The default database (Google Cloud project ID). */ defaultDatabase?: string; /** * Optional. The default BigQuery location to use. Defaults to "US". * See the BigQuery docs for a full list of locations: https://cloud.google.com/bigquery/docs/locations. */ defaultLocation?: string; /** * Optional. The default schema (BigQuery dataset ID). */ defaultSchema?: string; /** * Optional. The suffix that should be appended to all schema (BigQuery dataset ID) names. */ schemaSuffix?: string; /** * Optional. The prefix that should be prepended to all table names. */ tablePrefix?: string; /** * Optional. User-defined variables that are made available to project code during compilation. * An object containing a list of "key": value pairs. 
* Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ vars?: { [key: string]: string; }; } interface RepositoryReleaseConfigRecentScheduledReleaseRecord { /** * (Output) * The name of the created compilation result, if one was successfully created. Must be in the format projects/*/locations/*/repositories/*/compilationResults/*. */ compilationResult: string; /** * (Output) * The error status encountered upon this attempt to create the compilation result, if the attempt was unsuccessful. * Structure is documented below. */ errorStatuses: outputs.dataform.RepositoryReleaseConfigRecentScheduledReleaseRecordErrorStatus[]; /** * (Output) * The timestamp of this release attempt. */ releaseTime: string; } interface RepositoryReleaseConfigRecentScheduledReleaseRecordErrorStatus { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ message: string; } interface RepositoryWorkflowConfigInvocationConfig { /** * Optional. When set to true, any incremental tables will be fully refreshed. */ fullyRefreshIncrementalTablesEnabled?: boolean; /** * Optional. The set of tags to include. */ includedTags?: string[]; /** * Optional. The set of action identifiers to include. * Structure is documented below. */ includedTargets?: outputs.dataform.RepositoryWorkflowConfigInvocationConfigIncludedTarget[]; /** * Optional. The service account to run workflow invocations under. */ serviceAccount?: string; /** * Optional. When set to true, transitive dependencies of included actions will be executed. */ transitiveDependenciesIncluded?: boolean; /** * Optional. When set to true, transitive dependents of included actions will be executed. 
*/ transitiveDependentsIncluded?: boolean; } interface RepositoryWorkflowConfigInvocationConfigIncludedTarget { /** * The action's database (Google Cloud project ID). */ database?: string; /** * The action's name, within database and schema. */ name?: string; /** * The action's schema (BigQuery dataset ID), within database. */ schema?: string; } interface RepositoryWorkflowConfigRecentScheduledExecutionRecord { /** * (Output) * The error status encountered upon this attempt to create the workflow invocation, if the attempt was unsuccessful. * Structure is documented below. */ errorStatuses: outputs.dataform.RepositoryWorkflowConfigRecentScheduledExecutionRecordErrorStatus[]; /** * (Output) * The timestamp of this workflow attempt. */ executionTime: string; /** * (Output) * The name of the created workflow invocation, if one was successfully created. In the format projects/*/locations/*/repositories/*/workflowInvocations/*. */ workflowInvocation: string; } interface RepositoryWorkflowConfigRecentScheduledExecutionRecordErrorStatus { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ message: string; } interface RepositoryWorkspaceCompilationOverrides { /** * The default database (Google Cloud project ID). */ defaultDatabase?: string; /** * The suffix that should be appended to all schema (BigQuery dataset ID) names. */ schemaSuffix?: string; /** * The prefix that should be prepended to all table names. */ tablePrefix?: string; } } export declare namespace datafusion { interface InstanceAccelerator { /** * The type of an accelerator for a CDF instance. * Possible values are: `CDC`, `HEALTHCARE`, `CCAI_INSIGHTS`. */ acceleratorType: string; /** * The type of an accelerator for a CDF instance. 
* Possible values are: `ENABLED`, `DISABLED`. */ state: string; } interface InstanceCryptoKeyConfig { /** * The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of projects/*/locations/*/keyRings/*/cryptoKeys/*. */ keyReference: string; } interface InstanceEventPublishConfig { /** * Option to enable Event Publishing. */ enabled: boolean; /** * The resource name of the Pub/Sub topic. Format: projects/{projectId}/topics/{topic_id} */ topic: string; } interface InstanceNetworkConfig { /** * Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and * the corresponding tenant project from a predefined list of available connection modes. * If this field is unspecified for a private instance, VPC peering is used. * Possible values are: `VPC_PEERING`, `PRIVATE_SERVICE_CONNECT_INTERFACES`. */ connectionType?: string; /** * The IP range in CIDR notation to use for the managed Data Fusion instance * nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. */ ipAllocation?: string; /** * Name of the network in the project with which the tenant project * will be peered for executing pipelines. In case of shared VPC where the network resides in another host * project the network should specified in the form of projects/{host-project-id}/global/networks/{network} */ network?: string; /** * Optional. Configuration for Private Service Connect. * This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. * Structure is documented below. */ privateServiceConnectConfig?: outputs.datafusion.InstanceNetworkConfigPrivateServiceConnectConfig; } interface InstanceNetworkConfigPrivateServiceConnectConfig { /** * (Output) * Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. * The size of this block is /25. 
The format of this field is governed by RFC 4632. */ effectiveUnreachableCidrBlock: string; /** * Optional. The reference to the network attachment used to establish private connectivity. * It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. * This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. */ networkAttachment?: string; /** * Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. * The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. * This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. * If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. */ unreachableCidrBlock?: string; } } export declare namespace dataloss { interface PreventionDeidentifyTemplateDeidentifyConfig { /** * Treat the dataset as an image and redact. * Structure is documented below. */ imageTransformations?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformations; /** * Treat the dataset as free-form text and apply the same free text transformation everywhere * Structure is documented below. */ infoTypeTransformations?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations; /** * Treat the dataset as structured. Transformations can be applied to specific locations within structured datasets, such as transforming a column within a table. * Structure is documented below. */ recordTransformations?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformations; } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformations { /** * For determination of how redaction of images should occur. 
* Structure is documented below. */ transforms: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransform[]; } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransform { /** * Apply transformation to all findings not specified in other ImageTransformation's selectedInfoTypes. */ allInfoTypes?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformAllInfoTypes; /** * Apply transformation to all text that doesn't match an infoType. */ allText?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformAllText; /** * The color to use when redacting content from an image. If not specified, the default is black. * Structure is documented below. */ redactionColor?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformRedactionColor; /** * Apply transformation to the selected infoTypes. * Structure is documented below. */ selectedInfoTypes?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformSelectedInfoTypes; } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformAllInfoTypes { } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformAllText { } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformRedactionColor { /** * The amount of blue in the color as a value in the interval [0, 1]. */ blue?: number; /** * The amount of green in the color as a value in the interval [0, 1]. */ green?: number; /** * The amount of red in the color as a value in the interval [0, 1]. */ red?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformSelectedInfoTypes { /** * InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to * all findings that correspond to infoTypes that were requested in InspectConfig. 
* Structure is documented below. */ infoTypes: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformSelectedInfoTypesInfoType[]; } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformSelectedInfoTypesInfoType { /** * Name of the information type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformSelectedInfoTypesInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformSelectedInfoTypesInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations { /** * Transformation for each infoType. Cannot specify more than one for a given infoType. * Structure is documented below. */ transformations: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformation[]; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformation { /** * InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to * all findings that correspond to infoTypes that were requested in InspectConfig. * Structure is documented below. */ infoTypes?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoType[]; /** * Apply the transformation to the entire field. * The `primitiveTransformation` block must only contain one argument, corresponding to the type of transformation. * Structure is documented below. 
*/ primitiveTransformation: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformation; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoType { /** * Name of the information type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformation { /** * Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 > LOW 31-65 > MEDIUM 66-100 > HIGH * This can be used on data of type: number, long, string, timestamp. * If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. * See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more. * Structure is documented below. */ bucketingConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfig; /** * Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. 
This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3). * Structure is documented below. */ characterMaskConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfig; /** * Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297). * Structure is documented below. */ cryptoDeterministicConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfig; /** * Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. * Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). * Currently, only string and integer values can be hashed. * See https://cloud.google.com/dlp/docs/pseudonymization to learn more. * Structure is documented below. */ cryptoHashConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfig; /** * Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the `content.reidentify` API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. 
In the case that the identifier is the empty string, it will be skipped. See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. * Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity. * Structure is documented below. */ cryptoReplaceFfxFpeConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfig; /** * Shifts dates by random number of days, with option to be consistent for the same context. See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more. * Structure is documented below. */ dateShiftConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfig; /** * Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. * The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lowerBound = 10 and upperBound = 20, all values that are within this bucket will be replaced with "10-20". * This can be used on data of type: double, long. * If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. * See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more. * Structure is documented below. */ fixedSizeBucketingConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfig; /** * Redact a given value. 
For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '. */ redactConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationRedactConfig; /** * Replace each input value with a given value. * Structure is documented below. */ replaceConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfig; /** * Replace with a value randomly drawn (with replacement) from a dictionary. * Structure is documented below. */ replaceDictionaryConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfig; /** * Replace each matching finding with the name of the info type. */ replaceWithInfoTypeConfig?: boolean; /** * For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value. * Structure is documented below. */ timePartConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationTimePartConfig; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfig { /** * Set of buckets. Ranges must be non-overlapping. * Bucket is represented as a range, along with replacement values. * Structure is documented below. */ buckets?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucket[]; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucket { /** * Upper bound of the range, exclusive; type must match min. * The `max` block must only contain one argument. 
See the `bucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ max?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMax; /** * Lower bound of the range, inclusive. Type should be the same as max if used. * The `min` block must only contain one argument. See the `bucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ min?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMin; /** * Replacement value for this bucket. * The `replacementValue` block must only contain one argument. * Structure is documented below. */ replacementValue: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValue; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMax { /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. 
*/ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMin { /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinDateValue; /** * Represents a day of the week. 
* Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. 
*/ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValue { /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueTimeValue { /** * Hours of day in 24 hour format. 
Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfig { /** * Characters to skip when doing de-identification of a value. These will be left alone and skipped. * Structure is documented below. */ charactersToIgnores?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigCharactersToIgnore[]; /** * Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string * such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for * strings, and 0 for digits. */ maskingCharacter?: string; /** * Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally. * If numberToMask is negative, this denotes inverse masking. Cloud DLP masks all but a number of characters. For example, suppose you have the following values: */ numberToMask?: number; /** * Mask characters in reverse order. For example, if maskingCharacter is 0, numberToMask is 14, and reverseOrder is `false`, then the * input string `1234-5678-9012-3456` is masked as `00000000000000-3456`. */ reverseOrder?: boolean; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigCharactersToIgnore { /** * Characters to not transform when masking. 
Only one of this or `commonCharactersToIgnore` must be specified. */ charactersToSkip?: string; /** * Common characters to not transform when masking. Useful to avoid removing punctuation. Only one of this or `charactersToSkip` must be specified. * Possible values are: `NUMERIC`, `ALPHA_UPPER_CASE`, `ALPHA_LOWER_CASE`, `PUNCTUATION`, `WHITESPACE`. */ commonCharactersToIgnore?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfig { /** * A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. * If the context is not set, plaintext would be used as is for encryption. If the context is set but: * 1. there is no record present when transforming a given value or * 2. the field is not present when transforming a given value, * plaintext would be used as is for encryption. * Note that case (1) is expected when an InfoTypeTransformation is applied to both structured and unstructured ContentItems. * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContext; /** * The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKey; /** * The custom info type to annotate the surrogate with. 
This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} * For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' * This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. * Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. * In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either * * reverse a surrogate that does not correspond to an actual identifier * * be unable to parse the surrogate and result in an error * Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE. * Structure is documented below. */ surrogateInfoType?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContext { /** * Name describing the field. 
*/ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. 
*/ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`. 
*/ name?: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore; /** * Optional version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfig { /** * The key used by the encryption function. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKey; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. 
*/ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). 
*/ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfig { /** * Common alphabets. Only one of this, `customAlphabet` or `radix` must be specified. * Possible values are: `NUMERIC`, `HEXADECIMAL`, `UPPER_CASE_ALPHA_NUMERIC`, `ALPHA_NUMERIC`. */ commonAlphabet?: string; /** * The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. * If the context is set but: * 1. there is no record present when transforming a given value or * 2. the field is not present when transforming a given value, * a default tweak will be used. * Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s. Currently, the referenced field may be of value type integer or string. * The tweak is constructed as a sequence of bytes in big endian byte order such that: * * a 64 bit integer is encoded followed by a single byte of value 1 * * a string is encoded in UTF-8 format followed by a single byte of value 2 * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigContext; /** * The key used by the encryption algorithm. * Structure is documented below. 
*/ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey; /** * This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: * ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~`!@#$%^&*()_-+={[}]|:;"'<,>.?/``. Only one of this, `commonAlphabet` or `radix` must be specified. */ customAlphabet?: string; /** * The native way to select the alphabet. Must be in the range \[2, 95\]. Only one of this, `customAlphabet` or `commonAlphabet` must be specified. */ radix?: number; /** * The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate * For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' * This annotation identifies the surrogate when inspecting content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. * In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. 
One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE * Structure is documented below. */ surrogateInfoType?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigContext { /** * Name describing the field. */ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. 
*/ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. 
*/ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`. */ name?: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore; /** * Optional version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfig { /** * Points to the field that contains the context, for example, an entity id. * If set, must also set cryptoKey. If set, shift will be consistent for the given context. * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigContext; /** * Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and cryptoKey. If set, must also set context. 
Can only be applied to table items. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKey; /** * For example, -5 means shift date to at most 5 days back in the past. */ lowerBoundDays: number; /** * Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. * For example, 3 means shift date to at most 3 days into the future. */ upperBoundDays: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigContext { /** * Name describing the field. */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. 
*/ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfig { /** * Size of each bucket (except for minimum and maximum buckets). 
* So if lowerBound = 10, upperBound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. * Precision up to 2 decimals works. */ bucketSize: number; /** * Lower bound value of buckets. * All values less than lowerBound are grouped together into a single bucket; for example if lowerBound = 10, then all values less than 10 are replaced with the value "-10". * The `lowerBound` block must only contain one argument. See the `fixedSizeBucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ lowerBound: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBound; /** * Upper bound value of buckets. * All values greater than upperBound are grouped together into a single bucket; for example if upperBound = 89, then all values greater than 89 are replaced with the value "89+". * The `upperBound` block must only contain one argument. See the `fixedSizeBucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ upperBound: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBound; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBound { /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBound { /** * A float value. 
*/ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationRedactConfig { } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfig { /** * Replace each input value with a given value. * The `newValue` block must only contain one argument. For example when replacing the contents of a string-type field, only `stringValue` should be set. * Structure is documented below. */ newValue: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValue; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValue { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: number; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
*/ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfig { /** * A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries. * Structure is documented below. */ wordList: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfigWordList; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfigWordList { /** * Words or phrases defining the dictionary. 
The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationTimePartConfig { /** * The part of the time to keep. * Possible values are: `YEAR`, `MONTH`, `DAY_OF_MONTH`, `DAY_OF_WEEK`, `WEEK_OF_YEAR`, `HOUR_OF_DAY`. */ partToExtract?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformations { /** * Transform the record by applying various field transformations. * Structure is documented below. */ fieldTransformations?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformation[]; /** * Configuration defining which records get suppressed entirely. Records that match any suppression rule are omitted from the output. * Structure is documented below. */ recordSuppressions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppression[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformation { /** * Only apply the transformation if the condition evaluates to true for the given RecordCondition. The conditions are allowed to reference fields that are not used in the actual transformation. * Example Use Cases: * - Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. * - Redact a field if the date of birth field is greater than 85. * Structure is documented below. */ condition?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationCondition; /** * Input field(s) to apply the transformation to. When you have columns that reference their position within a list, omit the index from the FieldId. * FieldId name matching ignores the index. 
For example, instead of "contact.nums[0].type", use "contact.nums.type". * Structure is documented below. */ fields: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationField[]; /** * Treat the contents of the field as free text, and selectively transform content that matches an InfoType. * Only one of `primitiveTransformation` or `infoTypeTransformations` must be specified. * Structure is documented below. */ infoTypeTransformations?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformations; /** * Apply the transformation to the entire field. * The `primitiveTransformation` block must only contain one argument, corresponding to the type of transformation. * Only one of `primitiveTransformation` or `infoTypeTransformations` must be specified. * Structure is documented below. */ primitiveTransformation?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformation; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationCondition { /** * An expression, consisting of an operator and conditions. * Structure is documented below. */ expressions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressions; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressions { /** * Conditions to apply to the expression. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditions; /** * The operator to apply to the result of conditions. Default and currently only supported value is AND. * Default value is `AND`. * Possible values are: `AND`. 
*/ logicalOperator?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditions { /** * A collection of conditions. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsCondition[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsCondition { /** * Field within the record this condition is evaluated against. * Structure is documented below. */ field: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionField; /** * Operator used to compare the field or infoType to the value. * Possible values are: `EQUAL_TO`, `NOT_EQUAL_TO`, `GREATER_THAN`, `LESS_THAN`, `GREATER_THAN_OR_EQUALS`, `LESS_THAN_OR_EQUALS`, `EXISTS`. */ operator: string; /** * Value to compare against. [Mandatory, except for EXISTS tests.] * Structure is documented below. */ value?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionValue; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionField { /** * Name describing the field. */ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionValue { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionValueDateValue; /** * Represents a day of the week. 
* Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationConditionExpressionsConditionsConditionValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. 
*/ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationField { /** * Name describing the field. */ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformations { /** * Transformation for each infoType. Cannot specify more than one for a given infoType. * Structure is documented below. */ transformations: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformation[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformation { /** * InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to * all findings that correspond to infoTypes that were requested in InspectConfig. * Structure is documented below. */ infoTypes?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationInfoType[]; /** * Apply the transformation to the entire field. * The `primitiveTransformation` block must only contain one argument, corresponding to the type of transformation. * Structure is documented below. */ primitiveTransformation: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformation; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationInfoType { /** * Name of the information type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. 
*/ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformation { /** * Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 > LOW 31-65 > MEDIUM 66-100 > HIGH * This can be used on data of type: number, long, string, timestamp. * If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. * See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more. * Structure is documented below. */ bucketingConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfig; /** * Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3). * Structure is documented below. 
*/ characterMaskConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfig; /** * Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297). * Structure is documented below. */ cryptoDeterministicConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfig; /** * Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. * Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). * Currently, only string and integer values can be hashed. * See https://cloud.google.com/dlp/docs/pseudonymization to learn more. * Structure is documented below. */ cryptoHashConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfig; /** * Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the `content.reidentify` API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. 
See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. * Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity. * Structure is documented below. */ cryptoReplaceFfxFpeConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfig; /** * Shifts dates by random number of days, with option to be consistent for the same context. See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more. * Structure is documented below. */ dateShiftConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfig; /** * Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. * The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lowerBound = 10 and upperBound = 20, all values that are within this bucket will be replaced with "10-20". * This can be used on data of type: double, long. * If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. * See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more. * Structure is documented below. */ fixedSizeBucketingConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfig; /** * Redact a given value. 
For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '. */ redactConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationRedactConfig; /** * Replace each input value with a given value. * Structure is documented below. */ replaceConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfig; /** * Replace with a value randomly drawn (with replacement) from a dictionary. * Structure is documented below. */ replaceDictionaryConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfig; /** * Replace each matching finding with the name of the info type. */ replaceWithInfoTypeConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceWithInfoTypeConfig; /** * For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value. * Structure is documented below. */ timePartConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationTimePartConfig; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfig { /** * Set of buckets. Ranges must be non-overlapping. * Bucket is represented as a range, along with replacement values. * Structure is documented below. 
*/ buckets: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucket[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucket { /** * Upper bound of the range, exclusive; type must match min. * The `max` block must only contain one argument. See the `bucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ max?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMax; /** * Lower bound of the range, inclusive. Type should be the same as max if used. * The `min` block must only contain one argument. See the `bucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ min?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMin; /** * Replacement value for this bucket. * The `replacementValue` block must only contain one argument. * Structure is documented below. */ replacementValue: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValue; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMax { /** * Represents a whole or partial calendar date. * Structure is documented below. 
*/ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMaxTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. 
Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMin { /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. 
Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketMinTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValue { /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. 
*/ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfig { /** * Characters to skip when doing de-identification of a value. 
These will be left alone and skipped. * Structure is documented below. */ charactersToIgnores?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigCharactersToIgnore[]; /** * Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string * such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for * strings, and 0 for digits. */ maskingCharacter?: string; /** * Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally. * If numberToMask is negative, this denotes inverse masking. Cloud DLP masks all but a number of characters. For example, suppose you have the following values: */ numberToMask?: number; /** * Mask characters in reverse order. For example, if maskingCharacter is 0, numberToMask is 14, and reverseOrder is `false`, then the * input string `1234-5678-9012-3456` is masked as `00000000000000-3456`. */ reverseOrder?: boolean; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigCharactersToIgnore { /** * Characters to not transform when masking. Only one of this or `commonCharactersToIgnore` must be specified. */ charactersToSkip?: string; /** * Common characters to not transform when masking. Useful to avoid removing punctuation. Only one of this or `charactersToSkip` must be specified. * Possible values are: `NUMERIC`, `ALPHA_UPPER_CASE`, `ALPHA_LOWER_CASE`, `PUNCTUATION`, `WHITESPACE`. 
*/ commonCharactersToIgnore?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfig { /** * A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. * If the context is not set, plaintext would be used as is for encryption. If the context is set but: * 1. there is no record present when transforming a given value or * 2. the field is not present when transforming a given value, * plaintext would be used as is for encryption. * Note that case (1) is expected when an InfoTypeTransformation is applied to both structured and unstructured ContentItems. * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContext; /** * The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use. * Structure is documented below. */ cryptoKey: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKey; /** * The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. 
The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} * For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' * This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. * Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. * In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either * * reverse a surrogate that does not correspond to an actual identifier * * be unable to parse the surrogate and result in an error * Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE. * Structure is documented below. */ surrogateInfoType: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContext { /** * Name describing the field. 
*/ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. 
*/ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType { /** * Name of the information type. 
Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore; /** * Optional version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfig { /** * The key used by the encryption function. * Structure is documented below. */ cryptoKey: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKey; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. 
Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. 
*/ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfig { /** * Common alphabets. Only one of this, `customAlphabet` or `radix` must be specified. * Possible values are: `NUMERIC`, `HEXADECIMAL`, `UPPER_CASE_ALPHA_NUMERIC`, `ALPHA_NUMERIC`. */ commonAlphabet?: string; /** * The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. * If the context is set but: * 1. there is no record present when transforming a given value or * 2. the field is not present when transforming a given value, * a default tweak will be used. * Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s. Currently, the referenced field may be of value type integer or string. 
* The tweak is constructed as a sequence of bytes in big endian byte order such that: * * a 64 bit integer is encoded followed by a single byte of value 1 * * a string is encoded in UTF-8 format followed by a single byte of value 2 * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigContext; /** * The key used by the encryption algorithm. * Structure is documented below. */ cryptoKey: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey; /** * This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: * ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~`!@#$%^&*()_-+={[}]|:;"'<,>.?/``. Only one of this, `commonAlphabet` or `radix` must be specified. */ customAlphabet?: string; /** * The native way to select the alphabet. Must be in the range \[2, 95\]. Only one of this, `customAlphabet` or `commonAlphabet` must be specified. */ radix?: number; /** * The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. 
The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate * For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' * This annotation identifies the surrogate when inspecting content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. * In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE * Structure is documented below. */ surrogateInfoType?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigContext { /** * Name describing the field. */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. 
The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. 
*/ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. 
*/ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore; /** * Optional version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfig { /** * Points to the field that contains the context, for example, an entity id. * If set, must also set cryptoKey. If set, shift will be consistent for the given context. * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigContext; /** * Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and cryptoKey. If set, must also set context. Can only be applied to table items. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKey; /** * For example, -5 means shift date to at most 5 days back in the past. */ lowerBoundDays: number; /** * Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. 
Must not be more than 365250 days (1000 years) each direction. * For example, 3 means shift date to at most 3 days into the future. */ upperBoundDays: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigContext { /** * Name describing the field. */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. 
Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfig { /** * Size of each bucket (except for minimum and maximum buckets). 
* So if lowerBound = 10, upperBound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. * Precision up to 2 decimals works. */ bucketSize: number; /** * Lower bound value of buckets. * All values less than lowerBound are grouped together into a single bucket; for example if lowerBound = 10, then all values less than 10 are replaced with the value "-10". * The `lowerBound` block must only contain one argument. See the `fixedSizeBucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ lowerBound: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBound; /** * Upper bound value of buckets. * All values greater than upperBound are grouped together into a single bucket; for example if upperBound = 89, then all values greater than 89 are replaced with the value "89+". * The `upperBound` block must only contain one argument. See the `fixedSizeBucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ upperBound: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBound; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBound { /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBound { /** * A float value. 
*/ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationRedactConfig { } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfig { /** * Replace each input value with a given value. * The `newValue` block must only contain one argument. For example when replacing the contents of a string-type field, only `stringValue` should be set. * Structure is documented below. */ newValue: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValue; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValue { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. 
*/ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfig { /** * A list of words to select from for random replacement. 
The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries. * Structure is documented below. */ wordList: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfigWordList; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceDictionaryConfigWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationReplaceWithInfoTypeConfig { } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationInfoTypeTransformationsTransformationPrimitiveTransformationTimePartConfig { /** * The part of the time to keep. * Possible values are: `YEAR`, `MONTH`, `DAY_OF_MONTH`, `DAY_OF_WEEK`, `WEEK_OF_YEAR`, `HOUR_OF_DAY`. */ partToExtract: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformation { /** * Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 > LOW 31-65 > MEDIUM 66-100 > HIGH * This can be used on data of type: number, long, string, timestamp. * If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. * See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more. * Structure is documented below. 
*/ bucketingConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfig; /** * Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3). * Structure is documented below. */ characterMaskConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCharacterMaskConfig; /** * Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297). * Structure is documented below. */ cryptoDeterministicConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfig; /** * Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. * Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). * Currently, only string and integer values can be hashed. * See https://cloud.google.com/dlp/docs/pseudonymization to learn more. * Structure is documented below. 
*/ cryptoHashConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfig; /** * Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the `content.reidentify` API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. * Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity. * Structure is documented below. */ cryptoReplaceFfxFpeConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfig; /** * Shifts dates by random number of days, with option to be consistent for the same context. See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more. * Structure is documented below. */ dateShiftConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfig; /** * Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. * The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. 
For example, if lowerBound = 10 and upperBound = 20, all values that are within this bucket will be replaced with "10-20". * This can be used on data of type: double, long. * If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. * See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more. * Structure is documented below. */ fixedSizeBucketingConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfig; /** * Redact a given value. For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '. */ redactConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationRedactConfig; /** * Replace each input value with a given value. * Structure is documented below. */ replaceConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfig; /** * Replace with a value randomly drawn (with replacement) from a dictionary. * Structure is documented below. */ replaceDictionaryConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceDictionaryConfig; /** * For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value. * Structure is documented below. */ timePartConfig?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationTimePartConfig; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfig { /** * Set of buckets. 
Ranges must be non-overlapping. * Bucket is represented as a range, along with replacement values. * Structure is documented below. */ buckets?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucket[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucket { /** * Upper bound of the range, exclusive; type must match min. * The `max` block must only contain one argument. See the `bucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ max?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMax; /** * Lower bound of the range, inclusive. Type should be the same as max if used. * The `min` block must only contain one argument. See the `bucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ min?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMin; /** * Replacement value for this bucket. * The `replacementValue` block must only contain one argument. * Structure is documented below. */ replacementValue: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketReplacementValue; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMax { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. 
*/ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMaxDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMaxTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMaxDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMaxTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. 
*/ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMin { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMinDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMinTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMinDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. 
*/ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketMinTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketReplacementValue { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
*/ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationBucketingConfigBucketReplacementValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCharacterMaskConfig { /** * Characters to skip when doing de-identification of a value. These will be left alone and skipped. * Structure is documented below. */ charactersToIgnores?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCharacterMaskConfigCharactersToIgnore[]; /** * Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string * such as ZIP code or credit card number. This string must have a length of 1. 
If not supplied, this value defaults to * for * strings, and 0 for digits. */ maskingCharacter?: string; /** * Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally. * If numberToMask is negative, this denotes inverse masking. Cloud DLP masks all but a number of characters. For example, suppose you have the following values: */ numberToMask?: number; /** * Mask characters in reverse order. For example, if maskingCharacter is 0, numberToMask is 14, and reverseOrder is `false`, then the * input string `1234-5678-9012-3456` is masked as `00000000000000-3456`. */ reverseOrder?: boolean; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCharacterMaskConfigCharactersToIgnore { /** * Characters to not transform when masking. Only one of this or `commonCharactersToIgnore` must be specified. */ charactersToSkip?: string; /** * Common characters to not transform when masking. Useful to avoid removing punctuation. Only one of this or `charactersToSkip` must be specified. * Possible values are: `NUMERIC`, `ALPHA_UPPER_CASE`, `ALPHA_LOWER_CASE`, `PUNCTUATION`, `WHITESPACE`. */ commonCharactersToIgnore?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfig { /** * A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. * If the context is not set, plaintext would be used as is for encryption. If the context is set but: * 1. 
there is no record present when transforming a given value or * 2. the field is not present when transforming a given value, * plaintext would be used as is for encryption. * Note that case (1) is expected when an InfoTypeTransformation is applied to both structured and unstructured ContentItems. * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigContext; /** * The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKey; /** * The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} * For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' * This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. * Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. 
* In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either * * reverse a surrogate that does not correspond to an actual identifier * * be unable to parse the surrogate and result in an error * Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE. * Structure is documented below. */ surrogateInfoType?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigContext { /** * Name describing the field. */ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. 
*/ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). 
*/ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`. */ name?: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore; /** * Optional version name for this InfoType. */ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfig { /** * The key used by the encryption function. * Structure is documented below. 
*/ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKey; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. 
*/ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfig { /** * Common alphabets. Only one of this, `customAlphabet` or `radix` must be specified. * Possible values are: `NUMERIC`, `HEXADECIMAL`, `UPPER_CASE_ALPHA_NUMERIC`, `ALPHA_NUMERIC`. */ commonAlphabet?: string; /** * The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. * If the context is set but: * 1. 
there is no record present when transforming a given value or * 2. the field is not present when transforming a given value, * a default tweak will be used. * Note that case (1) is expected when an `InfoTypeTransformation` is applied to both structured and non-structured `ContentItem`s. Currently, the referenced field may be of value type integer or string. * The tweak is constructed as a sequence of bytes in big endian byte order such that: * * a 64 bit integer is encoded followed by a single byte of value 1 * * a string is encoded in UTF-8 format followed by a single byte of value 2 * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigContext; /** * The key used by the encryption algorithm. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey; /** * This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: * ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~`!@#$%^&*()_-+={[}]|:;"'<,>.?/``. Only one of this, `commonAlphabet` or `radix` must be specified. */ customAlphabet?: string; /** * The native way to select the alphabet. Must be in the range \[2, 95\]. Only one of this, `customAlphabet` or `commonAlphabet` must be specified. */ radix?: number; /** * The custom infoType to annotate the surrogate with. 
This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate * For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' * This annotation identifies the surrogate when inspecting content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. * In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE * Structure is documented below. */ surrogateInfoType?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigContext { /** * Name describing the field. */ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey { /** * KMS wrapped key. 
* Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. */ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. 
*/ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`. */ name?: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore; /** * Optional version name for this InfoType. 
*/ version?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfig { /** * Points to the field that contains the context, for example, an entity id. * If set, must also set cryptoKey. If set, shift will be consistent for the given context. * Structure is documented below. */ context?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigContext; /** * Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and cryptoKey. If set, must also set context. Can only be applied to table items. * Structure is documented below. */ cryptoKey?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKey; /** * For example, -5 means shift date to at most 5 days back in the past. */ lowerBoundDays: number; /** * Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. * For example, 3 means shift date to at most 3 days into the future. */ upperBoundDays: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigContext { /** * Name describing the field. 
*/ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKey { /** * KMS wrapped key. * Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt * For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, `transient` or `unwrapped` must be specified. * Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing). * Structure is documented below. */ kmsWrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped; /** * Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, `unwrapped` or `kmsWrapped` must be specified. * Structure is documented below. */ transient?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKeyTransient; /** * Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, `transient` or `kmsWrapped` must be specified. * Structure is documented below. 
*/ unwrapped?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped { /** * The resource name of the KMS CryptoKey to use for unwrapping. */ cryptoKeyName: string; /** * The wrapped data crypto key. * A base64-encoded string. */ wrappedKey: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKeyTransient { /** * Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate `TransientCryptoKey` protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated). */ name: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped { /** * A 128/192/256 bit key. * A base64-encoded string. * **Note**: This property is sensitive and will not be displayed in the plan. */ key: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfig { /** * Size of each bucket (except for minimum and maximum buckets). * So if lowerBound = 10, upperBound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. * Precision up to 2 decimals works. */ bucketSize: number; /** * Lower bound value of buckets. 
* All values less than lowerBound are grouped together into a single bucket; for example if lowerBound = 10, then all values less than 10 are replaced with the value "-10". * The `lowerBound` block must only contain one argument. See the `fixedSizeBucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ lowerBound: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBound; /** * Upper bound value of buckets. * All values greater than upperBound are grouped together into a single bucket; for example if upperBound = 89, then all values greater than 89 are replaced with the value "89+". * The `upperBound` block must only contain one argument. See the `fixedSizeBucketingConfig` block description for more information about choosing a data type. * Structure is documented below. */ upperBound: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBound; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBound { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. 
*/ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBound { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. 
*/ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. 
Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationRedactConfig { } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfig { /** * Replace each input value with a given value. * The `newValue` block must only contain one argument. For example when replacing the contents of a string-type field, only `stringValue` should be set. * Structure is documented below. */ newValue: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfigNewValue; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfigNewValue { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. */ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfigNewValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfigNewValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
* Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfigNewValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceConfigNewValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceDictionaryConfig { /** * A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries. * Structure is documented below. 
*/ wordList?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceDictionaryConfigWordList; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationReplaceDictionaryConfigWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationPrimitiveTransformationTimePartConfig { /** * The part of the time to keep. * Possible values are: `YEAR`, `MONTH`, `DAY_OF_MONTH`, `DAY_OF_WEEK`, `WEEK_OF_YEAR`, `HOUR_OF_DAY`. */ partToExtract?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppression { /** * A condition that when it evaluates to true will result in the record being evaluated to be suppressed from the transformed content. * Structure is documented below. */ condition?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionCondition; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionCondition { /** * An expression, consisting of an operator and conditions. * Structure is documented below. */ expressions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressions; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressions { /** * Conditions to apply to the expression. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditions; /** * The operator to apply to the result of conditions. 
Default and currently only supported value is AND. * Default value is `AND`. * Possible values are: `AND`. */ logicalOperator?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditions { /** * A collection of conditions. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsCondition[]; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsCondition { /** * Field within the record this condition is evaluated against. * Structure is documented below. */ field: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionField; /** * Operator used to compare the field or infoType to the value. * Possible values are: `EQUAL_TO`, `NOT_EQUAL_TO`, `GREATER_THAN`, `LESS_THAN`, `GREATER_THAN_OR_EQUALS`, `LESS_THAN_OR_EQUALS`, `EXISTS`. */ operator: string; /** * Value to compare against. [Mandatory, except for EXISTS tests.] * Structure is documented below. */ value?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionValue; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionField { /** * Name describing the field. */ name?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionValue { /** * A boolean value. */ booleanValue?: boolean; /** * Represents a whole or partial calendar date. * Structure is documented below. 
*/ dateValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionValueDateValue; /** * Represents a day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeekValue?: string; /** * A float value. */ floatValue?: number; /** * An integer value (int64 format) */ integerValue?: string; /** * A string value. */ stringValue?: string; /** * Represents a time of day. * Structure is documented below. */ timeValue?: outputs.dataloss.PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionValueTimeValue; /** * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ timestampValue?: string; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionValueDateValue { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface PreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionConditionExpressionsConditionsConditionValueTimeValue { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. 
*/ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface PreventionDiscoveryConfigAction { /** * Export data profiles into a provided location * Structure is documented below. */ exportData?: outputs.dataloss.PreventionDiscoveryConfigActionExportData; /** * Publish a message into the Pub/Sub topic. * Structure is documented below. */ pubSubNotification?: outputs.dataloss.PreventionDiscoveryConfigActionPubSubNotification; /** * Publish a portion of each profile to Dataplex Universal Catalog with the aspect type Sensitive Data Protection Profile. */ publishToDataplexCatalog?: outputs.dataloss.PreventionDiscoveryConfigActionPublishToDataplexCatalog; /** * Tag the profiled resources with the specified tag values. * Structure is documented below. */ tagResources?: outputs.dataloss.PreventionDiscoveryConfigActionTagResources; } interface PreventionDiscoveryConfigActionExportData { /** * Store all table and column profiles in an existing table or a new table in an existing dataset. Each re-generation will result in a new row in BigQuery * Structure is documented below. */ profileTable?: outputs.dataloss.PreventionDiscoveryConfigActionExportDataProfileTable; } interface PreventionDiscoveryConfigActionExportDataProfileTable { /** * Dataset Id of the table */ datasetId?: string; /** * The Google Cloud Platform project ID of the project containing the table. If omitted, the project ID is inferred from the API call. */ projectId?: string; /** * Name of the table */ tableId?: string; } interface PreventionDiscoveryConfigActionPubSubNotification { /** * How much data to include in the pub/sub message. * Possible values are: `TABLE_PROFILE`, `RESOURCE_NAME`. */ detailOfMessage?: string; /** * The type of event that triggers a Pub/Sub. At most one PubSubNotification per EventType is permitted. 
* Possible values are: `NEW_PROFILE`, `CHANGED_PROFILE`, `SCORE_INCREASED`, `ERROR_CHANGED`. */ event?: string; /** * Conditions for triggering pubsub * Structure is documented below. */ pubsubCondition?: outputs.dataloss.PreventionDiscoveryConfigActionPubSubNotificationPubsubCondition; /** * Cloud Pub/Sub topic to send notifications to. Format is projects/{project}/topics/{topic}. */ topic?: string; } interface PreventionDiscoveryConfigActionPubSubNotificationPubsubCondition { /** * An expression * Structure is documented below. */ expressions?: outputs.dataloss.PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressions; } interface PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressions { /** * Conditions to apply to the expression * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition[]; /** * The operator to apply to the collection of conditions * Possible values are: `OR`, `AND`. */ logicalOperator?: string; } interface PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition { /** * The minimum data risk score that triggers the condition. * Possible values are: `HIGH`, `MEDIUM_OR_HIGH`. */ minimumRiskScore?: string; /** * The minimum sensitivity level that triggers the condition. * Possible values are: `HIGH`, `MEDIUM_OR_HIGH`. */ minimumSensitivityScore?: string; } interface PreventionDiscoveryConfigActionPublishToDataplexCatalog { } interface PreventionDiscoveryConfigActionTagResources { /** * Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. 
For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. */ lowerDataRiskToLow?: boolean; /** * The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. */ profileGenerationsToTags?: string[]; /** * The tags to associate with different conditions. * Structure is documented below. */ tagConditions?: outputs.dataloss.PreventionDiscoveryConfigActionTagResourcesTagCondition[]; } interface PreventionDiscoveryConfigActionTagResourcesTagCondition { /** * Conditions attaching the tag to a resource on its profile having this sensitivity score. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore; /** * The tag value to attach to resources. * Structure is documented below. */ tag?: outputs.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionTag; } interface PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`, `SENSITIVITY_UNKNOWN`. */ score: string; } interface PreventionDiscoveryConfigActionTagResourcesTagConditionTag { /** * The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
*/ namespacedValue?: string; } interface PreventionDiscoveryConfigError { /** * A list of messages that carry the error details. */ details?: outputs.dataloss.PreventionDiscoveryConfigErrorDetails; /** * The times the error occurred. List includes the oldest timestamp and the last 9 timestamps. */ timestamp?: string; } interface PreventionDiscoveryConfigErrorDetails { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. */ details?: { [key: string]: string; }[]; /** * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ message?: string; } interface PreventionDiscoveryConfigOrgConfig { /** * The data to scan folder org or project * Structure is documented below. */ location?: outputs.dataloss.PreventionDiscoveryConfigOrgConfigLocation; /** * The project that will run the scan. The DLP service account that exists within this project must have access to all resources that are profiled, and the cloud DLP API must be enabled. */ projectId?: string; } interface PreventionDiscoveryConfigOrgConfigLocation { /** * The ID for the folder within an organization to scan */ folderId?: string; /** * The ID of an organization to scan */ organizationId?: string; } interface PreventionDiscoveryConfigOtherCloudStartingLocation { /** * A nested object resource. * Structure is documented below. */ awsLocation?: outputs.dataloss.PreventionDiscoveryConfigOtherCloudStartingLocationAwsLocation; } interface PreventionDiscoveryConfigOtherCloudStartingLocationAwsLocation { /** * The AWS account ID that this discovery config applies to. Within an organization, you can find the AWS account ID inside an AWS account ARN. 
Example: arn::organizations:::account// */ accountId?: string; /** * All AWS assets stored in Asset Inventory that didn't match other AWS discovery configs. */ allAssetInventoryAssets?: boolean; } interface PreventionDiscoveryConfigTarget { /** * BigQuery target for Discovery. The first target to match a table will be the one applied. * Structure is documented below. */ bigQueryTarget?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTarget; /** * Cloud SQL target for Discovery. The first target to match a table will be the one applied. * Structure is documented below. */ cloudSqlTarget?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTarget; /** * Cloud Storage target for Discovery. The first target to match a bucket will be the one applied. * Structure is documented below. */ cloudStorageTarget?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTarget; /** * Other clouds target for discovery. The first target to match a resource will be the one applied. * Structure is documented below. */ otherCloudTarget?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTarget; /** * Discovery target that looks for credentials and secrets stored in cloud resource metadata and reports them as vulnerabilities to Security Command Center. Only one target of this type is allowed. */ secretsTarget?: outputs.dataloss.PreventionDiscoveryConfigTargetSecretsTarget; } interface PreventionDiscoveryConfigTargetBigQueryTarget { /** * How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. * Structure is documented below. */ cadence?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadence; /** * In addition to matching the filter, these conditions must be true before a profile is generated * Structure is documented below. 
*/ conditions?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetConditions; /** * Tables that match this filter will not have profiles created. */ disabled?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetDisabled; /** * Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table * Structure is documented below. */ filter?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetFilter; } interface PreventionDiscoveryConfigTargetBigQueryTargetCadence { /** * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. * Structure is documented below. */ inspectTemplateModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence; /** * Governs when to update data profiles when a schema is modified * Structure is documented below. */ schemaModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence; /** * Governs when to update profile when a table is modified. * Structure is documented below. */ tableModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence; } interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { /** * How frequently data profiles can be updated when the template is modified. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ frequency?: string; } interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence { /** * Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
*/ frequency?: string; /** * The types of schema modifications to consider. Defaults to NEW_COLUMNS. * Each value may be one of: `NEW_COLUMNS`, `REMOVED_COLUMNS`. */ types?: string[]; } interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence { /** * How frequently data profiles can be updated when tables are modified. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ frequency?: string; /** * The type of events to consider when deciding if the table has been modified and should have the profile updated. Defaults to MODIFIED_TIMESTAMP * Each value may be one of: `TABLE_MODIFIED_TIMESTAMP`. */ types?: string[]; } interface PreventionDiscoveryConfigTargetBigQueryTargetConditions { /** * File store must have been created after this date. Used to avoid backfilling. A timestamp in RFC3339 UTC "Zulu" format with nanosecond resolution and up to nine fractional digits. */ createdAfter?: string; /** * At least one of the conditions must be true for a table to be scanned. * Structure is documented below. */ orConditions?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetConditionsOrConditions; /** * Restrict discovery to categories of table types. Currently view, materialized view, snapshot and non-biglake external tables are supported. * Possible values are: `BIG_QUERY_COLLECTION_ALL_TYPES`, `BIG_QUERY_COLLECTION_ONLY_SUPPORTED_TYPES`. */ typeCollection?: string; /** * Data profiles will only be generated for the database resource types specified in this field. If not specified, defaults to [DATABASE_RESOURCE_TYPE_ALL_SUPPORTED_TYPES]. * Each value may be one of: `DATABASE_RESOURCE_TYPE_ALL_SUPPORTED_TYPES`, `DATABASE_RESOURCE_TYPE_TABLE`. */ types?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetConditionsTypes; } interface PreventionDiscoveryConfigTargetBigQueryTargetConditionsOrConditions { /** * Duration format. 
The minimum age a table must have before Cloud DLP can profile it. Value greater than 1. */ minAge?: string; /** * Minimum number of rows that should be present before Cloud DLP profiles a table. */ minRowCount?: number; } interface PreventionDiscoveryConfigTargetBigQueryTargetConditionsTypes { /** * A set of BigQuery table types * Each value may be one of: `BIG_QUERY_TABLE_TYPE_TABLE`, `BIG_QUERY_TABLE_TYPE_EXTERNAL_BIG_LAKE`. */ types?: string[]; } interface PreventionDiscoveryConfigTargetBigQueryTargetDisabled { } interface PreventionDiscoveryConfigTargetBigQueryTargetFilter { /** * Catch-all. This should always be the last filter in the list because anything above it will apply first. */ otherTables?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetFilterOtherTables; /** * The table to scan. Discovery configurations including this can only include one DiscoveryTarget (the DiscoveryTarget with this TableReference). * Structure is documented below. */ tableReference?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetFilterTableReference; /** * A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. * Structure is documented below. */ tables?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetFilterTables; } interface PreventionDiscoveryConfigTargetBigQueryTargetFilterOtherTables { } interface PreventionDiscoveryConfigTargetBigQueryTargetFilterTableReference { /** * Dataset ID of the table. */ datasetId: string; /** * Name of the table. */ tableId: string; } interface PreventionDiscoveryConfigTargetBigQueryTargetFilterTables { /** * A collection of regular expressions to match a BQ table against. * Structure is documented below. 
*/ includeRegexes?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetFilterTablesIncludeRegexes; } interface PreventionDiscoveryConfigTargetBigQueryTargetFilterTablesIncludeRegexes { /** * The group of regular expression patterns to match against one or more resources. Maximum of 100 entries. The sum of all lengths of regular expressions can't exceed 10 KiB. * Structure is documented below. */ patterns?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetFilterTablesIncludeRegexesPattern[]; } interface PreventionDiscoveryConfigTargetBigQueryTargetFilterTablesIncludeRegexesPattern { /** * if unset, this property matches all datasets */ datasetIdRegex?: string; /** * For organizations, if unset, will match all projects. Has no effect for data profile configurations created within a project. */ projectIdRegex?: string; /** * if unset, this property matches all tables */ tableIdRegex?: string; } interface PreventionDiscoveryConfigTargetCloudSqlTarget { /** * In addition to matching the filter, these conditions must be true before a profile is generated. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetConditions; /** * Disable profiling for database resources that match this filter. */ disabled?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetDisabled; /** * Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. * Structure is documented below. */ filter: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetFilter; /** * How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. * Structure is documented below. 
*/ generationCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence; } interface PreventionDiscoveryConfigTargetCloudSqlTargetConditions { /** * Database engines that should be profiled. Optional. Defaults to ALL_SUPPORTED_DATABASE_ENGINES if unspecified. * Each value may be one of: `ALL_SUPPORTED_DATABASE_ENGINES`, `MYSQL`, `POSTGRES`. */ databaseEngines?: string[]; /** * Data profiles will only be generated for the database resource types specified in this field. If not specified, defaults to [DATABASE_RESOURCE_TYPE_ALL_SUPPORTED_TYPES]. * Each value may be one of: `DATABASE_RESOURCE_TYPE_ALL_SUPPORTED_TYPES`, `DATABASE_RESOURCE_TYPE_TABLE`. */ types?: string[]; } interface PreventionDiscoveryConfigTargetCloudSqlTargetDisabled { } interface PreventionDiscoveryConfigTargetCloudSqlTargetFilter { /** * A collection of resources for this filter to apply to. * Structure is documented below. */ collection?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetFilterCollection; /** * The database resource to scan. Targets including this can only include one target (the target with this database resource reference). * Structure is documented below. */ databaseResourceReference?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetFilterDatabaseResourceReference; /** * Match discovery resources not covered by any other filter. */ others?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthers; } interface PreventionDiscoveryConfigTargetCloudSqlTargetFilterCollection { /** * A collection of regular expressions to match a resource against. * Structure is documented below. */ includeRegexes?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetFilterCollectionIncludeRegexes; } interface PreventionDiscoveryConfigTargetCloudSqlTargetFilterCollectionIncludeRegexes { /** * The group of regular expression patterns to match against one or more resources. Maximum of 100 entries. 
The sum of all lengths of regular expressions can't exceed 10 KiB. * Structure is documented below. */ patterns?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetFilterCollectionIncludeRegexesPattern[]; } interface PreventionDiscoveryConfigTargetCloudSqlTargetFilterCollectionIncludeRegexesPattern { /** * Regex to test the database name against. If empty, all databases match. */ databaseRegex?: string; /** * Regex to test the database resource's name against. An example of a database resource name is a table's name. Other database resource names like view names could be included in the future. If empty, all database resources match. */ databaseResourceNameRegex?: string; /** * Regex to test the instance name against. If empty, all instances match. */ instanceRegex?: string; /** * For organizations, if unset, will match all projects. Has no effect for data profile configurations created within a project. */ projectIdRegex?: string; } interface PreventionDiscoveryConfigTargetCloudSqlTargetFilterDatabaseResourceReference { /** * Required. Name of a database within the instance. */ database: string; /** * Required. Name of a database resource, for example, a table within the database. */ databaseResource: string; /** * Required. The instance where this resource is located. For example: Cloud SQL instance ID. */ instance: string; /** * Required. If within a project-level config, then this must match the config's project ID. */ projectId: string; } interface PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthers { } interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence { /** * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. * Structure is documented below. 
*/ inspectTemplateModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence; /** * Frequency to update profiles regardless of whether the underlying resource has changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ refreshFrequency?: string; /** * Governs when to update data profiles when a schema is modified * Structure is documented below. */ schemaModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence; } interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { /** * How frequently data profiles can be updated when the template is modified. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ frequency: string; } interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence { /** * Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ frequency?: string; /** * The types of schema modifications to consider. Defaults to NEW_COLUMNS. * Each value may be one of: `NEW_COLUMNS`, `REMOVED_COLUMNS`. */ types?: string[]; } interface PreventionDiscoveryConfigTargetCloudStorageTarget { /** * In addition to matching the filter, these conditions must be true before a profile is generated. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetConditions; /** * Disable profiling for buckets that match this filter. */ disabled?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetDisabled; /** * The buckets the generationCadence applies to. 
The first target with a matching filter will be the one to apply to a bucket. * Structure is documented below. */ filter: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilter; /** * How often and when to update profiles. New buckets that match both the filter and conditions are scanned as quickly as possible depending on system capacity. * Structure is documented below. */ generationCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetGenerationCadence; } interface PreventionDiscoveryConfigTargetCloudStorageTargetConditions { /** * Cloud Storage conditions. * Structure is documented below. */ cloudStorageConditions?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetConditionsCloudStorageConditions; /** * File store must have been created after this date. Used to avoid backfilling. A timestamp in RFC3339 UTC "Zulu" format with nanosecond resolution and up to nine fractional digits. */ createdAfter?: string; /** * Duration format. Minimum age a resource must be before a profile can be generated. Value must be 1 hour or greater. Minimum age is not supported for Azure Blob Storage containers. */ minAge?: string; } interface PreventionDiscoveryConfigTargetCloudStorageTargetConditionsCloudStorageConditions { /** * Only objects with the specified attributes will be scanned. Defaults to [ALL_SUPPORTED_BUCKETS] if unset. * Each value may be one of: `ALL_SUPPORTED_BUCKETS`, `AUTOCLASS_DISABLED`, `AUTOCLASS_ENABLED`. */ includedBucketAttributes?: string[]; /** * Only objects with the specified attributes will be scanned. If an object has one of the specified attributes but is inside an excluded bucket, it will not be scanned. Defaults to [ALL_SUPPORTED_OBJECTS]. A profile will be created even if no objects match the included_object_attributes. * Each value may be one of: `ALL_SUPPORTED_OBJECTS`, `STANDARD`, `NEARLINE`, `COLDLINE`, `ARCHIVE`, `REGIONAL`, `MULTI_REGIONAL`, `DURABLE_REDUCED_AVAILABILITY`. 
*/ includedObjectAttributes?: string[]; } interface PreventionDiscoveryConfigTargetCloudStorageTargetDisabled { } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilter { /** * The bucket to scan. Targets including this can only include one target (the target with this bucket). This enables profiling the contents of a single bucket, while the other options allow for easy profiling of many buckets within a project or an organization. * Structure is documented below. */ cloudStorageResourceReference?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilterCloudStorageResourceReference; /** * A collection of resources for this filter to apply to. * Structure is documented below. */ collection?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollection; /** * Match discovery resources not covered by any other filter. */ others?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilterOthers; } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilterCloudStorageResourceReference { /** * The bucket to scan. */ bucketName?: string; /** * If within a project-level config, then this must match the config's project id. */ projectId?: string; } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollection { /** * A collection of regular expressions to match a resource against. * Structure is documented below. */ includeRegexes?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollectionIncludeRegexes; } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollectionIncludeRegexes { /** * The group of regular expression patterns to match against one or more resources. Maximum of 100 entries. The sum of all lengths of regular expressions can't exceed 10 KiB. * Structure is documented below. 
*/ patterns?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollectionIncludeRegexesPattern[]; } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollectionIncludeRegexesPattern { /** * Regex for Cloud Storage. * Structure is documented below. */ cloudStorageRegex?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollectionIncludeRegexesPatternCloudStorageRegex; } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilterCollectionIncludeRegexesPatternCloudStorageRegex { /** * Regex to test the bucket name against. If empty, all buckets match. Example: "marketing2021" or "(marketing)\d{4}" will both match the bucket gs://marketing2021 */ bucketNameRegex?: string; /** * For organizations, if unset, will match all projects. */ projectIdRegex?: string; } interface PreventionDiscoveryConfigTargetCloudStorageTargetFilterOthers { } interface PreventionDiscoveryConfigTargetCloudStorageTargetGenerationCadence { /** * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. * Structure is documented below. */ inspectTemplateModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadence; /** * Frequency to update profiles regardless of whether the underlying resource has changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ refreshFrequency?: string; } interface PreventionDiscoveryConfigTargetCloudStorageTargetGenerationCadenceInspectTemplateModifiedCadence { /** * How frequently data profiles can be updated when the template is modified. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
*/ frequency?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTarget { /** * In addition to matching the filter, these conditions must be true before a profile is generated. * Structure is documented below. */ conditions?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetConditions; /** * Required. The type of data profiles generated by this discovery target. Supported values are: aws/s3/bucket * Structure is documented below. */ dataSourceType?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetDataSourceType; /** * Disable profiling for resources that match this filter. */ disabled?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetDisabled; /** * Required. The resources that the discovery cadence applies to. The first target with a matching filter will be the one to apply to a resource. * Structure is documented below. */ filter: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilter; /** * How often and when to update profiles. New resources that match both the filter and conditions are scanned as quickly as possible depending on system capacity. * Structure is documented below. */ generationCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetGenerationCadence; } interface PreventionDiscoveryConfigTargetOtherCloudTargetConditions { /** * Amazon S3 bucket conditions. * Structure is documented below. * * * The `amazonS3BucketConditions` block supports: */ amazonS3BucketConditions?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetConditionsAmazonS3BucketConditions; /** * Duration format. Minimum age a resource must be before a profile can be generated. Value must be 1 hour or greater. Minimum age is not supported for Azure Blob Storage containers. */ minAge?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetConditionsAmazonS3BucketConditions { /** * Bucket types that should be profiled. Optional. 
Defaults to TYPE_ALL_SUPPORTED if unspecified. Possible values: ["TYPE_ALL_SUPPORTED", "TYPE_GENERAL_PURPOSE"] */ bucketTypes?: string[]; /** * Object classes that should be profiled. Optional. Defaults to ALL_SUPPORTED_CLASSES if unspecified. Possible values: ["ALL_SUPPORTED_CLASSES", "STANDARD", "STANDARD_INFREQUENT_ACCESS", "GLACIER_INSTANT_RETRIEVAL", "INTELLIGENT_TIERING"] */ objectStorageClasses?: string[]; } interface PreventionDiscoveryConfigTargetOtherCloudTargetDataSourceType { /** * (Optional) */ dataSource?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetDisabled { } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilter { /** * A collection of resources for this filter to apply to. * Structure is documented below. */ collection?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollection; /** * Match discovery resources not covered by any other filter. */ others?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterOthers; /** * The resource to scan. Configs using this filter can only have one target (the target with this single resource reference). * Structure is documented below. */ singleResource?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterSingleResource; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollection { /** * A collection of regular expressions to match a resource against. * Structure is documented below. */ includeRegexes?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexes; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexes { /** * The group of regular expression patterns to match against one or more resources. Maximum of 100 entries. The sum of all lengths of regular expressions can't exceed 10 KiB. * Structure is documented below. 
*/ patterns?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexesPattern[]; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexesPattern { /** * Regex for Amazon S3. * Structure is documented below. * * * The `amazonS3BucketRegex` block supports: */ amazonS3BucketRegex?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexesPatternAmazonS3BucketRegex; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexesPatternAmazonS3BucketRegex { /** * The AWS account regex */ awsAccountRegex?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexesPatternAmazonS3BucketRegexAwsAccountRegex; /** * Regex to test the bucket name against. If empty, all buckets match. */ bucketNameRegex?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterCollectionIncludeRegexesPatternAmazonS3BucketRegexAwsAccountRegex { /** * Regex to test the AWS account ID against. If empty, all accounts match. Example: arn:aws:organizations::123:account/o-b2c3d4/345 */ accountIdRegex?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterOthers { } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterSingleResource { /** * Amazon S3 bucket. * Structure is documented below. * * * The `amazonS3Bucket` block supports: */ amazonS3Bucket?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterSingleResourceAmazonS3Bucket; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterSingleResourceAmazonS3Bucket { /** * The AWS account. */ awsAccount?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetFilterSingleResourceAmazonS3BucketAwsAccount; /** * The bucket name. */ bucketName?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetFilterSingleResourceAmazonS3BucketAwsAccount { /** * AWS account ID. 
*/ accountId?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetGenerationCadence { /** * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. * Structure is documented below. */ inspectTemplateModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetOtherCloudTargetGenerationCadenceInspectTemplateModifiedCadence; /** * Frequency to update profiles regardless of whether the underlying resource has changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ refreshFrequency?: string; } interface PreventionDiscoveryConfigTargetOtherCloudTargetGenerationCadenceInspectTemplateModifiedCadence { /** * How frequently data profiles can be updated when the template is modified. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. */ frequency?: string; } interface PreventionDiscoveryConfigTargetSecretsTarget { } interface PreventionInspectTemplateInspectConfig { /** * List of options defining data content to scan. If empty, text, images, and other content will be included. * Each value may be one of: `CONTENT_TEXT`, `CONTENT_IMAGE`. */ contentOptions?: string[]; /** * Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more. * Structure is documented below. */ customInfoTypes?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoType[]; /** * When true, excludes type information of the findings. */ excludeInfoTypes?: boolean; /** * When true, a contextual quote from the data that triggered a finding is included in the response. */ includeQuote?: boolean; /** * Restricts what infoTypes to look for. 
The values must correspond to InfoType values returned by infoTypes.list * or listed at https://cloud.google.com/dlp/docs/infotypes-reference. * When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. * By default this may be all types, but may change over time as detectors are updated. * Structure is documented below. */ infoTypes?: outputs.dataloss.PreventionInspectTemplateInspectConfigInfoType[]; /** * Configuration to control the number of findings returned. * Structure is documented below. */ limits?: outputs.dataloss.PreventionInspectTemplateInspectConfigLimits; /** * Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info * Default value is `POSSIBLE`. * Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. */ minLikelihood?: string; /** * Set of rules to apply to the findings for this InspectConfig. Exclusion rules, contained in the set are executed in the end, * other rules are executed in the order they are specified for each info type. * Structure is documented below. */ ruleSets?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSet[]; } interface PreventionInspectTemplateInspectConfigCustomInfoType { /** * Dictionary which defines the rule. * Structure is documented below. */ dictionary?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeDictionary; /** * If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. * Possible values are: `EXCLUSION_TYPE_EXCLUDE`. */ exclusionType?: string; /** * CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing * infoTypes and that infoType is specified in `infoTypes` field. Specifying the latter adds findings to the * one detected by the system. 
If built-in info type is not specified in `infoTypes` list then the name is * treated as a custom info type. * Structure is documented below. */ infoType: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeInfoType; /** * Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria * specified by the rule. * Default value is `VERY_LIKELY`. * Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. */ likelihood?: string; /** * Regular expression which defines the rule. * Structure is documented below. */ regex?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeRegex; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeSensitivityScore; /** * A reference to a StoredInfoType to use with scanning. * Structure is documented below. */ storedType?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeStoredType; /** * Message for detecting output from deidentification transformations that support reversing. */ surrogateType?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeSurrogateType; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeDictionary { /** * Newline-delimited file of words in Cloud Storage. Only a single file is accepted. * Structure is documented below. */ cloudStoragePath?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeDictionaryCloudStoragePath; /** * List of words or phrases to search for. * Structure is documented below. */ wordList?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeDictionaryWordList; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeDictionaryCloudStoragePath { /** * A url representing a file or path (no wildcards) in Cloud Storage. 
Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeDictionaryWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one * phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names * listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionInspectTemplateInspectConfigCustomInfoTypeInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeRegex { /** * The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. * Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. 
*/ score: string; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeStoredType { /** * Resource name of the requested StoredInfoType, for example `organizations/433245324/storedInfoTypes/432452342` * or `projects/project-id/storedInfoTypes/432452342`. */ name: string; } interface PreventionInspectTemplateInspectConfigCustomInfoTypeSurrogateType { } interface PreventionInspectTemplateInspectConfigInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed * at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionInspectTemplateInspectConfigInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionInspectTemplateInspectConfigInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionInspectTemplateInspectConfigLimits { /** * Configuration of findings limit given for specified infoTypes. * Structure is documented below. */ maxFindingsPerInfoTypes?: outputs.dataloss.PreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType[]; /** * Max number of findings that will be returned for each item scanned. The maximum returned is 2000. */ maxFindingsPerItem: number; /** * Max number of findings that will be returned per request/job. The maximum returned is 2000. */ maxFindingsPerRequest: number; } interface PreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType { /** * Type of information the findings limit applies to. Only one limit per infoType should be provided. 
If InfoTypeLimit does * not have an infoType, the DLP API applies the limit against all infoTypes that are found but not * specified in another InfoTypeLimit. * Structure is documented below. */ infoType?: outputs.dataloss.PreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType; /** * Max findings limit for the given infoType. */ maxFindings: number; } interface PreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names * listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionInspectTemplateInspectConfigRuleSet { /** * List of infoTypes this rule set is applied to. * Structure is documented below. */ infoTypes: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetInfoType[]; /** * Set of rules to be applied to infoTypes. The rules are applied in order. * Structure is documented below. */ rules: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRule[]; } interface PreventionInspectTemplateInspectConfigRuleSetInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed * at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. 
*/ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionInspectTemplateInspectConfigRuleSetInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionInspectTemplateInspectConfigRuleSetRule { /** * The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results. * Structure is documented below. */ exclusionRule?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRule; /** * Hotword-based detection rule. * Structure is documented below. */ hotwordRule?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRule; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRule { /** * Dictionary which defines the rule. * Structure is documented below. */ dictionary?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleDictionary; /** * Drop if the hotword rule is contained in the proximate context. * For tabular data, the context includes the column name. * Structure is documented below. */ excludeByHotword?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeByHotword; /** * Set of infoTypes for which findings would affect this rule. * Structure is documented below. */ excludeInfoTypes?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypes; /** * How the rule is applied. 
See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType * Possible values are: `MATCHING_TYPE_FULL_MATCH`, `MATCHING_TYPE_PARTIAL_MATCH`, `MATCHING_TYPE_INVERSE_MATCH`. */ matchingType: string; /** * Regular expression which defines the rule. * Structure is documented below. */ regex?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleRegex; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleDictionary { /** * Newline-delimited file of words in Cloud Storage. Only a single file is accepted. * Structure is documented below. */ cloudStoragePath?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleDictionaryCloudStoragePath; /** * List of words or phrases to search for. * Structure is documented below. */ wordList?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleDictionaryWordList; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleDictionaryCloudStoragePath { /** * A url representing a file or path (no wildcards) in Cloud Storage. Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleDictionaryWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one * phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeByHotword { /** * Regular expression pattern defining what qualifies as a hotword. * Structure is documented below. */ hotwordRegex: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordHotwordRegex; /** * Proximity of the finding within which the entire hotword must reside. The total length of the window cannot * exceed 1000 characters. 
Note that the finding itself will be included in the window, so that hotwords may be * used to match substrings of the finding itself. For example, the certainty of a phone number regex * `(\d{3}) \d{3}-\d{4}` could be adjusted upwards if the area code is known to be the local area code of a company * office using the hotword regex `(xxx)`, where `xxx` is the area code in question. * Structure is documented below. */ proximity: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordProximity; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordHotwordRegex { /** * The index of the submatch to extract as findings. When not specified, * the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. Its syntax * (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordProximity { /** * Number of characters after the finding to consider. */ windowAfter?: number; /** * Number of characters before the finding to consider. */ windowBefore?: number; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypes { /** * If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results. * Structure is documented below. */ infoTypes: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoType[]; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed * at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. 
*/ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoTypeSensitivityScore; /** * Version name for this InfoType. */ version?: string; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleRegex { /** * The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. * Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } interface PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRule { /** * Regular expression pattern defining what qualifies as a hotword. * Structure is documented below. */ hotwordRegex: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleHotwordRegex; /** * Likelihood adjustment to apply to all matching findings. * Structure is documented below. */ likelihoodAdjustment: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleLikelihoodAdjustment; /** * Proximity of the finding within which the entire hotword must reside. The total length of the window cannot * exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be * used to match substrings of the finding itself. 
For example, the certainty of a phone number regex * `(\d{3}) \d{3}-\d{4}` could be adjusted upwards if the area code is known to be the local area code of a company * office using the hotword regex `(xxx)`, where `xxx` is the area code in question. * Structure is documented below. */ proximity: outputs.dataloss.PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleProximity; } interface PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleHotwordRegex { /** * The index of the submatch to extract as findings. When not specified, * the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. Its syntax * (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } interface PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleLikelihoodAdjustment { /** * Set the likelihood of a finding to a fixed value. Either this or relativeLikelihood can be set. * Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. */ fixedLikelihood?: string; /** * Increase or decrease the likelihood by the specified number of levels. For example, * if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, * then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. * Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an * adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY * will result in a final likelihood of LIKELY. Either this or fixedLikelihood can be set. */ relativeLikelihood?: number; } interface PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleProximity { /** * Number of characters after the finding to consider. */ windowAfter?: number; /** * Number of characters before the finding to consider. 
*/ windowBefore?: number; } interface PreventionJobTriggerInspectJob { /** * Configuration block for the actions to execute on the completion of a job. Can be specified multiple times, but only one for each type. Each action block supports fields documented below. This argument is processed in attribute-as-blocks mode. * Structure is documented below. */ actions?: outputs.dataloss.PreventionJobTriggerInspectJobAction[]; /** * The core content of the template. * Structure is documented below. */ inspectConfig?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfig; /** * The name of the template to run when this job is triggered. */ inspectTemplateName?: string; /** * Information on where to inspect * Structure is documented below. */ storageConfig: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfig; } interface PreventionJobTriggerInspectJobAction { /** * Create a de-identified copy of the requested table or files. * Structure is documented below. */ deidentify?: outputs.dataloss.PreventionJobTriggerInspectJobActionDeidentify; /** * Sends an email when the job completes. The email goes to IAM project owners and technical Essential Contacts. */ jobNotificationEmails?: outputs.dataloss.PreventionJobTriggerInspectJobActionJobNotificationEmails; /** * Publish a message into a given Pub/Sub topic when the job completes. * Structure is documented below. */ pubSub?: outputs.dataloss.PreventionJobTriggerInspectJobActionPubSub; /** * (Optional, Deprecated) * Publish findings of a DlpJob to Data Catalog. * * > **Warning:** `publishFindingsToCloudDataCatalog` is deprecated and will be removed in a future major release. To publish findings to Dataplex Catalog, use `publishFindingsToDataplexCatalog` instead. * * @deprecated `publishFindingsToCloudDataCatalog` is deprecated and will be removed in a future major release. To publish findings to Dataplex Catalog, use `publishFindingsToDataplexCatalog` instead. 
*/ publishFindingsToCloudDataCatalog?: outputs.dataloss.PreventionJobTriggerInspectJobActionPublishFindingsToCloudDataCatalog; /** * Publish findings of a DlpJob as an aspect to Dataplex Universal Catalog. */ publishFindingsToDataplexCatalog?: outputs.dataloss.PreventionJobTriggerInspectJobActionPublishFindingsToDataplexCatalog; /** * Publish the result summary of a DlpJob to the Cloud Security Command Center. */ publishSummaryToCscc?: outputs.dataloss.PreventionJobTriggerInspectJobActionPublishSummaryToCscc; /** * Enable Stackdriver metric dlp.googleapis.com/findingCount. */ publishToStackdriver?: outputs.dataloss.PreventionJobTriggerInspectJobActionPublishToStackdriver; /** * If set, the detailed findings will be persisted to the specified OutputStorageConfig. Only a single instance of this action can be specified. Compatible with: Inspect, Risk * Structure is documented below. */ saveFindings?: outputs.dataloss.PreventionJobTriggerInspectJobActionSaveFindings; } interface PreventionJobTriggerInspectJobActionDeidentify { /** * User settable Cloud Storage bucket and folders to store de-identified files. * This field must be set for cloud storage deidentification. * The output Cloud Storage bucket must be different from the input bucket. * De-identified files will overwrite files in the output path. * Form of: gs://bucket/folder/ or gs://bucket */ cloudStorageOutput: string; /** * List of user-specified file type groups to transform. If specified, only the files with these filetypes will be transformed. * If empty, all supported files will be transformed. Supported types may be automatically added over time. * If a file type is set in this field that isn't supported by the Deidentify action then the job will fail and will not be successfully created/started. * Each value may be one of: `IMAGE`, `TEXT_FILE`, `CSV`, `TSV`. */ fileTypesToTransforms?: string[]; /** * User specified deidentify templates and configs for structured, unstructured, and image files. 
* Structure is documented below. */ transformationConfig?: outputs.dataloss.PreventionJobTriggerInspectJobActionDeidentifyTransformationConfig; /** * Config for storing transformation details. * Structure is documented below. */ transformationDetailsStorageConfig?: outputs.dataloss.PreventionJobTriggerInspectJobActionDeidentifyTransformationDetailsStorageConfig; } interface PreventionJobTriggerInspectJobActionDeidentifyTransformationConfig { /** * If this template is specified, it will serve as the default de-identify template. */ deidentifyTemplate?: string; /** * If this template is specified, it will serve as the de-identify template for images. */ imageRedactTemplate?: string; /** * If this template is specified, it will serve as the de-identify template for structured content such as delimited files and tables. */ structuredDeidentifyTemplate?: string; } interface PreventionJobTriggerInspectJobActionDeidentifyTransformationDetailsStorageConfig { /** * The BigQuery table in which to store the output. * Structure is documented below. */ table: outputs.dataloss.PreventionJobTriggerInspectJobActionDeidentifyTransformationDetailsStorageConfigTable; } interface PreventionJobTriggerInspectJobActionDeidentifyTransformationDetailsStorageConfigTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the table. The ID must contain only letters (a-z, * A-Z), numbers (0-9), or underscores (_). The maximum length * is 1,024 characters. */ tableId?: string; } interface PreventionJobTriggerInspectJobActionJobNotificationEmails { } interface PreventionJobTriggerInspectJobActionPubSub { /** * Cloud Pub/Sub topic to send notifications to. 
*/ topic: string; } interface PreventionJobTriggerInspectJobActionPublishFindingsToCloudDataCatalog { } interface PreventionJobTriggerInspectJobActionPublishFindingsToDataplexCatalog { } interface PreventionJobTriggerInspectJobActionPublishSummaryToCscc { } interface PreventionJobTriggerInspectJobActionPublishToStackdriver { } interface PreventionJobTriggerInspectJobActionSaveFindings { /** * Information on where to store output * Structure is documented below. */ outputConfig: outputs.dataloss.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfig; } interface PreventionJobTriggerInspectJobActionSaveFindingsOutputConfig { /** * Schema used for writing the findings for Inspect jobs. This field is only used for * Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding * object. If appending to an existing table, any columns from the predefined schema * that are missing will be added. No columns in the existing table will be deleted. * If unspecified, then all available columns will be used for a new table or an (existing) * table with no schema, and no changes will be made to an existing table that has a schema. * Only for use with external storage. * Possible values are: `BASIC_COLUMNS`, `GCS_COLUMNS`, `DATASTORE_COLUMNS`, `BIG_QUERY_COLUMNS`, `ALL_COLUMNS`. */ outputSchema?: string; /** * Store findings in an existing Cloud Storage bucket. Files will be generated with the job ID and file part number * as the filename, and will contain findings in textproto format as SaveToGcsFindingsOutput. The file name will use * the naming convention `{job_id}-{file_part_number}`, for example: my-job-id-2. * Supported for InspectJobs. The bucket must not be the same as the bucket being inspected. If storing findings to * Cloud Storage, the output schema field should not be set. If set, it will be ignored. * Structure is documented below. 
*/ storagePath?: outputs.dataloss.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigStoragePath; /** * Information on the location of the target BigQuery Table. * Structure is documented below. */ table?: outputs.dataloss.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigTable; } interface PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigStoragePath { /** * A URL representing a file or path (no wildcards) in Cloud Storage. * Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigTable { /** * The ID of the dataset containing this table. */ datasetId: string; /** * The ID of the project containing this table. */ projectId: string; /** * The ID of the table. The ID must contain only letters (a-z, * A-Z), numbers (0-9), or underscores (_). The maximum length * is 1,024 characters. */ tableId?: string; } interface PreventionJobTriggerInspectJobInspectConfig { /** * Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more. * Structure is documented below. */ customInfoTypes?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoType[]; /** * When true, excludes type information of the findings. */ excludeInfoTypes?: boolean; /** * When true, a contextual quote from the data that triggered a finding is included in the response. */ includeQuote?: boolean; /** * Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list * or listed at https://cloud.google.com/dlp/docs/infotypes-reference. * When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. * By default this may be all types, but may change over time as detectors are updated. * Structure is documented below. 
*/ infoTypes?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigInfoType[]; /** * Configuration to control the number of findings returned. * Structure is documented below. */ limits?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigLimits; /** * Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info * Default value is `POSSIBLE`. * Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. */ minLikelihood?: string; /** * Set of rules to apply to the findings for this InspectConfig. Exclusion rules, contained in the set are executed in the end, * other rules are executed in the order they are specified for each info type. * Structure is documented below. */ ruleSets?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSet[]; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoType { /** * Dictionary which defines the rule. * Structure is documented below. */ dictionary?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeDictionary; /** * If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. * Possible values are: `EXCLUSION_TYPE_EXCLUDE`. */ exclusionType?: string; /** * CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing * infoTypes and that infoType is specified in `infoTypes` field. Specifying the latter adds findings to the * one detected by the system. If built-in info type is not specified in `infoTypes` list then the name is * treated as a custom info type. * Structure is documented below. */ infoType: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeInfoType; /** * Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria * specified by the rule. 
* Default value is `VERY_LIKELY`. * Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. */ likelihood?: string; /** * Regular expression which defines the rule. * Structure is documented below. */ regex?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeRegex; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeSensitivityScore; /** * A reference to a StoredInfoType to use with scanning. * Structure is documented below. */ storedType?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeStoredType; /** * Message for detecting output from deidentification transformations that support reversing. */ surrogateType?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeSurrogateType; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeDictionary { /** * Newline-delimited file of words in Cloud Storage. Only a single file is accepted. * Structure is documented below. */ cloudStoragePath?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeDictionaryCloudStoragePath; /** * List of words or phrases to search for. * Structure is documented below. */ wordList?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeDictionaryWordList; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeDictionaryCloudStoragePath { /** * A url representing a file or path (no wildcards) in Cloud Storage. Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeDictionaryWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one * phrase and every phrase must contain at least 2 characters that are letters or digits. 
*/ words: string[]; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names * listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeInfoTypeSensitivityScore; /** * Version of the information type to use. By default, the version is set to stable. */ version?: string; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeRegex { /** * The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. * Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeStoredType { /** * (Output) * The creation timestamp of an inspectTemplate. Set by the server. */ createTime: string; /** * Resource name of the requested StoredInfoType, for example `organizations/433245324/storedInfoTypes/432452342` * or `projects/project-id/storedInfoTypes/432452342`. 
*/ name: string; } interface PreventionJobTriggerInspectJobInspectConfigCustomInfoTypeSurrogateType { } interface PreventionJobTriggerInspectJobInspectConfigInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed * at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigInfoTypeSensitivityScore; /** * Version of the information type to use. By default, the version is set to stable. */ version?: string; } interface PreventionJobTriggerInspectJobInspectConfigInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionJobTriggerInspectJobInspectConfigLimits { /** * Configuration of findings limit given for specified infoTypes. * Structure is documented below. */ maxFindingsPerInfoTypes?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType[]; /** * Max number of findings that will be returned for each item scanned. The maximum returned is 2000. */ maxFindingsPerItem?: number; /** * Max number of findings that will be returned per request/job. The maximum returned is 2000. */ maxFindingsPerRequest?: number; } interface PreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType { /** * Type of information the findings limit applies to. Only one limit per infoType should be provided. If InfoTypeLimit does * not have an infoType, the DLP API applies the limit against all infoTypes that are found but not * specified in another InfoTypeLimit. * Structure is documented below. 
*/ infoType?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType; /** * Max findings limit for the given infoType. */ maxFindings?: number; } interface PreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names * listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore; /** * Version of the information type to use. By default, the version is set to stable. */ version?: string; } interface PreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSet { /** * List of infoTypes this rule set is applied to. * Structure is documented below. */ infoTypes?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetInfoType[]; /** * Set of rules to be applied to infoTypes. The rules are applied in order. * Structure is documented below. */ rules: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRule[]; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed * at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. */ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. 
* Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypeSensitivityScore; /** * Version of the information type to use. By default, the version is set to stable. */ version?: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRule { /** * The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results. * Structure is documented below. */ exclusionRule?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRule; /** * Hotword-based detection rule. * Structure is documented below. */ hotwordRule?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRule; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRule { /** * Dictionary which defines the rule. * Structure is documented below. */ dictionary?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleDictionary; /** * Drop if the hotword rule is contained in the proximate context. * Structure is documented below. */ excludeByHotword?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeByHotword; /** * Set of infoTypes for which findings would affect this rule. * Structure is documented below. */ excludeInfoTypes?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypes; /** * How the rule is applied. See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType * Possible values are: `MATCHING_TYPE_FULL_MATCH`, `MATCHING_TYPE_PARTIAL_MATCH`, `MATCHING_TYPE_INVERSE_MATCH`. 
*/ matchingType: string; /** * Regular expression which defines the rule. * Structure is documented below. */ regex?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleRegex; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleDictionary { /** * Newline-delimited file of words in Cloud Storage. Only a single file is accepted. * Structure is documented below. */ cloudStoragePath?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleDictionaryCloudStoragePath; /** * List of words or phrases to search for. * Structure is documented below. */ wordList?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleDictionaryWordList; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleDictionaryCloudStoragePath { /** * A url representing a file or path (no wildcards) in Cloud Storage. Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleDictionaryWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one * phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeByHotword { /** * Regular expression pattern defining what qualifies as a hotword. * Structure is documented below. */ hotwordRegex?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordHotwordRegex; /** * Proximity of the finding within which the entire hotword must reside. The total length of the window cannot * exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be * used to match substrings of the finding itself. 
For example, the certainty of a phone number regex * `(\d{3}) \d{3}-\d{4}` could be adjusted upwards if the area code is known to be the local area code of a company * office using the hotword regex `(xxx)`, where `xxx` is the area code in question. * Structure is documented below. */ proximity?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordProximity; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordHotwordRegex { /** * The index of the submatch to extract as findings. When not specified, * the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. Its syntax * (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern?: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeByHotwordProximity { /** * Number of characters after the finding to consider. Either this or windowBefore must be specified */ windowAfter?: number; /** * Number of characters before the finding to consider. Either this or windowAfter must be specified */ windowBefore?: number; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypes { /** * If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results. * Structure is documented below. */ infoTypes: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoType[]; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoType { /** * Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed * at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. 
*/ name: string; /** * Optional custom sensitivity for this InfoType. This only applies to data profiling. * Structure is documented below. */ sensitivityScore?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoTypeSensitivityScore; /** * Version of the information type to use. By default, the version is set to stable. */ version?: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoTypeSensitivityScore { /** * The sensitivity score applied to the resource. * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. */ score: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleExclusionRuleRegex { /** * The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. * Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRule { /** * Regular expression pattern defining what qualifies as a hotword. * Structure is documented below. */ hotwordRegex?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRuleHotwordRegex; /** * Likelihood adjustment to apply to all matching findings. * Structure is documented below. */ likelihoodAdjustment?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRuleLikelihoodAdjustment; /** * Proximity of the finding within which the entire hotword must reside. The total length of the window cannot * exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be * used to match substrings of the finding itself. 
For example, the certainty of a phone number regex * `(\d{3}) \d{3}-\d{4}` could be adjusted upwards if the area code is known to be the local area code of a company * office using the hotword regex `(xxx)`, where `xxx` is the area code in question. * Structure is documented below. */ proximity?: outputs.dataloss.PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRuleProximity; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRuleHotwordRegex { /** * The index of the submatch to extract as findings. When not specified, * the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. Its syntax * (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern?: string; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRuleLikelihoodAdjustment { /** * Set the likelihood of a finding to a fixed value. Either this or relativeLikelihood can be set. * Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. */ fixedLikelihood?: string; /** * Increase or decrease the likelihood by the specified number of levels. For example, * if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, * then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. * Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an * adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY * will result in a final likelihood of LIKELY. Either this or fixedLikelihood can be set. */ relativeLikelihood?: number; } interface PreventionJobTriggerInspectJobInspectConfigRuleSetRuleHotwordRuleProximity { /** * Number of characters after the finding to consider. Either this or windowBefore must be specified */ windowAfter?: number; /** * Number of characters before the finding to consider. 
Either this or windowAfter must be specified */ windowBefore?: number; } interface PreventionJobTriggerInspectJobStorageConfig { /** * Options defining BigQuery table and row identifiers. * Structure is documented below. */ bigQueryOptions?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigBigQueryOptions; /** * Options defining a file or a set of files within a Google Cloud Storage bucket. * Structure is documented below. */ cloudStorageOptions?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigCloudStorageOptions; /** * Options defining a data set within Google Cloud Datastore. * Structure is documented below. */ datastoreOptions?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigDatastoreOptions; /** * Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. * Structure is documented below. */ hybridOptions?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigHybridOptions; /** * Configuration of the timespan of the items to include in scanning * Structure is documented below. */ timespanConfig?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigTimespanConfig; } interface PreventionJobTriggerInspectJobStorageConfigBigQueryOptions { /** * References to fields excluded from scanning. * This allows you to skip inspection of entire columns which you know have no findings. * Structure is documented below. */ excludedFields?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedField[]; /** * Specifies the BigQuery fields that will be returned with findings. * If not specified, no identifying fields will be returned for findings. * Structure is documented below. */ identifyingFields?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingField[]; /** * Limit scanning only to these fields. * Structure is documented below. 
*/ includedFields?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedField[]; /** * Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted. * If not set, or if set to 0, all rows will be scanned. Only one of rowsLimit and rowsLimitPercent can be * specified. Cannot be used in conjunction with TimespanConfig. */ rowsLimit?: number; /** * Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down. * Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of * rowsLimit and rowsLimitPercent can be specified. Cannot be used in conjunction with TimespanConfig. */ rowsLimitPercent?: number; /** * How to sample rows if not all rows are scanned. Meaningful only when used in conjunction with either * rowsLimit or rowsLimitPercent. If not specified, rows are scanned in the order BigQuery reads them. * If TimespanConfig is set, set this to an empty string to avoid using the default value. * Default value is `TOP`. * Possible values are: `TOP`, `RANDOM_START`. */ sampleMethod?: string; /** * Reference to the BigQuery table to scan. * Structure is documented below. */ tableReference: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference; } interface PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedField { /** * Name describing the field excluded from scanning. */ name: string; } interface PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingField { /** * Name describing the field. */ name: string; } interface PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedField { /** * Name describing the field to which scanning is limited. */ name: string; } interface PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference { /** * The dataset ID of the table. 
*/ datasetId: string; /** * The Google Cloud Platform project ID of the project containing the table. */ projectId: string; /** * The name of the table. */ tableId: string; } interface PreventionJobTriggerInspectJobStorageConfigCloudStorageOptions { /** * Max number of bytes to scan from a file. If a scanned file's size is bigger than this value * then the rest of the bytes are omitted. */ bytesLimitPerFile?: number; /** * Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. * Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. */ bytesLimitPerFilePercent?: number; /** * Set of files to scan. * Structure is documented below. */ fileSet: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet; /** * List of file type groups to include in the scan. If empty, all files are scanned and available data * format processors are applied. In addition, the binary content of the selected files is always scanned as well. * Images are scanned only as binary if the specified region does not support image inspection and no fileTypes were specified. * Each value may be one of: `BINARY_FILE`, `TEXT_FILE`, `IMAGE`, `WORD`, `PDF`, `AVRO`, `CSV`, `TSV`, `POWERPOINT`, `EXCEL`. */ fileTypes?: string[]; /** * Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. * Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. */ filesLimitPercent?: number; /** * How to sample bytes if not all bytes are scanned. Meaningful only when used in conjunction with bytesLimitPerFile. * If not specified, scanning would start from the top. * Possible values are: `TOP`, `RANDOM_START`. */ sampleMethod?: string; } interface PreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet { /** * The regex-filtered set of files to scan. * Structure is documented below. 
*/ regexFileSet?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet; /** * The Cloud Storage url of the file(s) to scan, in the format `gs://<bucket>/<path>`. Trailing wildcard * in the path is allowed. * If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned * non-recursively (content in sub-directories will not be scanned). This means that `gs://mybucket/` is * equivalent to `gs://mybucket/*`, and `gs://mybucket/directory/` is equivalent to `gs://mybucket/directory/*`. */ url?: string; } interface PreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet { /** * The name of a Cloud Storage bucket. */ bucketName: string; /** * A list of regular expressions matching file paths to exclude. All files in the bucket that match at * least one of these regular expressions will be excluded from the scan. */ excludeRegexes?: string[]; /** * A list of regular expressions matching file paths to include. All files in the bucket * that match at least one of these regular expressions will be included in the set of files, * except for those that also match an item in excludeRegex. Leaving this field empty will * match all files by default (this is equivalent to including .* in the list) */ includeRegexes?: string[]; } interface PreventionJobTriggerInspectJobStorageConfigDatastoreOptions { /** * A representation of a Datastore kind. * Structure is documented below. */ kind: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind; /** * Datastore partition ID. A partition ID identifies a grouping of entities. The grouping * is always by project and namespace, however the namespace ID may be empty. * Structure is documented below. */ partitionId: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId; } interface PreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind { /** * The name of the Datastore kind. 
*/ name: string; } interface PreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId { /** * If not empty, the ID of the namespace to which the entities belong. */ namespaceId?: string; /** * The ID of the project to which the entities belong. */ projectId: string; } interface PreventionJobTriggerInspectJobStorageConfigHybridOptions { /** * A short description of where the data is coming from. Will be stored once in the job. 256 max length. */ description?: string; /** * To organize findings, these labels will be added to each finding. * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. * Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. * No more than 10 labels can be associated with a given finding. * Examples: * * `"environment" : "production"` * * `"pipeline" : "etl"` */ labels?: { [key: string]: string; }; /** * These are labels that each inspection request must include within their 'finding_labels' map. Request * may contain others, but any missing one of these will be rejected. * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. * No more than 10 keys can be required. */ requiredFindingLabelKeys?: string[]; /** * If the container is a table, additional information to make findings meaningful such as the columns that are primary keys. * Structure is documented below. */ tableOptions?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptions; } interface PreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptions { /** * The columns that are the primary keys for table objects included in ContentItem. A copy of this * cell's value will be stored alongside each finding so that the finding can be traced to * the specific row it came from. No more than 3 may be provided. * Structure is documented below. 
*/ identifyingFields?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingField[]; } interface PreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingField { /** * Name describing the field. */ name: string; } interface PreventionJobTriggerInspectJobStorageConfigTimespanConfig { /** * When the job is started by a JobTrigger we will automatically figure out a valid startTime to avoid * scanning files that have not been modified since the last time the JobTrigger executed. This will * be based on the time of the execution of the last run of the JobTrigger or the timespan endTime * used in the last run of the JobTrigger. */ enableAutoPopulationOfTimespanConfig?: boolean; /** * Exclude files, tables, or rows newer than this value. If not set, no upper time limit is applied. */ endTime?: string; /** * Exclude files, tables, or rows older than this value. If not set, no lower time limit is applied. */ startTime?: string; /** * Specification of the field containing the timestamp of scanned items. * Structure is documented below. */ timestampField?: outputs.dataloss.PreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField; } interface PreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField { /** * Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. * For BigQuery: Required to filter out rows based on the given start and end times. If not specified and the table was * modified between the given start and end times, the entire table will be scanned. The valid data types of the timestamp * field are: INTEGER, DATE, TIMESTAMP, or DATETIME BigQuery column. * For Datastore. Valid data types of the timestamp field are: TIMESTAMP. Datastore entity will be scanned if the * timestamp property does not exist or its value is empty or invalid. 
*/ name: string; } interface PreventionJobTriggerTrigger { /** * For use with hybrid jobs. Jobs must be manually created and finished. */ manual?: outputs.dataloss.PreventionJobTriggerTriggerManual; /** * Schedule for triggered jobs * Structure is documented below. */ schedule?: outputs.dataloss.PreventionJobTriggerTriggerSchedule; } interface PreventionJobTriggerTriggerManual { } interface PreventionJobTriggerTriggerSchedule { /** * With this option a job is started a regular periodic basis. For example: every day (86400 seconds). * A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. * This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ recurrencePeriodDuration?: string; } interface PreventionStoredInfoTypeDictionary { /** * Newline-delimited file of words in Cloud Storage. Only a single file is accepted. * Structure is documented below. */ cloudStoragePath?: outputs.dataloss.PreventionStoredInfoTypeDictionaryCloudStoragePath; /** * List of words or phrases to search for. * Structure is documented below. */ wordList?: outputs.dataloss.PreventionStoredInfoTypeDictionaryWordList; } interface PreventionStoredInfoTypeDictionaryCloudStoragePath { /** * A url representing a file or path (no wildcards) in Cloud Storage. Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionStoredInfoTypeDictionaryWordList { /** * Words or phrases defining the dictionary. The dictionary must contain at least one * phrase and every phrase must contain at least 2 characters that are letters or digits. */ words: string[]; } interface PreventionStoredInfoTypeLargeCustomDictionary { /** * Field in a BigQuery table where each cell represents a dictionary phrase. * Structure is documented below. 
*/ bigQueryField?: outputs.dataloss.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryField; /** * Set of files containing newline-delimited lists of dictionary phrases. * Structure is documented below. */ cloudStorageFileSet?: outputs.dataloss.PreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet; /** * Location to store dictionary artifacts in Google Cloud Storage. These files will only be accessible by project owners and the DLP API. * If any of these artifacts are modified, the dictionary is considered invalid and can no longer be used. * Structure is documented below. */ outputPath: outputs.dataloss.PreventionStoredInfoTypeLargeCustomDictionaryOutputPath; } interface PreventionStoredInfoTypeLargeCustomDictionaryBigQueryField { /** * Designated field in the BigQuery table. * Structure is documented below. */ field: outputs.dataloss.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField; /** * Reference to the BigQuery table containing the dictionary phrases. * Structure is documented below. */ table: outputs.dataloss.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable; } interface PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField { /** * Name describing the field. */ name: string; } interface PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable { /** * The dataset ID of the table. */ datasetId: string; /** * The Google Cloud Platform project ID of the project containing the table. */ projectId: string; /** * The name of the table. */ tableId: string; } interface PreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet { /** * The url, in the format `gs://<bucket>/<path>`. Trailing wildcard in the path is allowed. */ url: string; } interface PreventionStoredInfoTypeLargeCustomDictionaryOutputPath { /** * A url representing a file or path (no wildcards) in Cloud Storage. 
Example: `gs://[BUCKET_NAME]/dictionary.txt` */ path: string; } interface PreventionStoredInfoTypeRegex { /** * The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. */ groupIndexes?: number[]; /** * Pattern defining the regular expression. * Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. */ pattern: string; } } export declare namespace dataplex { interface AspectTypeIamBindingCondition { description?: string; expression: string; title: string; } interface AspectTypeIamMemberCondition { description?: string; expression: string; title: string; } interface AssetDiscoverySpec { /** * Optional. Configuration for CSV data. */ csvOptions: outputs.dataplex.AssetDiscoverySpecCsvOptions; /** * Required. Whether discovery is enabled. */ enabled: boolean; /** * Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. */ excludePatterns?: string[]; /** * Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. */ includePatterns?: string[]; /** * Optional. Configuration for Json data. */ jsonOptions: outputs.dataplex.AssetDiscoverySpecJsonOptions; /** * Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. 
To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". */ schedule?: string; } interface AssetDiscoverySpecCsvOptions { /** * Optional. The delimiter being used to separate values. This defaults to ','. */ delimiter?: string; /** * Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings. */ disableTypeInference?: boolean; /** * Optional. The character encoding of the data. The default is UTF-8. */ encoding?: string; /** * Optional. The number of rows to interpret as header rows that should be skipped when reading data rows. */ headerRows?: number; } interface AssetDiscoverySpecJsonOptions { /** * Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean). */ disableTypeInference?: boolean; /** * Optional. The character encoding of the data. The default is UTF-8. */ encoding?: string; } interface AssetDiscoveryStatus { /** * The duration of the last discovery run. */ lastRunDuration: string; /** * The start time of the last discovery run. */ lastRunTime: string; /** * Additional information about the current state. */ message: string; /** * Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED */ state: string; /** * Data Stats of the asset reported by discovery. */ stats: outputs.dataplex.AssetDiscoveryStatusStat[]; /** * Output only. The time when the asset was last updated. */ updateTime: string; } interface AssetDiscoveryStatusStat { /** * The count of data items within the referenced resource. */ dataItems: number; /** * The number of stored data bytes within the referenced resource. 
*/ dataSize: number; /** * The count of fileset entities within the referenced resource. */ filesets: number; /** * The count of table entities within the referenced resource. */ tables: number; } interface AssetIamBindingCondition { description?: string; expression: string; title: string; } interface AssetIamMemberCondition { description?: string; expression: string; title: string; } interface AssetResourceSpec { /** * Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}` */ name?: string; /** * Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED */ readAccessMode: string; /** * Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET * * - - - */ type: string; } interface AssetResourceStatus { /** * Additional information about the current state. */ message: string; /** * Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED */ state: string; /** * Output only. The time when the asset was last updated. */ updateTime: string; } interface AssetSecurityStatus { /** * Additional information about the current state. */ message: string; /** * Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED */ state: string; /** * Output only. The time when the asset was last updated. */ updateTime: string; } interface DataAssetAccessGroupConfig { /** * The identifier for this object. Format specified above. */ accessGroup: string; /** * IAM roles granted on the resource. */ iamRoles?: string[]; } interface DataProductAccessGroup { /** * Description of the access group. */ description?: string; /** * User friendly display name. 
*/ displayName: string; /** * Unique identifier of the access group. */ groupId: string; /** * The identifier for this object. Format specified above. */ id: string; /** * The principal entity. * Structure is documented below. */ principal: outputs.dataplex.DataProductAccessGroupPrincipal; } interface DataProductAccessGroupPrincipal { /** * Email of the Google Group. */ googleGroup?: string; } interface DatascanData { /** * The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan. */ entity?: string; /** * The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: * Cloud Storage bucket (//storage.googleapis.com/projects/PROJECT_ID/buckets/BUCKET_ID) for DataDiscoveryScan OR BigQuery table of type "TABLE" (/bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID) for DataProfileScan/DataQualityScan. */ resource?: string; } interface DatascanDataDiscoverySpec { /** * Configuration for metadata publishing. * Structure is documented below. */ bigqueryPublishingConfig?: outputs.dataplex.DatascanDataDiscoverySpecBigqueryPublishingConfig; /** * Configurations related to Cloud Storage as the data source. * Structure is documented below. */ storageConfig?: outputs.dataplex.DatascanDataDiscoverySpecStorageConfig; } interface DatascanDataDiscoverySpecBigqueryPublishingConfig { /** * The BigQuery connection used to create BigLake tables. Must be in the form `projects/{projectId}/locations/{locationId}/connections/{connection_id}`. */ connection?: string; /** * The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. */ location?: string; /** * The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}". 
*/ project?: string; /** * Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables. * Possible values are: `TABLE_TYPE_UNSPECIFIED`, `EXTERNAL`, `BIGLAKE`. */ tableType?: string; } interface DatascanDataDiscoverySpecStorageConfig { /** * Configuration for CSV data. * Structure is documented below. */ csvOptions?: outputs.dataplex.DatascanDataDiscoverySpecStorageConfigCsvOptions; /** * Defines the data to exclude during discovery. Provide a list of patterns that identify the data to exclude. For Cloud Storage bucket assets, these patterns are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these patterns are interpreted as patterns to match table names. */ excludePatterns?: string[]; /** * Defines the data to include during discovery when only a subset of the data should be considered. Provide a list of patterns that identify the data to include. For Cloud Storage bucket assets, these patterns are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these patterns are interpreted as patterns to match table names. */ includePatterns?: string[]; /** * Configuration for JSON data. * Structure is documented below. */ jsonOptions?: outputs.dataplex.DatascanDataDiscoverySpecStorageConfigJsonOptions; } interface DatascanDataDiscoverySpecStorageConfigCsvOptions { /** * The delimiter that is used to separate values. The default is `,` (comma). */ delimiter?: string; /** * The character encoding of the data. The default is UTF-8. */ encoding?: string; /** * The number of rows to interpret as header rows that should be skipped when reading data rows. */ headerRows?: number; /** * The character used to quote column values. Accepts `"` (double quotation mark) or `'` (single quotation mark). If unspecified, defaults to `"` (double quotation mark). */ quote?: string; /** * Whether to disable the inference of data types for CSV data. 
If true, all columns are registered as strings. */ typeInferenceDisabled?: boolean; } interface DatascanDataDiscoverySpecStorageConfigJsonOptions { /** * The character encoding of the data. The default is UTF-8. */ encoding?: string; /** * Whether to disable the inference of data types for JSON data. If true, all columns are registered as their primitive types (strings, number, or boolean). */ typeInferenceDisabled?: boolean; } interface DatascanDataDocumentationSpec { } interface DatascanDataProfileSpec { /** * If set, the latest DataScan job result will be published to Dataplex Catalog. */ catalogPublishingEnabled?: boolean; /** * The fields to exclude from data profile. * If specified, the fields will be excluded from data profile, regardless of `includeFields` value. * Structure is documented below. */ excludeFields?: outputs.dataplex.DatascanDataProfileSpecExcludeFields; /** * The fields to include in data profile. * If not specified, all fields at the time of profile scan job execution are included, except for ones listed in `excludeFields`. * Structure is documented below. */ includeFields?: outputs.dataplex.DatascanDataProfileSpecIncludeFields; /** * Actions to take upon job completion. * Structure is documented below. */ postScanActions?: outputs.dataplex.DatascanDataProfileSpecPostScanActions; /** * A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10 */ rowFilter?: string; /** * The percentage of the records to be selected from the dataset for DataScan. * Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. * Sampling is not applied if `samplingPercent` is not specified, 0 or 100. */ samplingPercent?: number; } interface DatascanDataProfileSpecExcludeFields { /** * Expected input is a list of fully qualified names of fields as in the schema. 
* Only top-level field names for nested fields are supported. * For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'. */ fieldNames?: string[]; } interface DatascanDataProfileSpecIncludeFields { /** * Expected input is a list of fully qualified names of fields as in the schema. * Only top-level field names for nested fields are supported. * For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'. */ fieldNames?: string[]; } interface DatascanDataProfileSpecPostScanActions { /** * If set, results will be exported to the provided BigQuery table. * Structure is documented below. */ bigqueryExport?: outputs.dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExport; } interface DatascanDataProfileSpecPostScanActionsBigqueryExport { /** * The BigQuery table to export DataProfileScan results to. * Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID */ resultsTable?: string; } interface DatascanDataQualitySpec { /** * If set, the latest DataScan job result will be published to Dataplex Catalog. */ catalogPublishingEnabled?: boolean; /** * Actions to take upon job completion. * Structure is documented below. */ postScanActions?: outputs.dataplex.DatascanDataQualitySpecPostScanActions; /** * A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10 */ rowFilter?: string; /** * The list of rules to evaluate against a data source. At least one rule is required. * Structure is documented below. */ rules?: outputs.dataplex.DatascanDataQualitySpecRule[]; /** * The percentage of the records to be selected from the dataset for DataScan. * Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. 
* Sampling is not applied if `samplingPercent` is not specified, 0 or 100. */ samplingPercent?: number; } interface DatascanDataQualitySpecPostScanActions { /** * If set, results will be exported to the provided BigQuery table. * Structure is documented below. */ bigqueryExport?: outputs.dataplex.DatascanDataQualitySpecPostScanActionsBigqueryExport; /** * The configuration of notification report post scan action. * Structure is documented below. */ notificationReport?: outputs.dataplex.DatascanDataQualitySpecPostScanActionsNotificationReport; } interface DatascanDataQualitySpecPostScanActionsBigqueryExport { /** * The BigQuery table to export DataQualityScan results to. * Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID */ resultsTable?: string; } interface DatascanDataQualitySpecPostScanActionsNotificationReport { /** * This trigger is triggered whenever a scan job run ends, regardless of the result. */ jobEndTrigger?: outputs.dataplex.DatascanDataQualitySpecPostScanActionsNotificationReportJobEndTrigger; /** * This trigger is triggered when the scan job itself fails, regardless of the result. */ jobFailureTrigger?: outputs.dataplex.DatascanDataQualitySpecPostScanActionsNotificationReportJobFailureTrigger; /** * The individuals or groups who are designated to receive notifications upon triggers. * Structure is documented below. */ recipients: outputs.dataplex.DatascanDataQualitySpecPostScanActionsNotificationReportRecipients; /** * This trigger is triggered when the DQ score in the job result is less than a specified input score. * Structure is documented below.
*/ scoreThresholdTrigger?: outputs.dataplex.DatascanDataQualitySpecPostScanActionsNotificationReportScoreThresholdTrigger; } interface DatascanDataQualitySpecPostScanActionsNotificationReportJobEndTrigger { } interface DatascanDataQualitySpecPostScanActionsNotificationReportJobFailureTrigger { } interface DatascanDataQualitySpecPostScanActionsNotificationReportRecipients { /** * The email recipients who will receive the DataQualityScan results report. */ emails?: string[]; } interface DatascanDataQualitySpecPostScanActionsNotificationReportScoreThresholdTrigger { /** * The score range is in [0,100]. */ scoreThreshold?: number; } interface DatascanDataQualitySpecRule { /** * The unnested column which this rule is evaluated against. */ column?: string; /** * Description of the rule. * The maximum length is 1,024 characters. */ description?: string; /** * The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters. */ dimension: string; /** * Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules. */ ignoreNull?: boolean; /** * A mutable name for the rule. * The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). * The maximum length is 63 characters. * Must start with a letter. * Must end with a number or a letter. */ name?: string; /** * ColumnMap rule which evaluates whether each column value is null. */ nonNullExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleNonNullExpectation; /** * ColumnMap rule which evaluates whether each column value lies between a specified range. * Structure is documented below. */ rangeExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleRangeExpectation; /** * ColumnMap rule which evaluates whether each column value matches a specified regex. * Structure is documented below. 
*/ regexExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleRegexExpectation; /** * Table rule which evaluates whether each row passes the specified condition. * Structure is documented below. */ rowConditionExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleRowConditionExpectation; /** * ColumnMap rule which evaluates whether each column value is contained by a specified set. * Structure is documented below. */ setExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleSetExpectation; /** * Table rule which evaluates whether any row matches invalid state. * Structure is documented below. */ sqlAssertion?: outputs.dataplex.DatascanDataQualitySpecRuleSqlAssertion; /** * ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. * Structure is documented below. */ statisticRangeExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectation; /** * Whether the Rule is active or suspended. Default = false. */ suspended?: boolean; /** * Table rule which evaluates whether the provided expression is true. * Structure is documented below. */ tableConditionExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleTableConditionExpectation; /** * The minimum ratio of passingRows / totalRows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0). */ threshold?: number; /** * Row-level rule which evaluates whether each column value is unique. */ uniquenessExpectation?: outputs.dataplex.DatascanDataQualitySpecRuleUniquenessExpectation; } interface DatascanDataQualitySpecRuleNonNullExpectation { } interface DatascanDataQualitySpecRuleRangeExpectation { /** * The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided. */ maxValue?: string; /** * The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided. 
*/ minValue?: string; /** * Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. * Only relevant if a maxValue has been defined. Default = false. */ strictMaxEnabled?: boolean; /** * Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. * Only relevant if a minValue has been defined. Default = false. */ strictMinEnabled?: boolean; } interface DatascanDataQualitySpecRuleRegexExpectation { /** * A regular expression the column value is expected to match. */ regex: string; } interface DatascanDataQualitySpecRuleRowConditionExpectation { /** * The SQL expression. */ sqlExpression: string; } interface DatascanDataQualitySpecRuleSetExpectation { /** * Expected values for the column value. */ values: string[]; } interface DatascanDataQualitySpecRuleSqlAssertion { /** * The SQL statement. */ sqlStatement: string; } interface DatascanDataQualitySpecRuleStatisticRangeExpectation { /** * The maximum column statistic value allowed for a row to pass this validation. * At least one of minValue and maxValue need to be provided. */ maxValue?: string; /** * The minimum column statistic value allowed for a row to pass this validation. * At least one of minValue and maxValue need to be provided. */ minValue?: string; /** * column statistics. * Possible values are: `STATISTIC_UNDEFINED`, `MEAN`, `MIN`, `MAX`. */ statistic: string; /** * Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. * Only relevant if a maxValue has been defined. Default = false. */ strictMaxEnabled?: boolean; /** * Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. * Only relevant if a minValue has been defined. Default = false. */ strictMinEnabled?: boolean; } interface DatascanDataQualitySpecRuleTableConditionExpectation { /** * The SQL expression. 
*/ sqlExpression: string; } interface DatascanDataQualitySpecRuleUniquenessExpectation { } interface DatascanExecutionSpec { /** * The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table. */ field?: string; /** * Spec related to how often and when a scan should be triggered. * Structure is documented below. */ trigger: outputs.dataplex.DatascanExecutionSpecTrigger; } interface DatascanExecutionSpecTrigger { /** * The scan runs once via dataScans.run API. */ onDemand?: outputs.dataplex.DatascanExecutionSpecTriggerOnDemand; /** * The scan runs once upon DataScan creation. * Structure is documented below. */ oneTime?: outputs.dataplex.DatascanExecutionSpecTriggerOneTime; /** * The scan is scheduled to run periodically. * Structure is documented below. */ schedule?: outputs.dataplex.DatascanExecutionSpecTriggerSchedule; } interface DatascanExecutionSpecTriggerOnDemand { } interface DatascanExecutionSpecTriggerOneTime { /** * Time to live for the DataScan and its results after the one-time run completes. Accepts a string with a unit suffix 's' (e.g., '7200s'). Default is 24 hours. Ranges between 0 and 31536000 seconds (1 year). */ ttlAfterScanCompletion?: string; } interface DatascanExecutionSpecTriggerSchedule { /** * Cron schedule for running scans periodically. This field is required for Schedule scans. */ cron: string; } interface DatascanExecutionStatus { /** * (Output) * The time when the latest DataScanJob ended. */ latestJobEndTime: string; /** * (Output) * The time when the latest DataScanJob started. */ latestJobStartTime: string; } interface DatascanIamBindingCondition { description?: string; expression: string; title: string; } interface DatascanIamMemberCondition { description?: string; expression: string; title: string; } interface EntryAspect { /** * A nested object resource. * Structure is documented below.
*/ aspect: outputs.dataplex.EntryAspectAspect; /** * Depending on how the aspect is attached to the entry, the format of the aspect key can be one of the following: * If the aspect is attached directly to the entry: {project_number}.{locationId}.{aspectTypeId} * If the aspect is attached to an entry's path: {project_number}.{locationId}.{aspectTypeId}@{path} */ aspectKey: string; } interface EntryAspectAspect { /** * (Output) * The resource name of the type used to create this Aspect. */ aspectType: string; /** * (Output) * The time when the Aspect was created. */ createTime: string; /** * The content of the aspect in JSON form, according to its aspect type schema. The maximum size of the field is 120KB (encoded as UTF-8). */ data: string; /** * (Output) * The path in the entry under which the aspect is attached. */ path: string; /** * (Output) * The time when the Aspect was last modified. */ updateTime: string; } interface EntryEntrySource { /** * Structure is documented below. */ ancestors?: outputs.dataplex.EntryEntrySourceAncestor[]; /** * The time when the resource was created in the source system. */ createTime?: string; /** * A description of the data resource. Maximum length is 2,000 characters. */ description?: string; /** * A user-friendly display name. Maximum length is 500 characters. */ displayName?: string; /** * User-defined labels. The maximum size of keys and values is 128 characters each. * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * (Output) * Location of the resource in the source system. You can search the entry by this location. * By default, this should match the location of the entry group containing this entry. * A different value allows capturing the source location for data external to Google Cloud. */ location: string; /** * The platform containing the source system. Maximum length is 64 characters. 
*/ platform?: string; /** * The name of the resource in the source system. Maximum length is 4,000 characters. */ resource?: string; /** * The name of the source system. Maximum length is 64 characters. */ system?: string; /** * The time when the resource was last updated in the source system. * If the entry exists in the system and its EntrySource has updateTime populated, * further updates to the EntrySource of the entry must provide incremental updates to its updateTime. */ updateTime?: string; } interface EntryEntrySourceAncestor { /** * The name of the ancestor resource. */ name?: string; /** * The type of the ancestor resource. */ type?: string; } interface EntryGroupIamBindingCondition { description?: string; expression: string; title: string; } interface EntryGroupIamMemberCondition { description?: string; expression: string; title: string; } interface EntryLinkEntryReference { /** * The relative resource name of the referenced Entry, of the form: * projects/{project_id_or_number}/locations/{location_id}/entryGroups/{entry_group_id}/entries/{entry_id} */ name: string; /** * The path in the Entry that is referenced in the Entry Link. * Empty path denotes that the Entry itself is referenced in the Entry Link. */ path?: string; /** * The reference type of the Entry. * Possible values are: `SOURCE`, `TARGET`. */ type?: string; } interface EntryTypeIamBindingCondition { description?: string; expression: string; title: string; } interface EntryTypeIamMemberCondition { description?: string; expression: string; title: string; } interface EntryTypeRequiredAspect { /** * Required aspect type for the entry type. */ type?: string; } interface GetDataQualityRulesRule { /** * The unnested column which this rule is evaluated against. */ column: string; /** * Description of the rule. (The maximum length is 1,024 characters.) */ description: string; /** * The dimension a rule belongs to. 
Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "FRESHNESS", "VOLUME" */ dimension: string; /** * Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. * This field is only valid for the following type of rules: RangeExpectation, RegexExpectation, SetExpectation, UniquenessExpectation */ ignoreNull: boolean; /** * A mutable name for the rule. * The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). * The maximum length is 63 characters. * Must start with a letter. * Must end with a number or a letter. */ name: string; /** * Row-level rule which evaluates whether each column value is null. */ nonNullExpectations: outputs.dataplex.GetDataQualityRulesRuleNonNullExpectation[]; /** * Row-level rule which evaluates whether each column value lies between a specified range. */ rangeExpectations: outputs.dataplex.GetDataQualityRulesRuleRangeExpectation[]; /** * Row-level rule which evaluates whether each column value matches a specified regex. */ regexExpectations: outputs.dataplex.GetDataQualityRulesRuleRegexExpectation[]; /** * Row-level rule which evaluates whether each row in a table passes the specified condition. */ rowConditionExpectations: outputs.dataplex.GetDataQualityRulesRuleRowConditionExpectation[]; /** * Row-level rule which evaluates whether each column value is contained by a specified set. */ setExpectations: outputs.dataplex.GetDataQualityRulesRuleSetExpectation[]; /** * Aggregate rule which evaluates the number of rows returned for the provided statement. If any rows are returned, this rule fails. */ sqlAssertions: outputs.dataplex.GetDataQualityRulesRuleSqlAssertion[]; /** * Aggregate rule which evaluates whether the column aggregate statistic lies between a specified range. 
*/ statisticRangeExpectations: outputs.dataplex.GetDataQualityRulesRuleStatisticRangeExpectation[]; /** * Whether the Rule is active or suspended. Default is false. */ suspended: boolean; /** * Aggregate rule which evaluates whether the provided expression is true for a table. */ tableConditionExpectations: outputs.dataplex.GetDataQualityRulesRuleTableConditionExpectation[]; /** * The minimum ratio of passingRows / totalRows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0). This field is only valid for row-level type rules. */ threshold: number; /** * Row-level rule which evaluates whether each column value is unique. */ uniquenessExpectations: outputs.dataplex.GetDataQualityRulesRuleUniquenessExpectation[]; } interface GetDataQualityRulesRuleNonNullExpectation { } interface GetDataQualityRulesRuleRangeExpectation { /** * The maximum column value allowed for a row to pass this validation. */ maxValue: string; /** * The minimum column value allowed for a row to pass this validation. */ minValue: string; /** * Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. */ strictMaxEnabled: boolean; /** * Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. */ strictMinEnabled: boolean; } interface GetDataQualityRulesRuleRegexExpectation { /** * A regular expression the column value is expected to match. */ regex: string; } interface GetDataQualityRulesRuleRowConditionExpectation { /** * The SQL expression. */ sqlExpression: string; } interface GetDataQualityRulesRuleSetExpectation { /** * Expected values for the column value. */ values: string[]; } interface GetDataQualityRulesRuleSqlAssertion { /** * The SQL expression. */ sqlStatement: string; } interface GetDataQualityRulesRuleStatisticRangeExpectation { /** * The maximum column value allowed for a row to pass this validation. 
*/ maxValue: string; /** * The minimum column value allowed for a row to pass this validation. */ minValue: string; /** * The list of aggregate metrics a rule can be evaluated against. * Possible values: ["STATISTIC_UNDEFINED", "MEAN", "MIN", "MAX"] */ statistic: string; /** * Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. */ strictMaxEnabled: boolean; /** * Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. */ strictMinEnabled: boolean; } interface GetDataQualityRulesRuleTableConditionExpectation { /** * The SQL expression. */ sqlExpression: string; } interface GetDataQualityRulesRuleUniquenessExpectation { } interface GlossaryIamBindingCondition { description?: string; expression: string; title: string; } interface GlossaryIamMemberCondition { description?: string; expression: string; title: string; } interface LakeAssetStatus { /** * Number of active assets. */ activeAssets: number; /** * Number of assets that are in process of updating the security policy on attached resources. */ securityPolicyApplyingAssets: number; /** * Output only. The time when the lake was last updated. */ updateTime: string; } interface LakeIamBindingCondition { description?: string; expression: string; title: string; } interface LakeIamMemberCondition { description?: string; expression: string; title: string; } interface LakeMetastore { /** * Optional. A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}` */ service?: string; } interface LakeMetastoreStatus { /** * The URI of the endpoint used to access the Metastore service. */ endpoint: string; /** * Additional information about the current status. */ message: string; /** * Output only. Current state of the lake. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED */ state: string; /** * Output only. The time when the lake was last updated. */ updateTime: string; } interface TaskExecutionSpec { /** * The arguments to pass to the task. The args can use placeholders of the format ${placeholder} as part of key/value string. These will be interpolated before passing the args to the driver. Currently supported placeholders: - ${taskId} - ${job_time} To pass positional args, set the key as TASK_ARGS. The value should be a comma-separated string of all the positional arguments. To use a delimiter other than comma, refer to https://cloud.google.com/sdk/gcloud/reference/topic/escaping. In case of other keys being present in the args, then TASK_ARGS will be passed as the last argument. An object containing a list of 'key': value pairs. Example: { 'name': 'wrench', 'mass': '1.3kg', 'count': '3' }. */ args?: { [key: string]: string; }; /** * The Cloud KMS key to use for encryption, of the form: projects/{project_number}/locations/{locationId}/keyRings/{key-ring-name}/cryptoKeys/{key-name}. */ kmsKey?: string; /** * The maximum duration after which the job execution is expired. A duration in seconds with up to nine fractional digits, ending with 's'. Example: '3.5s'. */ maxJobExecutionLifetime?: string; /** * The project in which jobs are run. By default, the project containing the Lake is used. If a project is provided, the ExecutionSpec.service_account must belong to this project. */ project?: string; /** * Service account to use to execute a task. If not provided, the default Compute service account for the project is used. */ serviceAccount: string; } interface TaskExecutionStatus { /** * (Output) * latest job execution. * Structure is documented below. */ latestJobs: outputs.dataplex.TaskExecutionStatusLatestJob[]; /** * (Output) * Last update time of the status. 
*/ updateTime: string; } interface TaskExecutionStatusLatestJob { /** * (Output) * The time when the job ended. */ endTime: string; /** * (Output) * Additional information about the current state. */ message: string; /** * (Output) * The relative resource name of the job, of the form: projects/{project_number}/locations/{locationId}/lakes/{lakeId}/tasks/{taskId}/jobs/{jobId}. */ name: string; /** * (Output) * The number of times the job has been retried (excluding the initial attempt). */ retryCount: number; /** * (Output) * The underlying service running a job. */ service: string; /** * (Output) * The full resource name for the job run under a particular service. */ serviceJob: string; /** * (Output) * The time when the job was started. */ startTime: string; /** * (Output) * Execution state for the job. */ state: string; /** * (Output) * System generated globally unique ID for the job. */ uid: string; } interface TaskIamBindingCondition { description?: string; expression: string; title: string; } interface TaskIamMemberCondition { description?: string; expression: string; title: string; } interface TaskNotebook { /** * Cloud Storage URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * Cloud Storage URIs of files to be placed in the working directory of each executor. */ fileUris?: string[]; /** * Infrastructure specification for the execution. * Structure is documented below. */ infrastructureSpec?: outputs.dataplex.TaskNotebookInfrastructureSpec; /** * Path to input notebook. This can be the Cloud Storage URI of the notebook file or the path to a Notebook Content. The execution args are accessible as environment variables (TASK_key=value). */ notebook: string; } interface TaskNotebookInfrastructureSpec { /** * Compute resources needed for a Task when using Dataproc Serverless. * Structure is documented below. 
*/ batch?: outputs.dataplex.TaskNotebookInfrastructureSpecBatch; /** * Container Image Runtime Configuration. * Structure is documented below. */ containerImage?: outputs.dataplex.TaskNotebookInfrastructureSpecContainerImage; /** * Vpc network. * Structure is documented below. */ vpcNetwork?: outputs.dataplex.TaskNotebookInfrastructureSpecVpcNetwork; } interface TaskNotebookInfrastructureSpecBatch { /** * Total number of job executors. Executor Count should be between 2 and 100. [Default=2] */ executorsCount?: number; /** * Max configurable executors. If maxExecutorsCount > executorsCount, then auto-scaling is enabled. Max Executor Count should be between 2 and 1000. [Default=1000] */ maxExecutorsCount?: number; } interface TaskNotebookInfrastructureSpecContainerImage { /** * Container image to use. */ image?: string; /** * A list of Java JARS to add to the classpath. Valid input includes Cloud Storage URIs to Jar binaries. For example, gs://bucket-name/my/path/to/file.jar */ javaJars?: string[]; /** * Override to common configuration of open source components installed on the Dataproc cluster. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. For more information, see Cluster properties. */ properties?: { [key: string]: string; }; /** * A list of python packages to be installed. Valid formats include Cloud Storage URI to a PIP installable library. For example, gs://bucket-name/my/path/to/lib.tar.gz */ pythonPackages?: string[]; } interface TaskNotebookInfrastructureSpecVpcNetwork { /** * The Cloud VPC network in which the job is run. By default, the Cloud VPC network named Default within the project is used. */ network?: string; /** * List of network tags to apply to the job. */ networkTags?: string[]; /** * The Cloud VPC sub-network in which the job is run. 
*/ subNetwork?: string; } interface TaskSpark { /** * Cloud Storage URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * Cloud Storage URIs of files to be placed in the working directory of each executor. */ fileUris?: string[]; /** * Infrastructure specification for the execution. * Structure is documented below. */ infrastructureSpec?: outputs.dataplex.TaskSparkInfrastructureSpec; /** * The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris. The execution args are passed in as a sequence of named process arguments (--key=value). */ mainClass?: string; /** * The Cloud Storage URI of the jar file that contains the main class. The execution args are passed in as a sequence of named process arguments (--key=value). */ mainJarFileUri?: string; /** * The Gcloud Storage URI of the main Python file to use as the driver. Must be a .py file. The execution args are passed in as a sequence of named process arguments (--key=value). */ pythonScriptFile?: string; /** * The query text. The execution args are used to declare a set of script variables (set key='value';). */ sqlScript?: string; /** * A reference to a query file. This can be the Cloud Storage URI of the query file or it can the path to a SqlScript Content. The execution args are used to declare a set of script variables (set key='value';). */ sqlScriptFile?: string; } interface TaskSparkInfrastructureSpec { /** * Compute resources needed for a Task when using Dataproc Serverless. * Structure is documented below. */ batch?: outputs.dataplex.TaskSparkInfrastructureSpecBatch; /** * Container Image Runtime Configuration. * Structure is documented below. */ containerImage?: outputs.dataplex.TaskSparkInfrastructureSpecContainerImage; /** * Vpc network. * Structure is documented below. 
*/ vpcNetwork?: outputs.dataplex.TaskSparkInfrastructureSpecVpcNetwork; } interface TaskSparkInfrastructureSpecBatch { /** * Total number of job executors. Executor Count should be between 2 and 100. [Default=2] */ executorsCount?: number; /** * Max configurable executors. If maxExecutorsCount > executorsCount, then auto-scaling is enabled. Max Executor Count should be between 2 and 1000. [Default=1000] */ maxExecutorsCount?: number; } interface TaskSparkInfrastructureSpecContainerImage { /** * Container image to use. */ image?: string; /** * A list of Java JARS to add to the classpath. Valid input includes Cloud Storage URIs to Jar binaries. For example, gs://bucket-name/my/path/to/file.jar */ javaJars?: string[]; /** * Override to common configuration of open source components installed on the Dataproc cluster. The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. For more information, see Cluster properties. */ properties?: { [key: string]: string; }; /** * A list of python packages to be installed. Valid formats include Cloud Storage URI to a PIP installable library. For example, gs://bucket-name/my/path/to/lib.tar.gz */ pythonPackages?: string[]; } interface TaskSparkInfrastructureSpecVpcNetwork { /** * The Cloud VPC network in which the job is run. By default, the Cloud VPC network named Default within the project is used. */ network?: string; /** * List of network tags to apply to the job. */ networkTags?: string[]; /** * The Cloud VPC sub-network in which the job is run. */ subNetwork?: string; } interface TaskTriggerSpec { /** * Prevent the task from executing. This does not cancel already running tasks. It is intended to temporarily disable RECURRING tasks. */ disabled?: boolean; /** * Number of retry attempts before aborting. Set to zero to never attempt to retry a failed task. 
*/ maxRetries?: number; /** * Cron schedule (https://en.wikipedia.org/wiki/Cron) for running tasks periodically. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: 'CRON_TZ=${IANA_TIME_ZONE}' or 'TZ=${IANA_TIME_ZONE}'. The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, CRON_TZ=America/New_York 1 * * * *, or TZ=America/New_York 1 * * * *. This field is required for RECURRING tasks. */ schedule?: string; /** * The first run of the task will be after this time. If not specified, the task will run shortly after being submitted if ON_DEMAND and based on the schedule if RECURRING. */ startTime?: string; /** * Trigger type of the user-specified Task * Possible values are: `ON_DEMAND`, `RECURRING`. */ type: string; } interface ZoneAssetStatus { /** * Number of active assets. */ activeAssets: number; /** * Number of assets that are in process of updating the security policy on attached resources. */ securityPolicyApplyingAssets: number; /** * Output only. The time when the zone was last updated. */ updateTime: string; } interface ZoneDiscoverySpec { /** * Optional. Configuration for CSV data. */ csvOptions: outputs.dataplex.ZoneDiscoverySpecCsvOptions; /** * Required. Whether discovery is enabled. */ enabled: boolean; /** * Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. */ excludePatterns?: string[]; /** * Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. */ includePatterns?: string[]; /** * Optional. 
Configuration for Json data. */ jsonOptions: outputs.dataplex.ZoneDiscoverySpecJsonOptions; /** * Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". */ schedule: string; } interface ZoneDiscoverySpecCsvOptions { /** * Optional. The delimiter being used to separate values. This defaults to ','. */ delimiter?: string; /** * Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings. */ disableTypeInference?: boolean; /** * Optional. The character encoding of the data. The default is UTF-8. */ encoding?: string; /** * Optional. The number of rows to interpret as header rows that should be skipped when reading data rows. */ headerRows?: number; } interface ZoneDiscoverySpecJsonOptions { /** * Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean). */ disableTypeInference?: boolean; /** * Optional. The character encoding of the data. The default is UTF-8. */ encoding?: string; } interface ZoneIamBindingCondition { description?: string; expression: string; title: string; } interface ZoneIamMemberCondition { description?: string; expression: string; title: string; } interface ZoneResourceSpec { /** * Required. Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. 
Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION * * - - - */ locationType: string; } } export declare namespace dataproc { interface AutoscalingPolicyBasicAlgorithm { /** * Duration between scaling events. A scaling period starts after the * update operation from the previous event has completed. * Bounds: [2m, 1d]. Default: 2m. */ cooldownPeriod?: string; /** * YARN autoscaling configuration. * Structure is documented below. */ yarnConfig: outputs.dataproc.AutoscalingPolicyBasicAlgorithmYarnConfig; } interface AutoscalingPolicyBasicAlgorithmYarnConfig { /** * Timeout for YARN graceful decommissioning of Node Managers. Specifies the * duration to wait for jobs to complete before forcefully removing workers * (and potentially interrupting jobs). Only applicable to downscaling operations. * Bounds: [0s, 1d]. */ gracefulDecommissionTimeout: string; /** * Fraction of average pending memory in the last cooldown period for which to * remove workers. A scale-down factor of 1 will result in scaling down so that there * is no available memory remaining after the update (more aggressive scaling). * A scale-down factor of 0 disables removing workers, which can be beneficial for * autoscaling a single job. * Bounds: [0.0, 1.0]. */ scaleDownFactor: number; /** * Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. * For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must * recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 * means the autoscaler will scale down on any recommended change. * Bounds: [0.0, 1.0]. Default: 0.0. */ scaleDownMinWorkerFraction?: number; /** * Fraction of average pending memory in the last cooldown period for which to * add workers. A scale-up factor of 1.0 will result in scaling up so that there * is no pending memory remaining after the update (more aggressive scaling). 
* A scale-up factor closer to 0 will result in a smaller magnitude of scaling up * (less aggressive scaling). * Bounds: [0.0, 1.0]. */ scaleUpFactor: number; /** * Minimum scale-up threshold as a fraction of total cluster size before scaling * occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler * must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of * 0 means the autoscaler will scale up on any recommended change. * Bounds: [0.0, 1.0]. Default: 0.0. */ scaleUpMinWorkerFraction?: number; } interface AutoscalingPolicyIamBindingCondition { description?: string; expression: string; title: string; } interface AutoscalingPolicyIamMemberCondition { description?: string; expression: string; title: string; } interface AutoscalingPolicySecondaryWorkerConfig { /** * Maximum number of instances for this group. Note that by default, clusters will not use * secondary workers. Required for secondary workers if the minimum secondary instances is set. * Bounds: [minInstances, ). Defaults to 0. */ maxInstances?: number; /** * Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0. */ minInstances?: number; /** * Weight for the instance group, which is used to determine the fraction of total workers * in the cluster from this instance group. For example, if primary workers have weight 2, * and secondary workers have weight 1, the cluster will have approximately 2 primary workers * for each secondary worker. * The cluster may not reach the specified balance if constrained by min/max bounds or other * autoscaling settings. For example, if maxInstances for secondary workers is 0, then only * primary workers will be added. The cluster can also be out of balance when created. 
* If weight is not set on any instance group, the cluster will default to equal weight for * all groups: the cluster will attempt to maintain an equal number of workers in each group * within the configured size bounds for each group. If weight is set for one group only, * the cluster will default to zero weight on the unset group. For example if weight is set * only on primary workers, the cluster will use primary workers only and no secondary workers. */ weight?: number; } interface AutoscalingPolicyWorkerConfig { /** * Maximum number of instances for this group. */ maxInstances: number; /** * Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2. */ minInstances?: number; /** * Weight for the instance group, which is used to determine the fraction of total workers * in the cluster from this instance group. For example, if primary workers have weight 2, * and secondary workers have weight 1, the cluster will have approximately 2 primary workers * for each secondary worker. * The cluster may not reach the specified balance if constrained by min/max bounds or other * autoscaling settings. For example, if maxInstances for secondary workers is 0, then only * primary workers will be added. The cluster can also be out of balance when created. * If weight is not set on any instance group, the cluster will default to equal weight for * all groups: the cluster will attempt to maintain an equal number of workers in each group * within the configured size bounds for each group. If weight is set for one group only, * the cluster will default to zero weight on the unset group. For example if weight is set * only on primary workers, the cluster will use primary workers only and no secondary workers. */ weight?: number; } interface BatchEnvironmentConfig { /** * Execution configuration for a workload. * Structure is documented below. 
*/ executionConfig?: outputs.dataproc.BatchEnvironmentConfigExecutionConfig; /** * Peripherals configuration that workload has access to. * Structure is documented below. */ peripheralsConfig: outputs.dataproc.BatchEnvironmentConfigPeripheralsConfig; } interface BatchEnvironmentConfigExecutionConfig { /** * Authentication configuration for a workload is used to set the default identity for the workload execution. * Structure is documented below. */ authenticationConfig?: outputs.dataproc.BatchEnvironmentConfigExecutionConfigAuthenticationConfig; /** * The Cloud KMS key to use for encryption. */ kmsKey?: string; /** * Tags used for network traffic control. */ networkTags?: string[]; /** * Network configuration for workload execution. */ networkUri?: string; /** * Service account that used to execute workload. */ serviceAccount: string; /** * A Cloud Storage bucket used to stage workload dependencies, config files, and store * workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, * Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, * and then create and manage project-level, per-location staging and temporary buckets. * This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. */ stagingBucket?: string; /** * Subnetwork configuration for workload execution. */ subnetworkUri?: string; /** * The duration after which the workload will be terminated. * When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing * work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it * exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, * it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. 
* Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), * the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or * when ttl has been exceeded, whichever occurs first. */ ttl: string; } interface BatchEnvironmentConfigExecutionConfigAuthenticationConfig { /** * Authentication type for the user workload running in containers. * Possible values are: `SERVICE_ACCOUNT`, `END_USER_CREDENTIALS`. */ userWorkloadAuthenticationType?: string; } interface BatchEnvironmentConfigPeripheralsConfig { /** * Resource name of an existing Dataproc Metastore service. */ metastoreService?: string; /** * The Spark History Server configuration for the workload. * Structure is documented below. */ sparkHistoryServerConfig?: outputs.dataproc.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig; } interface BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig { /** * Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. */ dataprocCluster?: string; } interface BatchPysparkBatch { /** * HCFS URIs of archives to be extracted into the working directory of each executor. * Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments that can be set as batch * properties, such as --conf, since a collision can occur that causes an incorrect batch submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. */ jarFileUris?: string[]; /** * The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file. */ mainPythonFileUri?: string; /** * HCFS file URIs of Python files to pass to the PySpark framework. 
* Supported file types: .py, .egg, and .zip. */ pythonFileUris?: string[]; } interface BatchRuntimeConfig { /** * Optional. Autotuning configuration of the workload. * Structure is documented below. */ autotuningConfig?: outputs.dataproc.BatchRuntimeConfigAutotuningConfig; /** * Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. */ cohort?: string; /** * Optional custom container image for the job runtime environment. If not specified, a default container image will be used. */ containerImage?: string; /** * (Output) * A mapping of property names to values, which are used to configure workload execution. */ effectiveProperties: { [key: string]: string; }; /** * A mapping of property names to values, which are used to configure workload execution. */ properties?: { [key: string]: string; }; /** * Version of the batch runtime. */ version: string; } interface BatchRuntimeConfigAutotuningConfig { /** * Optional. Scenarios for which tunings are applied. * Each value may be one of: `SCALING`, `BROADCAST_HASH_JOIN`, `MEMORY`. */ scenarios?: string[]; } interface BatchRuntimeInfo { /** * (Output) * Approximate workload resource usage, calculated when the workload completes(see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing)) * Structure is documented below. */ approximateUsages: outputs.dataproc.BatchRuntimeInfoApproximateUsage[]; /** * (Output) * Snapshot of current workload resource usage(see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing)) * Structure is documented below. */ currentUsages: outputs.dataproc.BatchRuntimeInfoCurrentUsage[]; /** * (Output) * A URI pointing to the location of the diagnostics tarball. */ diagnosticOutputUri: string; /** * (Output) * Map of remote access endpoints (such as web interfaces and APIs) to their URIs. 
*/ endpoints: { [key: string]: string; }; /** * (Output) * A URI pointing to the location of the stdout and stderr of the workload. */ outputUri: string; } interface BatchRuntimeInfoApproximateUsage { /** * (Output) * Accelerator type being used, if any. */ acceleratorType: string; /** * (Output) * Accelerator usage in (milliAccelerator x seconds) */ milliAcceleratorSeconds: string; /** * (Output) * DCU (Dataproc Compute Units) usage in (milliDCU x seconds) */ milliDcuSeconds: string; /** * (Output) * Shuffle storage usage in (GB x seconds) */ shuffleStorageGbSeconds: string; } interface BatchRuntimeInfoCurrentUsage { /** * (Output) * Accelerator type being used, if any. */ acceleratorType: string; /** * (Output) * Milli (one-thousandth) accelerator.. */ milliAccelerator: string; /** * (Output) * Milli (one-thousandth) Dataproc Compute Units (DCUs). */ milliDcu: string; /** * (Output) * Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier. */ milliDcuPremium: string; /** * (Output) * Shuffle Storage in gigabytes (GB). */ shuffleStorageGb: string; /** * (Output) * Shuffle Storage in gigabytes (GB) charged at premium tier. */ shuffleStorageGbPremium: string; /** * (Output) * The timestamp of the usage snapshot. */ snapshotTime: string; } interface BatchSparkBatch { /** * HCFS URIs of archives to be extracted into the working directory of each executor. * Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments that can be set as batch * properties, such as --conf, since a collision can occur that causes an incorrect batch submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. */ jarFileUris?: string[]; /** * The name of the driver main class. 
The jar file that contains the class must be in the * classpath or specified in jarFileUris. */ mainClass?: string; /** * The HCFS URI of the jar file that contains the main class. */ mainJarFileUri?: string; } interface BatchSparkRBatch { /** * HCFS URIs of archives to be extracted into the working directory of each executor. * Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments that can be set as batch * properties, such as --conf, since a collision can occur that causes an incorrect batch submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. */ fileUris?: string[]; /** * The HCFS URI of the main R file to use as the driver. Must be a .R or .r file. */ mainRFileUri?: string; } interface BatchSparkSqlBatch { /** * HCFS URIs of jar files to be added to the Spark CLASSPATH. */ jarFileUris?: string[]; /** * The HCFS URI of the script that contains Spark SQL queries to execute. */ queryFileUri?: string; /** * Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). */ queryVariables?: { [key: string]: string; }; } interface BatchStateHistory { /** * (Output) * The state of the batch at this point in history. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State). */ state: string; /** * (Output) * Details about the state at this point in history. */ stateMessage: string; /** * (Output) * The time when the batch entered the historical state. */ stateStartTime: string; } interface ClusterClusterConfig { /** * The autoscaling policy config associated with the cluster. * Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can * only be removed by setting `policyUri = ""`, rather than removing the whole block. * Structure defined below. 
*/ autoscalingConfig?: outputs.dataproc.ClusterClusterConfigAutoscalingConfig; /** * A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. * Structure defined below. */ auxiliaryNodeGroups: outputs.dataproc.ClusterClusterConfigAuxiliaryNodeGroup[]; /** * The name of the cloud storage bucket ultimately used to house the staging data * for the cluster. If `stagingBucket` is specified, it will contain this value, otherwise * it will be the auto generated name. */ bucket: string; /** * The tier of the cluster. */ clusterTier: string; /** * The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. * Structure defined below. */ dataprocMetricConfig: outputs.dataproc.ClusterClusterConfigDataprocMetricConfig; /** * The Customer managed encryption keys settings for the cluster. * Structure defined below. */ encryptionConfig?: outputs.dataproc.ClusterClusterConfigEncryptionConfig; /** * The config settings for port access on the cluster. * Structure defined below. */ endpointConfig: outputs.dataproc.ClusterClusterConfigEndpointConfig; /** * Common config settings for resources of Google Compute Engine cluster * instances, applicable to all instances in the cluster. Structure defined below. */ gceClusterConfig: outputs.dataproc.ClusterClusterConfigGceClusterConfig; /** * Commands to execute on each node after config is completed. * You can specify multiple versions of these. Structure defined below. */ initializationActions?: outputs.dataproc.ClusterClusterConfigInitializationAction[]; /** * The settings for auto deletion cluster schedule. * Structure defined below. */ lifecycleConfig?: outputs.dataproc.ClusterClusterConfigLifecycleConfig; /** * The Google Compute Engine config settings for the master instances * in a cluster. Structure defined below. */ masterConfig: outputs.dataproc.ClusterClusterConfigMasterConfig; /** * The config setting for metastore service with the cluster. 
* Structure defined below. * - - - */ metastoreConfig?: outputs.dataproc.ClusterClusterConfigMetastoreConfig; /** * The Google Compute Engine config settings for the additional * instances in a cluster. Structure defined below. * * **NOTE** : `preemptibleWorkerConfig` is * an alias for the api's [secondaryWorkerConfig](https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig#InstanceGroupConfig). The name doesn't necessarily mean it is preemptible and is named as * such for legacy/compatibility reasons. */ preemptibleWorkerConfig: outputs.dataproc.ClusterClusterConfigPreemptibleWorkerConfig; /** * Security related configuration. Structure defined below. */ securityConfig?: outputs.dataproc.ClusterClusterConfigSecurityConfig; /** * The config settings for software inside the cluster. * Structure defined below. */ softwareConfig: outputs.dataproc.ClusterClusterConfigSoftwareConfig; /** * The Cloud Storage staging bucket used to stage files, * such as Hadoop jars, between client machines and the cluster. * Note: If you don't explicitly specify a `stagingBucket` * then GCP will auto create / assign one for you. However, you are not guaranteed * an auto generated bucket which is solely dedicated to your cluster; it may be shared * with other clusters in the same region/zone also choosing to use the auto generation * option. */ stagingBucket?: string; /** * The Cloud Storage temp bucket used to store ephemeral cluster * and jobs data, such as Spark and MapReduce history files. * Note: If you don't explicitly specify a `tempBucket` then GCP will auto create / assign one for you. */ tempBucket: string; /** * The Google Compute Engine config settings for the worker instances * in a cluster. Structure defined below. */ workerConfig: outputs.dataproc.ClusterClusterConfigWorkerConfig; } interface ClusterClusterConfigAutoscalingConfig { /** * The autoscaling policy used by the cluster. * * Only resource names including projectid and location (region) are valid. 
Examples: * * `https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataprocRegion]/autoscalingPolicies/[policyId]` * `projects/[projectId]/locations/[dataprocRegion]/autoscalingPolicies/[policyId]` * Note that the policy must be in the same project and Cloud Dataproc region. * * - - - */ policyUri: string; } interface ClusterClusterConfigAuxiliaryNodeGroup { /** * A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters. */ nodeGroupId: string; /** * Node group configuration. */ nodeGroups: outputs.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroup[]; } interface ClusterClusterConfigAuxiliaryNodeGroupNodeGroup { /** * The Node group resource name. */ name: string; /** * The node group instance group configuration. */ nodeGroupConfig: outputs.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig; /** * Node group roles. * One of `"DRIVER"`. */ roles: string[]; } interface ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig { /** * The Compute Engine accelerator (GPU) configuration for these instances. Can be specified * multiple times. */ accelerators?: outputs.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator[]; /** * Disk Config */ diskConfig: outputs.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig; /** * List of auxiliary node group instance names which have been assigned to the cluster. */ instanceNames: string[]; /** * The name of a Google Compute Engine machine type * to create for the node group. If not specified, GCP will default to a predetermined * computed value (currently `n1-standard-4`). */ machineType: string; /** * The name of a minimum generation of CPU family * for the node group. If not specified, GCP will default to a predetermined computed value * for each zone. 
See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) * for details about which CPU families are available (and defaulted) for each zone. */ minCpuPlatform: string; /** * Specifies the number of master nodes to create. * Please set a number greater than 0. Node Group must have at least 1 instance. */ numInstances: number; } interface ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator { /** * The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`. * * * - - - */ acceleratorCount: number; /** * The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`. */ acceleratorType: string; } interface ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig { /** * Size of the primary disk attached to each node, specified * in GB. The primary disk contains the boot volume and system libraries, and the * smallest allowed disk size is 10GB. GCP will default to a predetermined * computed value if not set (currently 500GB). Note: If SSDs are not * attached, it also contains the HDFS data blocks and Hadoop working directories. */ bootDiskSizeGb: number; /** * The disk type of the primary disk attached to each node. * One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`. */ bootDiskType?: string; /** * Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). */ localSsdInterface?: string; /** * The amount of local SSD disks that will be attached to each master cluster node. * Defaults to 0. */ numLocalSsds: number; } interface ClusterClusterConfigDataprocMetricConfig { /** * Metrics sources to enable. 
*/ metrics: outputs.dataproc.ClusterClusterConfigDataprocMetricConfigMetric[]; } interface ClusterClusterConfigDataprocMetricConfigMetric { /** * One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course. * * - - - */ metricOverrides?: string[]; /** * A source for the collection of Dataproc OSS metrics (see [available OSS metrics](https://cloud.google.com//dataproc/docs/guides/monitoring#available_oss_metrics)). */ metricSource: string; } interface ClusterClusterConfigEncryptionConfig { /** * The Cloud KMS key name to use for PD disk encryption for * all instances in the cluster. * * - - - */ kmsKeyName: string; } interface ClusterClusterConfigEndpointConfig { /** * The flag to enable http access to specific ports * on the cluster from external sources (aka Component Gateway). Defaults to false. */ enableHttpPortAccess: boolean; /** * The map of port descriptions to URLs. Will only be populated if * `enableHttpPortAccess` is true. */ httpPorts: { [key: string]: string; }; } interface ClusterClusterConfigGceClusterConfig { /** * Confidential Instance Config for clusters using [Confidential VMs](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/confidential-compute) */ confidentialInstanceConfig: outputs.dataproc.ClusterClusterConfigGceClusterConfigConfidentialInstanceConfig; /** * By default, clusters are not restricted to internal IP addresses, * and will have ephemeral external IP addresses assigned to each instance. If set to true, all * instances in the cluster will only have internal IP addresses. Note: Private Google Access * (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster * will be launched in. 
*/ internalIpOnly?: boolean; /** * A map of the Compute Engine metadata entries to add to all instances * (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). */ metadata: { [key: string]: string; }; /** * The name or selfLink of the Google Compute Engine * network to the cluster will be part of. Conflicts with `subnetwork`. * If neither is specified, this defaults to the "default" network. */ network: string; /** * Node Group Affinity for sole-tenant clusters. */ nodeGroupAffinity: outputs.dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity; /** * Reservation Affinity for consuming zonal reservation. */ reservationAffinity: outputs.dataproc.ClusterClusterConfigGceClusterConfigReservationAffinity; /** * A map of resource manager tags to add to all instances. * Keys must be in the format `tagKeys/{tag_key_id}` and values in the format `tagValues/{tag_value_id}` * (see [Secure tags](https://cloud.google.com/dataproc/docs/guides/use-secure-tags)). */ resourceManagerTags: { [key: string]: string; }; /** * The service account to be used by the Node VMs. * If not specified, the "default" service account is used. */ serviceAccount?: string; /** * The set of Google API scopes * to be made available on all of the node VMs under the `serviceAccount` * specified. Both OAuth2 URLs and gcloud * short names are supported. To allow full access to all Cloud APIs, use the * `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes). */ serviceAccountScopes: string[]; /** * Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). 
* * - - - */ shieldedInstanceConfig: outputs.dataproc.ClusterClusterConfigGceClusterConfigShieldedInstanceConfig; /** * The name or selfLink of the Google Compute Engine * subnetwork the cluster will be part of. Conflicts with `network`. */ subnetwork?: string; /** * The list of instance tags applied to instances in the cluster. * Tags are used to identify valid sources or targets for network firewalls. */ tags?: string[]; /** * The GCP zone where your data is stored and used (i.e. where * the master and the worker nodes will be created in). If `region` is set to 'global' (default) * then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) * to determine this automatically for you. * Note: This setting additionally determines and restricts * which computing resources are available for use with other configs such as * `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`. */ zone: string; } interface ClusterClusterConfigGceClusterConfigConfidentialInstanceConfig { /** * Defines whether the instance should have confidential compute enabled. */ enableConfidentialCompute?: boolean; } interface ClusterClusterConfigGceClusterConfigNodeGroupAffinity { /** * The URI of a sole-tenant node group resource that the cluster will be created on. */ nodeGroupUri: string; } interface ClusterClusterConfigGceClusterConfigReservationAffinity { /** * Corresponds to the type of reservation consumption. */ consumeReservationType?: string; /** * Corresponds to the label key of reservation resource. */ key?: string; /** * Corresponds to the label values of reservation resource. */ values?: string[]; } interface ClusterClusterConfigGceClusterConfigShieldedInstanceConfig { /** * Defines whether instances have integrity monitoring enabled. * * - - - */ enableIntegrityMonitoring?: boolean; /** * Defines whether instances have Secure Boot enabled. 
*/ enableSecureBoot?: boolean; /** * Defines whether instances have the [vTPM](https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm) enabled. */ enableVtpm?: boolean; } interface ClusterClusterConfigInitializationAction { /** * The script to be executed during initialization of the cluster. * The script must be a GCS file with a gs:// prefix. */ script: string; /** * The maximum duration (in seconds) which `script` is * allowed to take to execute its action. GCP will default to a predetermined * computed value if not set (currently 300). * * - - - */ timeoutSec?: number; } interface ClusterClusterConfigLifecycleConfig { /** * The time when cluster will be auto-deleted. * A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. * Example: "2014-10-02T15:01:23.045123456Z". */ autoDeleteTime?: string; /** * The time when cluster will be auto-stopped. * A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. * Example: "2014-10-02T15:01:23.045123456Z". * * - - - */ autoStopTime?: string; /** * The duration to keep the cluster alive while idling * (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d]. */ idleDeleteTtl?: string; /** * Time when the cluster became idle * (most recent job finished) and became eligible for deletion due to idleness. */ idleStartTime: string; /** * The duration to keep the cluster alive while idling * (no jobs running). After this TTL, the cluster will be stopped. Valid range: [10m, 14d]. */ idleStopTtl?: string; } interface ClusterClusterConfigMasterConfig { /** * The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. */ accelerators?: outputs.dataproc.ClusterClusterConfigMasterConfigAccelerator[]; /** * Disk Config */ diskConfig: outputs.dataproc.ClusterClusterConfigMasterConfigDiskConfig; /** * The URI for the image to use for this worker. 
See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) * for more information. */ imageUri: string; /** * Instance flexibility Policy allowing a mixture of VM shapes. */ instanceFlexibilityPolicy: outputs.dataproc.ClusterClusterConfigMasterConfigInstanceFlexibilityPolicy; /** * List of master instance names which * have been assigned to the cluster. */ instanceNames: string[]; /** * The name of a Google Compute Engine machine type * to create for the master. If not specified, GCP will default to a predetermined * computed value (currently `n1-standard-4`). */ machineType: string; /** * The name of a minimum generation of CPU family * for the master. If not specified, GCP will default to a predetermined computed value * for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) * for details about which CPU families are available (and defaulted) for each zone. */ minCpuPlatform: string; /** * Specifies the number of master nodes to create. * If not specified, GCP will default to a predetermined computed value (currently 1). */ numInstances: number; } interface ClusterClusterConfigMasterConfigAccelerator { /** * The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`. * * > The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select * zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check [accelerator availability by zone](https://cloud.google.com/compute/docs/reference/rest/v1/acceleratorTypes/list) * if you are trying to use accelerators in a given zone. * * - - - */ acceleratorCount: number; /** * The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`. 
*/ acceleratorType: string; } interface ClusterClusterConfigMasterConfigDiskConfig { /** * Size of the primary disk attached to each node, specified * in GB. The primary disk contains the boot volume and system libraries, and the * smallest allowed disk size is 10GB. GCP will default to a predetermined * computed value if not set (currently 500GB). Note: If SSDs are not * attached, it also contains the HDFS data blocks and Hadoop working directories. */ bootDiskSizeGb: number; /** * The disk type of the primary disk attached to each node. * One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`. */ bootDiskType?: string; /** * Optional. Interface type of local SSDs (default is "scsi"). * Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile * Memory Express). See * [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance). */ localSsdInterface?: string; /** * The amount of local SSD disks that will be * attached to each master cluster node. Defaults to 0. */ numLocalSsds: number; } interface ClusterClusterConfigMasterConfigInstanceFlexibilityPolicy { /** * List of instance selection options that the group will use when creating new VMs. */ instanceSelectionLists: outputs.dataproc.ClusterClusterConfigMasterConfigInstanceFlexibilityPolicyInstanceSelectionList[]; /** * A list of instance selection results in the group. */ instanceSelectionResults: outputs.dataproc.ClusterClusterConfigMasterConfigInstanceFlexibilityPolicyInstanceSelectionResult[]; } interface ClusterClusterConfigMasterConfigInstanceFlexibilityPolicyInstanceSelectionList { /** * Full machine-type names, e.g. `"n1-standard-16"`. */ machineTypes: string[]; /** * Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. 
Machine types and instance selections with the same priority have the same preference. */ rank: number; } interface ClusterClusterConfigMasterConfigInstanceFlexibilityPolicyInstanceSelectionResult { /** * Full machine-type names, e.g. "n1-standard-16". */ machineType: string; /** * Number of VM provisioned with the machine_type. */ vmCount: number; } interface ClusterClusterConfigMetastoreConfig { /** * Resource name of an existing Dataproc Metastore service. * * Only resource names including projectid and location (region) are valid. Examples: * * `projects/[projectId]/locations/[dataprocRegion]/services/[service-name]` */ dataprocMetastoreService: string; } interface ClusterClusterConfigPreemptibleWorkerConfig { /** * Disk Config */ diskConfig: outputs.dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfig; /** * Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. */ instanceFlexibilityPolicy: outputs.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy; /** * List of preemptible instance names which have been assigned * to the cluster. */ instanceNames: string[]; /** * Specifies the number of preemptible nodes to create. * Defaults to 0. */ numInstances: number; /** * Specifies the preemptibility of the secondary workers. The default value is `PREEMPTIBLE` * Accepted values are: * * PREEMPTIBILITY_UNSPECIFIED * * NON_PREEMPTIBLE * * PREEMPTIBLE */ preemptibility?: string; } interface ClusterClusterConfigPreemptibleWorkerConfigDiskConfig { /** * Size of the primary disk attached to each preemptible worker node, specified * in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined * computed value if not set (currently 500GB). Note: If SSDs are not * attached, it also contains the HDFS data blocks and Hadoop working directories. */ bootDiskSizeGb: number; /** * The disk type of the primary disk attached to each preemptible worker node. * One of `"pd-ssd"` or `"pd-standard"`. 
Defaults to `"pd-standard"`. */ bootDiskType?: string; /** * Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). */ localSsdInterface?: string; /** * The amount of local SSD disks that will be * attached to each preemptible worker node. Defaults to 0. */ numLocalSsds: number; } interface ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy { /** * List of instance selection options that the group will use when creating new VMs. */ instanceSelectionLists: outputs.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList[]; /** * A list of instance selection results in the group. */ instanceSelectionResults: outputs.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult[]; /** * Defines how the Group selects the provisioning model to ensure required reliability. */ provisioningModelMix?: outputs.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix; } interface ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList { /** * Full machine-type names, e.g. `"n1-standard-16"`. */ machineTypes: string[]; /** * Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. */ rank: number; } interface ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult { /** * Full machine-type names, e.g. "n1-standard-16". */ machineType: string; /** * Number of VM provisioned with the machine_type. 
*/ vmCount: number; } interface ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyProvisioningModelMix { /** * The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standardCapacityBase, then it will start using standardCapacityPercentAboveBase to mix Spot with Standard VMs. eg. If 15 instances are requested and standardCapacityBase is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. */ standardCapacityBase?: number; /** * The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standardCapacityBase. eg. If 15 instances are requested and standardCapacityBase is 5 and standardCapacityPercentAboveBase is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. * - - - */ standardCapacityPercentAboveBase?: number; } interface ClusterClusterConfigSecurityConfig { /** * Identity Configuration. At least one of `identityConfig` * or `kerberosConfig` is required. */ identityConfig?: outputs.dataproc.ClusterClusterConfigSecurityConfigIdentityConfig; /** * Kerberos Configuration. At least one of `identityConfig` * or `kerberosConfig` is required. */ kerberosConfig?: outputs.dataproc.ClusterClusterConfigSecurityConfigKerberosConfig; } interface ClusterClusterConfigSecurityConfigIdentityConfig { /** * The end user to service account mappings * in a service account based multi-tenant cluster * * - - - */ userServiceAccountMapping: { [key: string]: string; }; } interface ClusterClusterConfigSecurityConfigKerberosConfig { /** * The admin server (IP or hostname) for the * remote trusted realm in a cross realm trust relationship. 
*/ crossRealmTrustAdminServer?: string; /** * The KDC (IP or hostname) for the * remote trusted realm in a cross realm trust relationship. */ crossRealmTrustKdc?: string; /** * The remote realm the Dataproc on-cluster KDC will * trust, should the user enable cross realm trust. */ crossRealmTrustRealm?: string; /** * The Cloud Storage URI of a KMS * encrypted file containing the shared password between the on-cluster Kerberos realm * and the remote trusted realm, in a cross realm trust relationship. */ crossRealmTrustSharedPasswordUri?: string; /** * Flag to indicate whether to Kerberize the cluster. */ enableKerberos?: boolean; /** * The Cloud Storage URI of a KMS encrypted file containing * the master key of the KDC database. */ kdcDbKeyUri?: string; /** * The Cloud Storage URI of a KMS encrypted file containing * the password to the user provided key. For the self-signed certificate, this password * is generated by Dataproc. */ keyPasswordUri?: string; /** * The Cloud Storage URI of a KMS encrypted file containing * the password to the user provided keystore. For the self-signed certificated, the password * is generated by Dataproc. */ keystorePasswordUri?: string; /** * The Cloud Storage URI of the keystore file used for SSL encryption. * If not provided, Dataproc will provide a self-signed certificate. */ keystoreUri?: string; /** * The URI of the KMS key used to encrypt various sensitive files. */ kmsKeyUri: string; /** * The name of the on-cluster Kerberos realm. If not specified, the * uppercased domain of hostnames will be the realm. */ realm?: string; /** * The Cloud Storage URI of a KMS encrypted file * containing the root principal password. */ rootPrincipalPasswordUri: string; /** * The lifetime of the ticket granting ticket, in hours. */ tgtLifetimeHours?: number; /** * The Cloud Storage URI of a KMS encrypted file * containing the password to the user provided truststore. For the self-signed * certificate, this password is generated by Dataproc. 
*/ truststorePasswordUri?: string; /** * The Cloud Storage URI of the truststore file used for * SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */ truststoreUri?: string; } interface ClusterClusterConfigSoftwareConfig { /** * The Cloud Dataproc image version to use * for the cluster - this controls the sets of software versions * installed onto the nodes when you create clusters. If not specified, defaults to the * latest version. For a list of valid versions see * [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions) */ imageVersion: string; /** * The set of optional components to activate on the cluster. See [Available Optional Components](https://cloud.google.com/dataproc/docs/concepts/components/overview#available_optional_components). * * - - - */ optionalComponents?: string[]; /** * A list of override and additional properties (key/value pairs) * used to modify various aspects of the common configuration files used when creating * a cluster. For a list of valid properties please see * [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties) */ overrideProperties?: { [key: string]: string; }; /** * A list of the properties used to set the daemon config files. * This will include any values supplied by the user via `cluster_config.software_config.override_properties` */ properties: { [key: string]: string; }; } interface ClusterClusterConfigWorkerConfig { /** * The Compute Engine accelerator configuration for these instances. Can be specified multiple times. */ accelerators?: outputs.dataproc.ClusterClusterConfigWorkerConfigAccelerator[]; /** * Disk Config */ diskConfig: outputs.dataproc.ClusterClusterConfigWorkerConfigDiskConfig; /** * The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) * for more information. 
*/ imageUri: string; /** * Instance flexibility Policy allowing a mixture of VM shapes. */ instanceFlexibilityPolicy: outputs.dataproc.ClusterClusterConfigWorkerConfigInstanceFlexibilityPolicy; /** * List of worker instance names which have been assigned * to the cluster. */ instanceNames: string[]; /** * The name of a Google Compute Engine machine type * to create for the worker nodes. If not specified, GCP will default to a predetermined * computed value (currently `n1-standard-4`). */ machineType: string; /** * The name of a minimum generation of CPU family * for the master. If not specified, GCP will default to a predetermined computed value * for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) * for details about which CPU families are available (and defaulted) for each zone. */ minCpuPlatform: string; /** * The minimum number of primary worker instances to create. If `minNumInstances` is set, cluster creation will succeed if the number of primary workers created is at least equal to the `minNumInstances` number. */ minNumInstances: number; /** * Specifies the number of worker nodes to create. * If not specified, GCP will default to a predetermined computed value (currently 2). * There is currently a beta feature which allows you to run a * [Single Node Cluster](https://cloud.google.com/dataproc/docs/concepts/single-node-clusters). * In order to take advantage of this you need to set * `"dataproc:dataproc.allow.zero.workers" = "true"` in * `cluster_config.software_config.properties` */ numInstances: number; } interface ClusterClusterConfigWorkerConfigAccelerator { /** * The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`. 
* * > The Cloud Dataproc API can return unintuitive error messages when using accelerators; even when you have defined an accelerator, Auto Zone Placement does not exclusively select * zones that have that accelerator available. If you get a 400 error that the accelerator can't be found, this is a likely cause. Make sure you check [accelerator availability by zone](https://cloud.google.com/compute/docs/reference/rest/v1/acceleratorTypes/list) * if you are trying to use accelerators in a given zone. * * - - - */ acceleratorCount: number; /** * The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`. */ acceleratorType: string; } interface ClusterClusterConfigWorkerConfigDiskConfig { /** * Size of the primary disk attached to each worker node, specified * in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined * computed value if not set (currently 500GB). Note: If SSDs are not * attached, it also contains the HDFS data blocks and Hadoop working directories. */ bootDiskSizeGb: number; /** * The disk type of the primary disk attached to each node. * One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`. */ bootDiskType?: string; /** * Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). */ localSsdInterface?: string; /** * The amount of local SSD disks that will be * attached to each worker cluster node. Defaults to 0. */ numLocalSsds: number; } interface ClusterClusterConfigWorkerConfigInstanceFlexibilityPolicy { /** * List of instance selection options that the group will use when creating new VMs. */ instanceSelectionLists: outputs.dataproc.ClusterClusterConfigWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList[]; /** * A list of instance selection results in the group. 
*/ instanceSelectionResults: outputs.dataproc.ClusterClusterConfigWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult[]; } interface ClusterClusterConfigWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList { /** * Full machine-type names, e.g. `"n1-standard-16"`. */ machineTypes: string[]; /** * Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. */ rank: number; } interface ClusterClusterConfigWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult { /** * Full machine-type names, e.g. "n1-standard-16". */ machineType: string; /** * Number of VM provisioned with the machine_type. */ vmCount: number; } interface ClusterIAMBindingCondition { description?: string; expression: string; title: string; } interface ClusterIAMMemberCondition { description?: string; expression: string; title: string; } interface ClusterVirtualClusterConfig { /** * Configuration of auxiliary services used by this cluster. * Structure defined below. */ auxiliaryServicesConfig: outputs.dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfig; /** * The configuration for running the Dataproc cluster on Kubernetes. * Structure defined below. * - - - */ kubernetesClusterConfig: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfig; /** * The Cloud Storage staging bucket used to stage files, * such as Hadoop jars, between client machines and the cluster. * Note: If you don't explicitly specify a `stagingBucket` * then GCP will auto create / assign one for you. However, you are not guaranteed * an auto generated bucket which is solely dedicated to your cluster; it may be shared * with other clusters in the same region/zone also choosing to use the auto generation * option. 
*/ stagingBucket?: string; } interface ClusterVirtualClusterConfigAuxiliaryServicesConfig { /** * The Hive Metastore configuration for this workload. */ metastoreConfig?: outputs.dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig; /** * The Spark History Server configuration for the workload. */ sparkHistoryServerConfig?: outputs.dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig; } interface ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { /** * Resource name of an existing Dataproc Metastore service. */ dataprocMetastoreService?: string; } interface ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { /** * Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. * - - - */ dataprocCluster?: string; } interface ClusterVirtualClusterConfigKubernetesClusterConfig { /** * The configuration for running the Dataproc cluster on GKE. */ gkeClusterConfig: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig; /** * A namespace within the Kubernetes cluster to deploy into. * If this namespace does not exist, it is created. * If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. * If not specified, the name of the Dataproc Cluster is used. */ kubernetesNamespace?: string; /** * The software configuration for this Dataproc cluster running on Kubernetes. */ kubernetesSoftwareConfig: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig; } interface ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { /** * A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster * (the GKE cluster can be zonal or regional) */ gkeClusterTarget?: string; /** * GKE node pools where workloads will be scheduled. At least one node pool must be assigned the `DEFAULT` * GkeNodePoolTarget.Role. 
If a GkeNodePoolTarget is not specified, Dataproc constructs a `DEFAULT` GkeNodePoolTarget. * Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings. */ nodePoolTargets?: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget[]; } interface ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { /** * The target GKE node pool. */ nodePool: string; /** * The configuration for the GKE node pool. * If specified, Dataproc attempts to create a node pool with the specified shape. * If one with the same name already exists, it is verified against all specified fields. * If a field differs, the virtual cluster creation will fail. */ nodePoolConfig: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig; /** * The roles associated with the GKE node pool. * One of `"DEFAULT"`, `"CONTROLLER"`, `"SPARK_DRIVER"` or `"SPARK_EXECUTOR"`. */ roles: string[]; } interface ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { /** * The autoscaler configuration for this node pool. * The autoscaler is enabled only when a valid configuration is present. */ autoscaling: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling; /** * The node pool configuration. */ config: outputs.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig; /** * The list of Compute Engine zones where node pool nodes associated * with a Dataproc on GKE virtual cluster will be located. * - - - */ locations: string[]; } interface ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { /** * The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0. 
*/ maxNodeCount?: number; /** * The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount. */ minNodeCount?: number; } interface ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { /** * The number of local SSD disks to attach to the node, * which is limited by the maximum number of disks allowable per zone. */ localSsdCount?: number; /** * The name of a Compute Engine machine type. */ machineType?: string; /** * Minimum CPU platform to be used by this instance. * The instance may be scheduled on the specified or a newer CPU platform. * Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge". */ minCpuPlatform?: string; /** * Whether the nodes are created as preemptible VM instances. * Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the * CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). */ preemptible?: boolean; /** * Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag. */ spot?: boolean; } interface ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { /** * The components that should be installed in this Dataproc cluster. The key must be a string from the * KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. * * **NOTE** : `component_version[SPARK]` is mandatory to set, or the creation of the cluster will fail. */ componentVersion: { [key: string]: string; }; /** * The properties to set on daemon config files. Property keys are specified in prefix:property format, * for example spark:spark.kubernetes.container.image. */ properties: { [key: string]: string; }; } interface GdcApplicationEnvironmentSparkApplicationEnvironmentConfig { /** * A map of default Spark properties to apply to workloads in this application environment. 
These defaults may be overridden by per-application properties. */ defaultProperties?: { [key: string]: string; }; /** * The default Dataproc version to use for applications submitted to this application environment */ defaultVersion?: string; } interface GdcServiceInstanceGdceCluster { /** * Gdce cluster resource id. */ gdceCluster: string; } interface GdcServiceInstanceSparkServiceInstanceConfig { } interface GdcSparkApplicationPysparkApplicationConfig { /** * HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. */ jarFileUris?: string[]; /** * The HCFS URI of the main Python file to use as the driver. Must be a .py file. */ mainPythonFileUri: string; /** * HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. */ pythonFileUris?: string[]; } interface GdcSparkApplicationSparkApplicationConfig { /** * HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments that can be set as application properties, such as `--conf`, since a collision can occur that causes an incorrect application submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. 
*/ fileUris?: string[]; /** * HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. */ jarFileUris?: string[]; /** * The name of the driver main class. The jar file that contains the class must be in the classpath or specified in `jarFileUris`. */ mainClass?: string; /** * The HCFS URI of the jar file that contains the main class. */ mainJarFileUri?: string; } interface GdcSparkApplicationSparkRApplicationConfig { /** * HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */ fileUris?: string[]; /** * The HCFS URI of the main R file to use as the driver. Must be a .R file. */ mainRFileUri: string; } interface GdcSparkApplicationSparkSqlApplicationConfig { /** * HCFS URIs of jar files to be added to the Spark CLASSPATH. */ jarFileUris?: string[]; /** * The HCFS URI of the script that contains SQL queries. */ queryFileUri?: string; /** * Represents a list of queries. * Structure is documented below. */ queryList?: outputs.dataproc.GdcSparkApplicationSparkSqlApplicationConfigQueryList; /** * Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`). */ scriptVariables?: { [key: string]: string; }; } interface GdcSparkApplicationSparkSqlApplicationConfigQueryList { /** * The queries to run. */ queries: string[]; } interface GetMetastoreServiceEncryptionConfig { /** * The fully qualified customer provided Cloud KMS key name to use for customer data encryption. 
* Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)' */ kmsKey: string; } interface GetMetastoreServiceHiveMetastoreConfig { /** * A mapping of Hive metastore version to the auxiliary version configuration. * When specified, a secondary Hive metastore service is created along with the primary service. * All auxiliary versions must be less than the service's primary version. * The key is the auxiliary service name and it must match the regular expression a-z?. * This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. */ auxiliaryVersions: outputs.dataproc.GetMetastoreServiceHiveMetastoreConfigAuxiliaryVersion[]; /** * A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). * The mappings override system defaults (some keys cannot be overridden) */ configOverrides: { [key: string]: string; }; /** * The protocol to use for the metastore service endpoint. If unspecified, defaults to 'THRIFT'. Default value: "THRIFT" Possible values: ["THRIFT", "GRPC"] */ endpointProtocol: string; /** * Information used to configure the Hive metastore service as a service principal in a Kerberos realm. */ kerberosConfigs: outputs.dataproc.GetMetastoreServiceHiveMetastoreConfigKerberosConfig[]; /** * The Hive metastore schema version. */ version: string; } interface GetMetastoreServiceHiveMetastoreConfigAuxiliaryVersion { /** * A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. * If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence. 
*/ configOverrides: { [key: string]: string; }; key: string; /** * The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version. */ version: string; } interface GetMetastoreServiceHiveMetastoreConfigKerberosConfig { /** * A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). */ keytabs: outputs.dataproc.GetMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab[]; /** * A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. */ krb5ConfigGcsUri: string; /** * A Kerberos principal that exists in the both the keytab the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format. */ principal: string; } interface GetMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab { /** * The relative resource name of a Secret Manager secret version, in the following form: * * "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}". */ cloudSecret: string; } interface GetMetastoreServiceMaintenanceWindow { /** * The day of week, when the window starts. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ dayOfWeek: string; /** * The hour of day (0-23) when the window starts. */ hourOfDay: number; } interface GetMetastoreServiceMetadataIntegration { /** * The integration config for the Data Catalog service. */ dataCatalogConfigs: outputs.dataproc.GetMetastoreServiceMetadataIntegrationDataCatalogConfig[]; } interface GetMetastoreServiceMetadataIntegrationDataCatalogConfig { /** * Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. 
*/ enabled: boolean; } interface GetMetastoreServiceNetworkConfig { /** * The consumer-side network configuration for the Dataproc Metastore instance. */ consumers: outputs.dataproc.GetMetastoreServiceNetworkConfigConsumer[]; /** * Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network. */ customRoutesEnabled: boolean; } interface GetMetastoreServiceNetworkConfigConsumer { /** * The URI of the endpoint used to access the metastore service. */ endpointUri: string; /** * The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. * It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. * There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: * 'projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id} */ subnetwork: string; } interface GetMetastoreServiceScalingConfig { /** * Represents the autoscaling configuration of a metastore service. */ autoscalingConfigs: outputs.dataproc.GetMetastoreServiceScalingConfigAutoscalingConfig[]; /** * Metastore instance sizes. Possible values: ["EXTRA_SMALL", "SMALL", "MEDIUM", "LARGE", "EXTRA_LARGE"] */ instanceSize: string; /** * Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. */ scalingFactor: number; } interface GetMetastoreServiceScalingConfigAutoscalingConfig { /** * Defines whether autoscaling is enabled. The default value is false. */ autoscalingEnabled: boolean; /** * Output only. The scaling factor of a service with autoscaling enabled. */ autoscalingFactor: number; /** * Represents the limit configuration of a metastore service. 
*/ limitConfigs: outputs.dataproc.GetMetastoreServiceScalingConfigAutoscalingConfigLimitConfig[]; } interface GetMetastoreServiceScalingConfigAutoscalingConfigLimitConfig { /** * The maximum scaling factor that the service will autoscale to. The default value is 6.0. */ maxScalingFactor: number; /** * The minimum scaling factor that the service will autoscale to. The default value is 0.1. */ minScalingFactor: number; } interface GetMetastoreServiceScheduledBackup { /** * A Cloud Storage URI of a folder, in the format gs:///. A sub-folder containing backup files will be stored below it. */ backupLocation: string; /** * The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups. */ cronSchedule: string; /** * Defines whether the scheduled backup is enabled. The default value is false. */ enabled: boolean; /** * Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC. */ timeZone: string; } interface GetMetastoreServiceTelemetryConfig { /** * The output format of the Dataproc Metastore service's logs. Default value: "JSON" Possible values: ["LEGACY", "JSON"] */ logFormat: string; } interface JobHadoopConfig { /** * HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. 
*/ fileUris?: string[]; /** * HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. */ jarFileUris?: string[]; /** * The runtime logging config of the job */ loggingConfig: outputs.dataproc.JobHadoopConfigLoggingConfig; /** * The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri` */ mainClass?: string; /** * The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass` */ mainJarFileUri?: string; /** * A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in `/etc/hadoop/conf/*-site` and classes in user code.. * * * `logging_config.driver_log_levels`- (Required) The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ properties?: { [key: string]: string; }; } interface JobHadoopConfigLoggingConfig { /** * Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'. */ driverLogLevels: { [key: string]: string; }; } interface JobHiveConfig { /** * Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false. */ continueOnFailure?: boolean; /** * HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. 
*/ jarFileUris?: string[]; /** * A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`, `/etc/hive/conf/hive-site.xml`, and classes in user code.. */ properties?: { [key: string]: string; }; /** * HCFS URI of file containing Hive script to execute as the job. * Conflicts with `queryList` */ queryFileUri?: string; /** * The list of Hive queries or statements to execute as part of the job. * Conflicts with `queryFileUri` */ queryLists?: string[]; /** * Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`). */ scriptVariables?: { [key: string]: string; }; } interface JobIAMBindingCondition { description?: string; expression: string; title: string; } interface JobIAMMemberCondition { description?: string; expression: string; title: string; } interface JobPigConfig { /** * Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false. */ continueOnFailure?: boolean; /** * HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. * * * `logging_config.driver_log_levels`- (Required) The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ jarFileUris?: string[]; /** * The runtime logging config of the job */ loggingConfig: outputs.dataproc.JobPigConfigLoggingConfig; /** * A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`, `/etc/pig/conf/pig.properties`, and classes in user code. 
*/ properties?: { [key: string]: string; }; /** * HCFS URI of file containing Hive script to execute as the job. * Conflicts with `queryList` */ queryFileUri?: string; /** * The list of Hive queries or statements to execute as part of the job. * Conflicts with `queryFileUri` */ queryLists?: string[]; /** * Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`). */ scriptVariables?: { [key: string]: string; }; } interface JobPigConfigLoggingConfig { /** * Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'. */ driverLogLevels: { [key: string]: string; }; } interface JobPlacement { /** * The name of the cluster where the job will be submitted */ clusterName: string; /** * Output-only. A cluster UUID generated by the Cloud Dataproc service when the job is submitted */ clusterUuid: string; } interface JobPrestoConfig { /** * Presto client tags to attach to this query. */ clientTags?: string[]; /** * Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false. */ continueOnFailure?: boolean; /** * The runtime logging config of the job */ loggingConfig: outputs.dataproc.JobPrestoConfigLoggingConfig; /** * The format in which query output will be displayed. See the Presto documentation for supported output formats. * * * `logging_config.driver_log_levels`- (Required) The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ outputFormat?: string; /** * A mapping of property names to values. Used to set Presto session properties Equivalent to using the --session flag in the Presto CLI. */ properties?: { [key: string]: string; }; /** * The HCFS URI of the script that contains SQL queries. 
* Conflicts with `queryList` */ queryFileUri?: string; /** * The list of SQL queries or statements to execute as part of the job. * Conflicts with `queryFileUri` */ queryLists?: string[]; } interface JobPrestoConfigLoggingConfig { /** * Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'. */ driverLogLevels: { [key: string]: string; }; } interface JobPysparkConfig { /** * HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. */ args?: string[]; /** * HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. */ jarFileUris?: string[]; /** * The runtime logging config of the job */ loggingConfig: outputs.dataproc.JobPysparkConfigLoggingConfig; /** * The HCFS URI of the main Python file to use as the driver. Must be a .py file. */ mainPythonFileUri: string; /** * A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in `/etc/spark/conf/spark-defaults.conf` and classes in user code. * * * `logging_config.driver_log_levels`- (Required) The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ properties?: { [key: string]: string; }; /** * HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. */ pythonFileUris?: string[]; } interface JobPysparkConfigLoggingConfig { /** * Optional. The per-package log levels for the driver. 
This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'. */ driverLogLevels: { [key: string]: string; }; } interface JobReference { /** * The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs */ jobId: string; } interface JobScheduling { /** * Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. */ maxFailuresPerHour: number; /** * Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. */ maxFailuresTotal: number; } interface JobSparkConfig { /** * HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. */ args?: string[]; /** * HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. */ jarFileUris?: string[]; /** * The runtime logging config of the job */ loggingConfig: outputs.dataproc.JobSparkConfigLoggingConfig; /** * The class containing the main method of the driver. Must be in a * provided jar or jar that is already on the classpath. Conflicts with `mainJarFileUri` */ mainClass?: string; /** * The HCFS URI of jar file containing * the driver jar. Conflicts with `mainClass` */ mainJarFileUri?: string; /** * A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in `/etc/spark/conf/spark-defaults.conf` and classes in user code. 
* * * `logging_config.driver_log_levels`- (Required) The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ properties?: { [key: string]: string; }; } interface JobSparkConfigLoggingConfig { /** * Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'. */ driverLogLevels: { [key: string]: string; }; } interface JobSparksqlConfig { /** * HCFS URIs of jar files to be added to the Spark CLASSPATH. * * * `logging_config.driver_log_levels`- (Required) The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ jarFileUris?: string[]; /** * The runtime logging config of the job */ loggingConfig: outputs.dataproc.JobSparksqlConfigLoggingConfig; /** * A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. */ properties?: { [key: string]: string; }; /** * The HCFS URI of the script that contains SQL queries. * Conflicts with `queryList` */ queryFileUri?: string; /** * The list of SQL queries or statements to execute as part of the job. * Conflicts with `queryFileUri` */ queryLists?: string[]; /** * Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`). */ scriptVariables?: { [key: string]: string; }; } interface JobSparksqlConfigLoggingConfig { /** * Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'. 
*/ driverLogLevels: { [key: string]: string; }; } interface JobStatus { /** * Optional job state details, such as an error description if the state is ERROR. */ details: string; /** * A state message specifying the overall job state. */ state: string; /** * The time when this state was entered. */ stateStartTime: string; /** * Additional state information, which includes status reported by the agent. */ substate: string; } interface MetastoreDatabaseIamBindingCondition { description?: string; expression: string; title: string; } interface MetastoreDatabaseIamMemberCondition { description?: string; expression: string; title: string; } interface MetastoreFederationBackendMetastore { /** * The type of the backend metastore. * Possible values are: `METASTORE_TYPE_UNSPECIFIED`, `DATAPROC_METASTORE`, `BIGQUERY`. */ metastoreType: string; /** * The relative resource name of the metastore that is being federated. The formats of the relative resource names for the currently supported metastores are listed below: Dataplex: projects/{projectId}/locations/{location}/lakes/{lake_id} BigQuery: projects/{projectId} Dataproc Metastore: projects/{projectId}/locations/{location}/services/{serviceId} */ name: string; /** * The identifier for this object. Format specified above. */ rank: string; } interface MetastoreFederationIamBindingCondition { description?: string; expression: string; title: string; } interface MetastoreFederationIamMemberCondition { description?: string; expression: string; title: string; } interface MetastoreServiceEncryptionConfig { /** * The fully qualified customer provided Cloud KMS key name to use for customer data encryption. * Use the following format: `projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)` */ kmsKey: string; } interface MetastoreServiceHiveMetastoreConfig { /** * A mapping of Hive metastore version to the auxiliary version configuration. 
* When specified, a secondary Hive metastore service is created along with the primary service. * All auxiliary versions must be less than the service's primary version. * The key is the auxiliary service name and it must match the regular expression a-z?. * This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. * Structure is documented below. */ auxiliaryVersions?: outputs.dataproc.MetastoreServiceHiveMetastoreConfigAuxiliaryVersion[]; /** * A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). * The mappings override system defaults (some keys cannot be overridden) */ configOverrides: { [key: string]: string; }; /** * The protocol to use for the metastore service endpoint. If unspecified, defaults to `THRIFT`. * Default value is `THRIFT`. * Possible values are: `THRIFT`, `GRPC`. */ endpointProtocol?: string; /** * Information used to configure the Hive metastore service as a service principal in a Kerberos realm. * Structure is documented below. */ kerberosConfig?: outputs.dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfig; /** * The Hive metastore schema version. */ version: string; } interface MetastoreServiceHiveMetastoreConfigAuxiliaryVersion { /** * A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. * If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence. */ configOverrides?: { [key: string]: string; }; /** * The identifier for this object. Format specified above. */ key: string; /** * The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version. 
*/ version: string; } interface MetastoreServiceHiveMetastoreConfigKerberosConfig { /** * A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). * Structure is documented below. */ keytab: outputs.dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab; /** * A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly. */ krb5ConfigGcsUri: string; /** * A Kerberos principal that exists in the both the keytab the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format. */ principal: string; } interface MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab { /** * The relative resource name of a Secret Manager secret version, in the following form: * "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}". */ cloudSecret: string; } interface MetastoreServiceIamBindingCondition { description?: string; expression: string; title: string; } interface MetastoreServiceIamMemberCondition { description?: string; expression: string; title: string; } interface MetastoreServiceMaintenanceWindow { /** * The day of week, when the window starts. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeek: string; /** * The hour of day (0-23) when the window starts. */ hourOfDay: number; } interface MetastoreServiceMetadataIntegration { /** * The integration config for the Data Catalog service. * Structure is documented below. */ dataCatalogConfig: outputs.dataproc.MetastoreServiceMetadataIntegrationDataCatalogConfig; } interface MetastoreServiceMetadataIntegrationDataCatalogConfig { /** * Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. 
*/ enabled: boolean; } interface MetastoreServiceNetworkConfig { /** * The consumer-side network configuration for the Dataproc Metastore instance. * Structure is documented below. */ consumers: outputs.dataproc.MetastoreServiceNetworkConfigConsumer[]; /** * (Optional, Beta) * Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network. */ customRoutesEnabled?: boolean; } interface MetastoreServiceNetworkConfigConsumer { /** * (Output) * The URI of the endpoint used to access the metastore service. */ endpointUri: string; /** * The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. * It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. * There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: * `projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id} */ subnetwork: string; } interface MetastoreServiceScalingConfig { /** * Represents the autoscaling configuration of a metastore service. * Structure is documented below. */ autoscalingConfig?: outputs.dataproc.MetastoreServiceScalingConfigAutoscalingConfig; /** * Metastore instance sizes. * Possible values are: `EXTRA_SMALL`, `SMALL`, `MEDIUM`, `LARGE`, `EXTRA_LARGE`. */ instanceSize?: string; /** * Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. */ scalingFactor?: number; } interface MetastoreServiceScalingConfigAutoscalingConfig { /** * Defines whether autoscaling is enabled. The default value is false. */ autoscalingEnabled?: boolean; /** * (Output) * Output only. The scaling factor of a service with autoscaling enabled. */ autoscalingFactor: number; /** * Represents the limit configuration of a metastore service. * Structure is documented below. 
*/ limitConfig: outputs.dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfig; } interface MetastoreServiceScalingConfigAutoscalingConfigLimitConfig { /** * The maximum scaling factor that the service will autoscale to. The default value is 6.0. */ maxScalingFactor: number; /** * The minimum scaling factor that the service will autoscale to. The default value is 0.1. */ minScalingFactor: number; } interface MetastoreServiceScheduledBackup { /** * A Cloud Storage URI of a folder, in the format gs:///. A sub-folder containing backup files will be stored below it. */ backupLocation: string; /** * The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups. */ cronSchedule?: string; /** * Defines whether the scheduled backup is enabled. The default value is false. */ enabled: boolean; /** * Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC. */ timeZone: string; } interface MetastoreServiceTelemetryConfig { /** * The output format of the Dataproc Metastore service's logs. * Default value is `JSON`. * Possible values are: `LEGACY`, `JSON`. */ logFormat?: string; } interface MetastoreTableIamBindingCondition { description?: string; expression: string; title: string; } interface MetastoreTableIamMemberCondition { description?: string; expression: string; title: string; } interface SessionTemplateEnvironmentConfig { /** * Execution configuration for a workload. * Structure is documented below. */ executionConfig?: outputs.dataproc.SessionTemplateEnvironmentConfigExecutionConfig; /** * Peripherals configuration that workload has access to. * Structure is documented below. 
*/ peripheralsConfig: outputs.dataproc.SessionTemplateEnvironmentConfigPeripheralsConfig; } interface SessionTemplateEnvironmentConfigExecutionConfig { /** * Authentication configuration for a workload is used to set the default identity for the workload execution. * Structure is documented below. */ authenticationConfig?: outputs.dataproc.SessionTemplateEnvironmentConfigExecutionConfigAuthenticationConfig; /** * The duration to keep the session alive while it's idling. * Exceeding this threshold causes the session to terminate. Minimum value is 10 minutes; maximum value is 14 days. * Defaults to 1 hour if not set. If both ttl and idleTtl are specified for an interactive session, the conditions * are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has * been exceeded, whichever occurs first. */ idleTtl?: string; /** * The Cloud KMS key to use for encryption. */ kmsKey?: string; /** * Tags used for network traffic control. */ networkTags?: string[]; /** * Service account that is used to execute the workload. */ serviceAccount: string; /** * A Cloud Storage bucket used to stage workload dependencies, config files, and store * workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, * Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, * and then create and manage project-level, per-location staging and temporary buckets. * This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. */ stagingBucket?: string; /** * Subnetwork configuration for workload execution. */ subnetworkUri?: string; /** * The duration after which the workload will be terminated. * When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing * work to finish. 
If ttl is not specified for a session workload, the workload will be allowed to run until it * exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, * it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. * Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), * the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or * when ttl has been exceeded, whichever occurs first. */ ttl: string; } interface SessionTemplateEnvironmentConfigExecutionConfigAuthenticationConfig { /** * Authentication type for the user workload running in containers. * Possible values are: `SERVICE_ACCOUNT`, `END_USER_CREDENTIALS`. */ userWorkloadAuthenticationType?: string; } interface SessionTemplateEnvironmentConfigPeripheralsConfig { /** * Resource name of an existing Dataproc Metastore service. */ metastoreService?: string; /** * The Spark History Server configuration for the workload. * Structure is documented below. */ sparkHistoryServerConfig?: outputs.dataproc.SessionTemplateEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig; } interface SessionTemplateEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig { /** * Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. */ dataprocCluster?: string; } interface SessionTemplateJupyterSession { /** * Display name, shown in the Jupyter kernelspec card. */ displayName?: string; /** * Kernel to be used with Jupyter interactive session. * Possible values are: `PYTHON`, `SCALA`. */ kernel?: string; } interface SessionTemplateRuntimeConfig { /** * Optional custom container image for the job runtime environment. If not specified, a default container image will be used. 
*/ containerImage?: string; /** * (Output) * A mapping of property names to values, which are used to configure workload execution. */ effectiveProperties: { [key: string]: string; }; /** * A mapping of property names to values, which are used to configure workload execution. */ properties?: { [key: string]: string; }; /** * Version of the session runtime. */ version?: string; } interface SessionTemplateSparkConnectSession { } interface WorkflowTemplateEncryptionConfig { /** * The Cloud KMS key name to use for encrypting workflow template [job arguments](https://docs.cloud.google.com/dataproc/docs/concepts/workflows/use-workflows). * * When this key is provided, the following workflow template job arguments, if present, are [CMEK encrypted](https://docs.cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): * * - [FlinkJob args](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) * - [HadoopJob args](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) * - [SparkJob args](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) * - [SparkRJob args](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) * - [PySparkJob args](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) * - [SparkSqlJob](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries * - [HiveJob](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries * - [PigJob](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries * - [PrestoJob](https://docs.cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries */ kmsKey?: string; } interface WorkflowTemplateJob { /** * Job is a Hadoop job. 
*/ hadoopJob?: outputs.dataproc.WorkflowTemplateJobHadoopJob; /** * Job is a Hive job. */ hiveJob?: outputs.dataproc.WorkflowTemplateJobHiveJob; /** * The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job. */ labels?: { [key: string]: string; }; /** * Job is a Pig job. */ pigJob?: outputs.dataproc.WorkflowTemplateJobPigJob; /** * The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. */ prerequisiteStepIds?: string[]; /** * Job is a Presto job. */ prestoJob?: outputs.dataproc.WorkflowTemplateJobPrestoJob; /** * Job is a PySpark job. */ pysparkJob?: outputs.dataproc.WorkflowTemplateJobPysparkJob; /** * Job scheduling configuration. */ scheduling?: outputs.dataproc.WorkflowTemplateJobScheduling; /** * Job is a Spark job. */ sparkJob?: outputs.dataproc.WorkflowTemplateJobSparkJob; /** * Job is a SparkR job. */ sparkRJob?: outputs.dataproc.WorkflowTemplateJobSparkRJob; /** * Job is a SparkSql job. */ sparkSqlJob?: outputs.dataproc.WorkflowTemplateJobSparkSqlJob; /** * Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. */ stepId: string; } interface WorkflowTemplateJobHadoopJob { /** * HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. 
Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. */ fileUris?: string[]; /** * Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. */ jarFileUris?: string[]; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobHadoopJobLoggingConfig; /** * The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. */ mainClass?: string; /** * The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' */ mainJarFileUri?: string; /** * A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. */ properties?: { [key: string]: string; }; } interface WorkflowTemplateJobHadoopJobLoggingConfig { /** * The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobHiveJob { /** * Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */ continueOnFailure?: boolean; /** * HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. 
*/ jarFileUris?: string[]; /** * A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. */ properties?: { [key: string]: string; }; /** * The HCFS URI of the script that contains Hive queries. */ queryFileUri?: string; /** * A list of queries. */ queryList?: outputs.dataproc.WorkflowTemplateJobHiveJobQueryList; /** * Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`). */ scriptVariables?: { [key: string]: string; }; } interface WorkflowTemplateJobHiveJobQueryList { /** * Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } */ queries: string[]; } interface WorkflowTemplateJobPigJob { /** * Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */ continueOnFailure?: boolean; /** * HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. */ jarFileUris?: string[]; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobPigJobLoggingConfig; /** * A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. */ properties?: { [key: string]: string; }; /** * The HCFS URI of the script that contains the Pig queries. */ queryFileUri?: string; /** * A list of queries. 
*/ queryList?: outputs.dataproc.WorkflowTemplateJobPigJobQueryList; /** * Mapping of query variable names to values (equivalent to the Pig command: `name=`). */ scriptVariables?: { [key: string]: string; }; } interface WorkflowTemplateJobPigJobLoggingConfig { /** * The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobPigJobQueryList { /** * Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } */ queries: string[]; } interface WorkflowTemplateJobPrestoJob { /** * Presto client tags to attach to this query */ clientTags?: string[]; /** * Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */ continueOnFailure?: boolean; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobPrestoJobLoggingConfig; /** * The format in which query output will be displayed. See the Presto documentation for supported output formats */ outputFormat?: string; /** * A mapping of property names to values. Used to set Presto (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI */ properties?: { [key: string]: string; }; /** * The HCFS URI of the script that contains SQL queries. */ queryFileUri?: string; /** * A list of queries. */ queryList?: outputs.dataproc.WorkflowTemplateJobPrestoJobQueryList; } interface WorkflowTemplateJobPrestoJobLoggingConfig { /** * The per-package log levels for the driver. 
This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobPrestoJobQueryList { /** * Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } */ queries: string[]; } interface WorkflowTemplateJobPysparkJob { /** * HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. */ jarFileUris?: string[]; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobPysparkJobLoggingConfig; /** * Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. */ mainPythonFileUri: string; /** * A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */ properties?: { [key: string]: string; }; /** * HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. 
*/ pythonFileUris?: string[]; } interface WorkflowTemplateJobPysparkJobLoggingConfig { /** * The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobScheduling { /** * Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10. */ maxFailuresPerHour?: number; /** * Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240 */ maxFailuresTotal?: number; } interface WorkflowTemplateJobSparkJob { /** * HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */ fileUris?: string[]; /** * HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. */ jarFileUris?: string[]; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobSparkJobLoggingConfig; /** * The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`. */ mainClass?: string; /** * The HCFS URI of the jar file that contains the main class. */ mainJarFileUri?: string; /** * A mapping of property names to values, used to configure Spark. 
Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */ properties?: { [key: string]: string; }; } interface WorkflowTemplateJobSparkJobLoggingConfig { /** * The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobSparkRJob { /** * HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */ archiveUris?: string[]; /** * The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */ args?: string[]; /** * HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */ fileUris?: string[]; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobSparkRJobLoggingConfig; /** * Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. */ mainRFileUri: string; /** * A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */ properties?: { [key: string]: string; }; } interface WorkflowTemplateJobSparkRJobLoggingConfig { /** * The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobSparkSqlJob { /** * HCFS URIs of jar files to be added to the Spark CLASSPATH. 
*/ jarFileUris?: string[]; /** * The runtime log config for job execution. */ loggingConfig?: outputs.dataproc.WorkflowTemplateJobSparkSqlJobLoggingConfig; /** * A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. */ properties?: { [key: string]: string; }; /** * The HCFS URI of the script that contains SQL queries. */ queryFileUri?: string; /** * A list of queries. */ queryList?: outputs.dataproc.WorkflowTemplateJobSparkSqlJobQueryList; /** * Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`). */ scriptVariables?: { [key: string]: string; }; } interface WorkflowTemplateJobSparkSqlJobLoggingConfig { /** * The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */ driverLogLevels?: { [key: string]: string; }; } interface WorkflowTemplateJobSparkSqlJobQueryList { /** * Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } */ queries: string[]; } interface WorkflowTemplateParameter { /** * Brief description of the parameter. Must not exceed 1024 characters. */ description?: string; /** * Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args */ fields: string[]; /** * Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. 
The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. */ name: string; /** * Validation rules to be applied to this parameter's value. */ validation?: outputs.dataproc.WorkflowTemplateParameterValidation; } interface WorkflowTemplateParameterValidation { /** * Validation based on regular expressions. */ regex?: outputs.dataproc.WorkflowTemplateParameterValidationRegex; /** * Validation based on a list of allowed values. */ values?: outputs.dataproc.WorkflowTemplateParameterValidationValues; } interface WorkflowTemplateParameterValidationRegex { /** * Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). */ regexes: string[]; } interface WorkflowTemplateParameterValidationValues { /** * Required. List of allowed values for the parameter. */ values: string[]; } interface WorkflowTemplatePlacement { /** * A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted. */ clusterSelector?: outputs.dataproc.WorkflowTemplatePlacementClusterSelector; /** * A cluster that is managed by the workflow. */ managedCluster?: outputs.dataproc.WorkflowTemplatePlacementManagedCluster; } interface WorkflowTemplatePlacementClusterSelector { /** * Required. The cluster labels. Cluster must have all labels to match. */ clusterLabels: { [key: string]: string; }; /** * The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used. */ zone: string; } interface WorkflowTemplatePlacementManagedCluster { /** * Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). 
Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. */ clusterName: string; /** * Required. The cluster configuration. */ config: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfig; /** * The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster. */ labels?: { [key: string]: string; }; } interface WorkflowTemplatePlacementManagedClusterConfig { /** * Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. */ autoscalingConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig; /** * Encryption settings for the cluster. */ encryptionConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig; /** * Port/endpoint configuration for this cluster */ endpointConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigEndpointConfig; /** * The shared Compute Engine config settings for all instances in a cluster. */ gceClusterConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig; /** * The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gceClusterConfig`, `masterConfig`, `workerConfig`, `secondaryWorkerConfig`, and `autoscalingConfig`. */ gkeClusterConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig; /** * Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi */ initializationActions?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationAction[]; /** * Lifecycle setting for the cluster. */ lifecycleConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig; /** * The Compute Engine config settings for the master instance in a cluster. */ masterConfig: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfig; /** * Metastore configuration. */ metastoreConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig; /** * The Compute Engine config settings for additional worker instances in a cluster. */ secondaryWorkerConfig: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig; /** * Security settings for the cluster. */ securityConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfig; /** * The config settings for software inside the cluster. */ softwareConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig; /** * A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://docs.cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
*/ stagingBucket?: string; /** * A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. */ tempBucket?: string; /** * The Compute Engine config settings for worker instances in a cluster. * * - - - */ workerConfig: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfig; } interface WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { /** * The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` Note that the policy must be in the same project and Dataproc region. */ policy?: string; } interface WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { /** * The Cloud KMS key name to use for PD disk encryption for all instances in the cluster. */ gcePdKmsKeyName?: string; } interface WorkflowTemplatePlacementManagedClusterConfigEndpointConfig { /** * If true, enable http access to specific ports on the cluster from external sources. Defaults to false. */ enableHttpPortAccess?: boolean; /** * Output only. The map of port descriptions to URLs. Will only be populated if enableHttpPortAccess is true. */ httpPorts: { [key: string]: string; }; } interface WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { /** * If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. 
This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. */ internalIpOnly: boolean; /** * The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://docs.cloud.google.com/compute/docs/metadata/overview)). */ metadata?: { [key: string]: string; }; /** * The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see /regions/global/default` * `default` */ network?: string; /** * Node Group Affinity for sole-tenant clusters. */ nodeGroupAffinity?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity; /** * The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL */ privateIpv6GoogleAccess?: string; /** * Reservation Affinity for consuming Zonal reservation. */ reservationAffinity?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity; /** * The (https://docs.cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. */ serviceAccount?: string; /** * The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/docs.cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control */ serviceAccountScopes?: string[]; /** * Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://docs.cloud.google.com/security/shielded-cloud/shielded-vm). Structure defined below. */ shieldedInstanceConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig; /** * The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0` */ subnetwork?: string; /** * The Compute Engine tags to add to all instances (see [Manage tags for resources](https://docs.cloud.google.com/compute/docs/tag-resources)). */ tags?: string[]; /** * The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f` */ zone: string; } interface WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { /** * Required. 
The URI of a sole-tenant /zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1` */ nodeGroup: string; } interface WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { /** * Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION */ consumeReservationType?: string; /** * Corresponds to the label key of reservation resource. */ key?: string; /** * Corresponds to the label values of reservation resource. */ values?: string[]; } interface WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { /** * Defines whether instances have [Integrity Monitoring](https://docs.cloud.google.com/compute/shielded-vm/docs/shielded-vm#integrity-monitoring) enabled. */ enableIntegrityMonitoring?: boolean; /** * Defines whether instances have [Secure Boot](https://docs.cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot) enabled. */ enableSecureBoot?: boolean; /** * Defines whether instances have the [vTPM](https://docs.cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) enabled. */ enableVtpm?: boolean; } interface WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig { /** * A target for the deployment. */ namespacedGkeDeploymentTarget?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget; } interface WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { /** * A namespace within the GKE cluster to deploy into. */ clusterNamespace?: string; /** * The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' */ targetGkeCluster?: string; } interface WorkflowTemplatePlacementManagedClusterConfigInitializationAction { /** * Required. Cloud Storage URI of executable file. */ executableFile?: string; /** * Amount of time executable has to complete. 
Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */ executionTimeout?: string; } interface WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { /** * The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ autoDeleteTime?: string; /** * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ autoDeleteTtl?: string; /** * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). */ idleDeleteTtl?: string; /** * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ idleStartTime: string; } interface WorkflowTemplatePlacementManagedClusterConfigMasterConfig { /** * The Compute Engine accelerator configuration for these instances. */ accelerators: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator[]; /** * Disk option config settings. 
*/ diskConfig: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig; /** * The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default. */ image?: string; /** * Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group. */ instanceNames: string[]; /** * Output only. Specifies that this instance group contains preemptible instances. */ isPreemptible: boolean; /** * The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/(https://docs.cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */ machineType?: string; /** * Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */ managedGroupConfigs: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig[]; /** * Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://docs.cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */ minCpuPlatform: string; /** * The number of VM instances in the instance group. For master instance groups, must be set to 1. */ numInstances?: number; /** * Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. 
Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */ preemptibility?: string; } interface WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator { /** * The number of the accelerator cards of this type exposed to this instance. */ acceleratorCount?: number; /** * Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://docs.cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */ acceleratorType?: string; } interface WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { /** * Size in GB of the boot disk (default is 500GB). */ bootDiskSizeGb?: number; /** * Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). */ bootDiskType?: string; /** * Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */ numLocalSsds: number; } interface WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { /** * Output only. The name of the Instance Group Manager for this group. */ instanceGroupManagerName: string; /** * Output only. The name of the Instance Template used for the Managed Instance Group. */ instanceTemplateName: string; } interface WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig { /** * Required. Resource name of an existing Dataproc Metastore service. 
Example: * `projects/` */ dataprocMetastoreService: string; } interface WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { /** * Optional. The Compute Engine accelerator configuration for these instances. */ accelerators: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator[]; /** * Optional. Disk option config settings. */ diskConfig: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig; /** * Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[projectId]/global/images/[image-id]` * `projects/[projectId]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[projectId]/global/images/family/[custom-image-family-name]` * `projects/[projectId]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default. */ image?: string; /** * Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group. */ instanceNames: string[]; /** * Output only. Specifies that this instance group contains preemptible instances. */ isPreemptible: boolean; /** * Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[projectId]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[projectId]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */ machineType?: string; /** * Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */ managedGroupConfigs: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig[]; /** * Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc > Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */ minCpuPlatform: string; /** * Optional. The number of VM instances in the instance group. For [HA cluster](https://www.terraform.io/dataproc/docs/concepts/configuring-clusters/high-availability) masterConfig groups, **must be set to 3**. For standard cluster masterConfig groups, **must be set to 1**. */ numInstances?: number; /** * Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */ preemptibility?: string; } interface WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator { /** * The number of the accelerator cards of this type exposed to this instance. */ acceleratorCount?: number; /** * Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. 
See (https://docs.cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */ acceleratorType?: string; } interface WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { /** * Size in GB of the boot disk (default is 500GB). */ bootDiskSizeGb?: number; /** * Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). */ bootDiskType?: string; /** * Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */ numLocalSsds: number; } interface WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { /** * Output only. The name of the Instance Group Manager for this group. */ instanceGroupManagerName: string; /** * Output only. The name of the Instance Template used for the Managed Instance Group. */ instanceTemplateName: string; } interface WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { /** * Kerberos related configuration. */ kerberosConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig; } interface WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { /** * The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */ crossRealmTrustAdminServer?: string; /** * The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. 
*/ crossRealmTrustKdc?: string; /** * The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. */ crossRealmTrustRealm?: string; /** * The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. */ crossRealmTrustSharedPassword?: string; /** * Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. */ enableKerberos?: boolean; /** * The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. */ kdcDbKey?: string; /** * The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. */ keyPassword?: string; /** * The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */ keystore?: string; /** * The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. */ keystorePassword?: string; /** * The uri of the KMS key used to encrypt various sensitive files. */ kmsKey?: string; /** * The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. */ realm?: string; /** * The Cloud Storage URI of a KMS encrypted file containing the root principal password. */ rootPrincipalPassword?: string; /** * The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. */ tgtLifetimeHours?: number; /** * The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. 
*/ truststore?: string; /** * The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. */ truststorePassword?: string; } interface WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { /** * The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://docs.cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the ["preview" version](https://docs.cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */ imageVersion?: string; /** * The set of components to activate on the cluster. */ optionalComponents?: string[]; /** * The properties to set on daemon config files. * * Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * * * capacity-scheduler: `capacity-scheduler.xml` * * core: `core-site.xml` * * distcp: `distcp-default.xml` * * hdfs: `hdfs-site.xml` * * hive: `hive-site.xml` * * mapred: `mapred-site.xml` * * pig: `pig.properties` * * spark: `spark-defaults.conf` * * yarn: `yarn-site.xml` * * * For more information, see [Cluster properties](https://docs.cloud.google.com/dataproc/docs/concepts/cluster-properties). */ properties?: { [key: string]: string; }; } interface WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { /** * Optional. The Compute Engine accelerator configuration for these instances. */ accelerators: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator[]; /** * Optional. Disk option config settings. */ diskConfig: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig; /** * Optional. 
The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[projectId]/global/images/[image-id]` * `projects/[projectId]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[projectId]/global/images/family/[custom-image-family-name]` * `projects/[projectId]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default. */ image?: string; /** * Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group. */ instanceNames: string[]; /** * Output only. Specifies that this instance group contains preemptible instances. */ isPreemptible: boolean; /** * Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[projectId]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[projectId]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */ machineType?: string; /** * Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */ managedGroupConfigs: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig[]; /** * Optional. Specifies the minimum cpu platform for the Instance Group. 
See [Dataproc > Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */ minCpuPlatform: string; /** * Optional. The number of VM instances in the instance group. For [HA cluster](https://www.terraform.io/dataproc/docs/concepts/configuring-clusters/high-availability) masterConfig groups, **must be set to 3**. For standard cluster masterConfig groups, **must be set to 1**. */ numInstances?: number; /** * Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */ preemptibility?: string; } interface WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator { /** * The number of the accelerator cards of this type exposed to this instance. */ acceleratorCount?: number; /** * Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://docs.cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */ acceleratorType?: string; } interface WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { /** * Size in GB of the boot disk (default is 500GB). */ bootDiskSizeGb?: number; /** * Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). */ bootDiskType?: string; /** * Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */ numLocalSsds: number; } interface WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { /** * Output only. The name of the Instance Group Manager for this group. */ instanceGroupManagerName: string; /** * Output only. The name of the Instance Template used for the Managed Instance Group. */ instanceTemplateName: string; } } export declare namespace datastream { interface ConnectionProfileBigqueryProfile { } interface ConnectionProfileForwardSshConnectivity { /** * Hostname for the SSH tunnel. */ hostname: string; /** * SSH password. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Port for the SSH tunnel. */ port?: number; /** * SSH private key. * **Note**: This property is sensitive and will not be displayed in the plan. */ privateKey?: string; /** * Username for the SSH tunnel. */ username: string; } interface ConnectionProfileGcsProfile { /** * The Cloud Storage bucket name. */ bucket: string; /** * The root path inside the Cloud Storage bucket. */ rootPath?: string; } interface ConnectionProfileMongodbProfile { /** * List of host addresses for a MongoDB cluster. * Structure is documented below. */ hostAddresses: outputs.datastream.ConnectionProfileMongodbProfileHostAddress[]; /** * Password for the MongoDB connection. Mutually exclusive with * secretManagerStoredPassword. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Name of the replica set. */ replicaSet?: string; /** * A reference to a Secret Manager resource name storing the MongoDB * connection password. Mutually exclusive with password. */ secretManagerStoredPassword?: string; /** * Srv connection format. Mutually exclusive with * standard_connection_Format. 
*/ srvConnectionFormat?: outputs.datastream.ConnectionProfileMongodbProfileSrvConnectionFormat; /** * SSL configuration for the MongoDB connection. * Structure is documented below. */ sslConfig?: outputs.datastream.ConnectionProfileMongodbProfileSslConfig; /** * Standard connection format. Mutually exclusive with * srv_connection_format. * Structure is documented below. */ standardConnectionFormat?: outputs.datastream.ConnectionProfileMongodbProfileStandardConnectionFormat; /** * Username for the MongoDB connection. */ username: string; } interface ConnectionProfileMongodbProfileHostAddress { /** * Hostname for the connection. */ hostname: string; /** * Port for the connection. */ port?: number; } interface ConnectionProfileMongodbProfileSrvConnectionFormat { } interface ConnectionProfileMongodbProfileSslConfig { /** * PEM-encoded certificate of the CA that signed the source database * server's certificate. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate?: string; /** * (Output) * Indicates whether the caCertificate field is set. */ caCertificateSet: boolean; /** * PEM-encoded certificate that will be used by the replica to * authenticate against the source database server. If this field * is used then the 'clientKey' and the 'caCertificate' fields are * mandatory. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientCertificate?: string; /** * (Output) * Indicates whether the clientCertificate field is set. */ clientCertificateSet: boolean; /** * PEM-encoded private key associated with the Client Certificate. * If this field is used then the 'client_certificate' and the * 'ca_certificate' fields are mandatory. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientKey?: string; /** * (Output) * Indicates whether the clientKey field is set. */ clientKeySet: boolean; /** * A reference to a Secret Manager resource name storing the * PEM-encoded private key. 
Mutually exclusive with clientKey. * **Note**: This property is sensitive and will not be displayed in the plan. */ secretManagerStoredClientKey?: string; } interface ConnectionProfileMongodbProfileStandardConnectionFormat { /** * Specifies whether the client connects directly to the * host[:port] in the connection URI. */ directConnection?: boolean; } interface ConnectionProfileMysqlProfile { /** * Hostname for the MySQL connection. */ hostname: string; /** * Password for the MySQL connection. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Port for the MySQL connection. */ port?: number; /** * A reference to a Secret Manager resource name storing the user's password. */ secretManagerStoredPassword?: string; /** * SSL configuration for the MySQL connection. * Structure is documented below. */ sslConfig?: outputs.datastream.ConnectionProfileMysqlProfileSslConfig; /** * Username for the MySQL connection. */ username: string; } interface ConnectionProfileMysqlProfileSslConfig { /** * PEM-encoded certificate of the CA that signed the source database * server's certificate. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate?: string; /** * (Output) * Indicates whether the caCertificate field is set. */ caCertificateSet: boolean; /** * PEM-encoded certificate that will be used by the replica to * authenticate against the source database server. If this field * is used then the 'clientKey' and the 'caCertificate' fields are * mandatory. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientCertificate?: string; /** * (Output) * Indicates whether the clientCertificate field is set. */ clientCertificateSet: boolean; /** * PEM-encoded private key associated with the Client Certificate. * If this field is used then the 'client_certificate' and the * 'ca_certificate' fields are mandatory. 
* **Note**: This property is sensitive and will not be displayed in the plan. */ clientKey?: string; /** * (Output) * Indicates whether the clientKey field is set. */ clientKeySet: boolean; } interface ConnectionProfileOracleProfile { /** * Connection string attributes */ connectionAttributes?: { [key: string]: string; }; /** * Database for the Oracle connection. */ databaseService: string; /** * Hostname for the Oracle connection. */ hostname: string; /** * Password for the Oracle connection. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Port for the Oracle connection. */ port?: number; /** * A reference to a Secret Manager resource name storing the user's password. */ secretManagerStoredPassword?: string; /** * Username for the Oracle connection. */ username: string; } interface ConnectionProfilePostgresqlProfile { /** * Database for the PostgreSQL connection. */ database: string; /** * Hostname for the PostgreSQL connection. */ hostname: string; /** * Password for the PostgreSQL connection. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Port for the PostgreSQL connection. */ port?: number; /** * A reference to a Secret Manager resource name storing the user's password. */ secretManagerStoredPassword?: string; /** * SSL configuration for the PostgreSQL connection. * Structure is documented below. */ sslConfig?: outputs.datastream.ConnectionProfilePostgresqlProfileSslConfig; /** * Username for the PostgreSQL connection. */ username: string; } interface ConnectionProfilePostgresqlProfileSslConfig { /** * If this field is set, the communication will be encrypted with TLS encryption * and both the server identity and the client identity will be authenticated. * Structure is documented below. 
*/ serverAndClientVerification?: outputs.datastream.ConnectionProfilePostgresqlProfileSslConfigServerAndClientVerification; /** * If this field is set, the communication will be encrypted with TLS encryption * and the server identity will be authenticated. * Structure is documented below. */ serverVerification?: outputs.datastream.ConnectionProfilePostgresqlProfileSslConfigServerVerification; } interface ConnectionProfilePostgresqlProfileSslConfigServerAndClientVerification { /** * PEM-encoded server root CA certificate. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate: string; /** * PEM-encoded certificate used by the source database to authenticate the * client identity (i.e., the Datastream's identity). This certificate is * signed by either a root certificate trusted by the server or one or more * intermediate certificates (which is stored with the leaf certificate) to * link to this certificate to the trusted root certificate. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientCertificate: string; /** * PEM-encoded private key associated with the client certificate. * This value will be used during the SSL/TLS handshake, allowing * the PostgreSQL server to authenticate the client's identity, * i.e. identity of the stream. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientKey: string; } interface ConnectionProfilePostgresqlProfileSslConfigServerVerification { /** * PEM-encoded server root CA certificate. * **Note**: This property is sensitive and will not be displayed in the plan. */ caCertificate: string; } interface ConnectionProfilePrivateConnectivity { /** * A reference to a private connection resource. Format: `projects/{project}/locations/{location}/privateConnections/{name}` */ privateConnection: string; } interface ConnectionProfileSalesforceProfile { /** * Domain for the Salesforce Org. 
*/ domain: string; /** * OAuth credentials to use for Salesforce authentication. * Structure is documented below. */ oauth2ClientCredentials?: outputs.datastream.ConnectionProfileSalesforceProfileOauth2ClientCredentials; /** * User credentials to use for Salesforce authentication. * Structure is documented below. */ userCredentials?: outputs.datastream.ConnectionProfileSalesforceProfileUserCredentials; } interface ConnectionProfileSalesforceProfileOauth2ClientCredentials { /** * Client ID to use for authentication. */ clientId?: string; /** * Client secret to use for authentication. */ clientSecret?: string; /** * A reference to a Secret Manager resource name storing the client secret. */ secretManagerStoredClientSecret?: string; } interface ConnectionProfileSalesforceProfileUserCredentials { /** * Password of the user. */ password?: string; /** * A reference to a Secret Manager resource name storing the user's password. */ secretManagerStoredPassword?: string; /** * A reference to a Secret Manager resource name storing the user's security token. * * The `oauth2ClientCredentials` block supports: */ secretManagerStoredSecurityToken?: string; /** * Security token of the user. */ securityToken?: string; /** * Username to use for authentication. */ username?: string; } interface ConnectionProfileSpannerProfile { /** * The full project and resource path for Spanner database. Format: * projects/{project}/instances/{instance}/databases/{database}. */ database: string; /** * The regional Spanner endpoint. Format: * https://spanner.{region}.rep.googleapis.com. */ host?: string; } interface ConnectionProfileSqlServerProfile { /** * Database for the SQL Server connection. */ database: string; /** * Hostname for the SQL Server connection. */ hostname: string; /** * Password for the SQL Server connection. * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * Port for the SQL Server connection. 
*/ port?: number; /** * A reference to a Secret Manager resource name storing the user's password. */ secretManagerStoredPassword?: string; /** * Username for the SQL Server connection. */ username: string; } interface PrivateConnectionError { /** * A list of messages that carry the error details. */ details?: { [key: string]: string; }; /** * A message containing more information about the error that occurred. */ message?: string; } interface PrivateConnectionPscInterfaceConfig { /** * Fully qualified name of the network attachment that Datastream will connect to. * Format: projects/{project}/regions/{region}/networkAttachments/{name} * To get Datastream project for the accepted list: * `gcloud datastream private-connections create [PC ID] --location=[LOCATION] --network-attachment=[NA URI] --validate-only --display-name=[ANY STRING]` * Add Datastream project to the attachment accepted list: * `gcloud compute network-attachments update [NA URI] --region=[NA region] --producer-accept-list=[TP from prev command]` */ networkAttachment: string; } interface PrivateConnectionVpcPeeringConfig { /** * A free subnet for peering. (CIDR of /29) */ subnet: string; /** * Fully qualified name of the VPC that Datastream will peer to. * Format: projects/{project}/global/{networks}/{name} */ vpc: string; } interface StreamBackfillAll { /** * MongoDB data source objects to avoid backfilling. * Structure is documented below. */ mongodbExcludedObjects?: outputs.datastream.StreamBackfillAllMongodbExcludedObjects; /** * MySQL data source objects to avoid backfilling. * Structure is documented below. */ mysqlExcludedObjects?: outputs.datastream.StreamBackfillAllMysqlExcludedObjects; /** * Oracle data source objects to avoid backfilling. * Structure is documented below. */ oracleExcludedObjects?: outputs.datastream.StreamBackfillAllOracleExcludedObjects; /** * PostgreSQL data source objects to avoid backfilling. * Structure is documented below. 
*/ postgresqlExcludedObjects?: outputs.datastream.StreamBackfillAllPostgresqlExcludedObjects; /** * Salesforce objects to avoid backfilling. * Structure is documented below. */ salesforceExcludedObjects?: outputs.datastream.StreamBackfillAllSalesforceExcludedObjects; /** * Spanner objects to avoid backfilling. * Structure is documented below. */ spannerExcludedObjects?: outputs.datastream.StreamBackfillAllSpannerExcludedObjects; /** * SQL Server data source objects to avoid backfilling. * Structure is documented below. */ sqlServerExcludedObjects?: outputs.datastream.StreamBackfillAllSqlServerExcludedObjects; } interface StreamBackfillAllMongodbExcludedObjects { /** * MongoDB databases in the cluster. * Structure is documented below. */ databases: outputs.datastream.StreamBackfillAllMongodbExcludedObjectsDatabase[]; } interface StreamBackfillAllMongodbExcludedObjectsDatabase { /** * Collections in the database. * Structure is documented below. */ collections?: outputs.datastream.StreamBackfillAllMongodbExcludedObjectsDatabaseCollection[]; /** * Database name. */ database: string; } interface StreamBackfillAllMongodbExcludedObjectsDatabaseCollection { /** * Collection name. */ collection: string; /** * Fields in the collection. * Structure is documented below. */ fields?: outputs.datastream.StreamBackfillAllMongodbExcludedObjectsDatabaseCollectionField[]; } interface StreamBackfillAllMongodbExcludedObjectsDatabaseCollectionField { /** * Field name. */ field?: string; } interface StreamBackfillAllMysqlExcludedObjects { /** * MySQL databases on the server * Structure is documented below. */ mysqlDatabases: outputs.datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabase[]; } interface StreamBackfillAllMysqlExcludedObjectsMysqlDatabase { /** * Database name. */ database: string; /** * Tables in the database. * Structure is documented below. 
*/ mysqlTables?: outputs.datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTable[]; } interface StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTable { /** * MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ mysqlColumns?: outputs.datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumn[]; /** * Table name. */ table: string; } interface StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumn { /** * Column collation. */ collation?: string; /** * Column name. */ column?: string; /** * The MySQL data type. Full data types list can be found here: * https://dev.mysql.com/doc/refman/8.0/en/data-types.html */ dataType?: string; /** * (Output) * Column length. */ length: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; } interface StreamBackfillAllOracleExcludedObjects { /** * Oracle schemas/databases in the database server * Structure is documented below. */ oracleSchemas: outputs.datastream.StreamBackfillAllOracleExcludedObjectsOracleSchema[]; } interface StreamBackfillAllOracleExcludedObjectsOracleSchema { /** * Tables in the database. * Structure is documented below. */ oracleTables?: outputs.datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTable[]; /** * Schema name. */ schema: string; } interface StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTable { /** * Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ oracleColumns?: outputs.datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumn[]; /** * Table name. 
*/ table: string; } interface StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumn { /** * Column name. */ column?: string; /** * The Oracle data type. Full data types list can be found here: * https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html */ dataType?: string; /** * (Output) * Column encoding. */ encoding: string; /** * (Output) * Column length. */ length: number; /** * (Output) * Whether or not the column can accept a null value. */ nullable: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; /** * (Output) * Column precision. */ precision: number; /** * (Output) * Whether or not the column represents a primary key. */ primaryKey: boolean; /** * (Output) * Column scale. */ scale: number; } interface StreamBackfillAllPostgresqlExcludedObjects { /** * PostgreSQL schemas on the server * Structure is documented below. */ postgresqlSchemas: outputs.datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema[]; } interface StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema { /** * Tables in the schema. * Structure is documented below. */ postgresqlTables?: outputs.datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTable[]; /** * Schema name. */ schema: string; } interface StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTable { /** * PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ postgresqlColumns?: outputs.datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn[]; /** * Table name. */ table: string; } interface StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn { /** * Column name. */ column?: string; /** * The PostgreSQL data type. 
Full data types list can be found here: * https://www.postgresql.org/docs/current/datatype.html */ dataType?: string; /** * (Output) * Column length. */ length: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * (Output) * Column precision. */ precision: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; /** * (Output) * Column scale. */ scale: number; } interface StreamBackfillAllSalesforceExcludedObjects { /** * Salesforce objects in Salesforce Org. * Structure is documented below. */ objects: outputs.datastream.StreamBackfillAllSalesforceExcludedObjectsObject[]; } interface StreamBackfillAllSalesforceExcludedObjectsObject { /** * Fields in the Salesforce object. When unspecified as part of include/exclude objects, includes/excludes everything/nothing. * Structure is documented below. */ fields?: outputs.datastream.StreamBackfillAllSalesforceExcludedObjectsObjectField[]; /** * Name of object in Salesforce Org. */ objectName?: string; } interface StreamBackfillAllSalesforceExcludedObjectsObjectField { /** * Field name. */ name?: string; } interface StreamBackfillAllSpannerExcludedObjects { /** * Spanner schemas in the database * Structure is documented below. */ schemas: outputs.datastream.StreamBackfillAllSpannerExcludedObjectsSchema[]; } interface StreamBackfillAllSpannerExcludedObjectsSchema { /** * Schema name. */ schema: string; /** * Tables in the schema. * Structure is documented below. */ tables?: outputs.datastream.StreamBackfillAllSpannerExcludedObjectsSchemaTable[]; } interface StreamBackfillAllSpannerExcludedObjectsSchemaTable { /** * Spanner columns in the table. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ columns?: outputs.datastream.StreamBackfillAllSpannerExcludedObjectsSchemaTableColumn[]; /** * Table name. 
*/ table: string; } interface StreamBackfillAllSpannerExcludedObjectsSchemaTableColumn { /** * Column name. */ column: string; /** * (Output) * The Spanner data type. Full data types list can be found here: * https://docs.cloud.google.com/spanner/docs/reference/standard-sql/data-types */ dataType: string; /** * (Output) * Whether the column is a primary key. */ isPrimaryKey: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; } interface StreamBackfillAllSqlServerExcludedObjects { /** * SQL Server schemas/databases in the database server * Structure is documented below. */ schemas: outputs.datastream.StreamBackfillAllSqlServerExcludedObjectsSchema[]; } interface StreamBackfillAllSqlServerExcludedObjectsSchema { /** * Schema name. */ schema: string; /** * Tables in the schema. * Structure is documented below. */ tables?: outputs.datastream.StreamBackfillAllSqlServerExcludedObjectsSchemaTable[]; } interface StreamBackfillAllSqlServerExcludedObjectsSchemaTable { /** * SQL Server columns in the table. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ columns?: outputs.datastream.StreamBackfillAllSqlServerExcludedObjectsSchemaTableColumn[]; /** * Table name. */ table: string; } interface StreamBackfillAllSqlServerExcludedObjectsSchemaTableColumn { /** * Column name. */ column?: string; /** * (Output) * The SQL Server data type. Full data types list can be found here: * https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql */ dataType?: string; /** * (Output) * Column length. */ length: number; /** * (Output) * Whether or not the column can accept a null value. */ nullable: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; /** * (Output) * Column precision. */ precision: number; /** * (Output) * Whether or not the column represents a primary key. 
*/ primaryKey: boolean; /** * (Output) * Column scale. */ scale: number; } interface StreamBackfillNone { } interface StreamDestinationConfig { /** * A configuration for how data should be loaded to Google BigQuery. * Structure is documented below. */ bigqueryDestinationConfig?: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfig; /** * Destination connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name} */ destinationConnectionProfile: string; /** * A configuration for how data should be loaded to Cloud Storage. * Structure is documented below. */ gcsDestinationConfig?: outputs.datastream.StreamDestinationConfigGcsDestinationConfig; } interface StreamDestinationConfigBigqueryDestinationConfig { /** * AppendOnly mode defines that the stream of changes (INSERT, UPDATE-INSERT, UPDATE-DELETE and DELETE * events) to a source table will be written to the destination Google BigQuery table, retaining the * historical state of the data. */ appendOnly?: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfigAppendOnly; /** * BigLake Managed Tables configuration for BigQuery streams. * Structure is documented below. */ blmtConfig?: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfigBlmtConfig; /** * The guaranteed data freshness (in seconds) when querying tables created by the stream. * Editing this field will only affect new tables created in the future, but existing tables * will not be impacted. Lower values mean that queries will return fresher data, but may result in higher cost. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s. */ dataFreshness?: string; /** * Merge mode defines that all changes to a table will be merged at the destination Google BigQuery * table. This is the default write mode. When selected, BigQuery reflects the way the data is stored * in the source database. 
With Merge mode, no historical record of the change events is kept. */ merge?: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfigMerge; /** * A single target dataset to which all data will be streamed. * Structure is documented below. */ singleTargetDataset?: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset; /** * Destination datasets are created so that hierarchy of the destination data objects matches the source hierarchy. * Structure is documented below. */ sourceHierarchyDatasets?: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets; } interface StreamDestinationConfigBigqueryDestinationConfigAppendOnly { } interface StreamDestinationConfigBigqueryDestinationConfigBlmtConfig { /** * The Cloud Storage bucket name. */ bucket: string; /** * The bigquery connection. Format: `{project}.{location}.{name}` */ connectionName: string; /** * The file format. */ fileFormat: string; /** * The root path inside the Cloud Storage bucket. */ rootPath?: string; /** * The table format. */ tableFormat: string; } interface StreamDestinationConfigBigqueryDestinationConfigMerge { } interface StreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset { /** * Dataset ID in the format projects/{project}/datasets/{dataset_id} or * {project}:{dataset_id} */ datasetId: string; } interface StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets { /** * Dataset template used for dynamic dataset creation. * Structure is documented below. */ datasetTemplate: outputs.datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate; /** * Optional. The project id of the BigQuery dataset. If not specified, the project will be inferred from the stream resource. 
*/ projectId?: string; } interface StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate { /** * If supplied, every created dataset will have its name prefixed by the provided value. * The prefix and name will be separated by an underscore. i.e. _. */ datasetIdPrefix?: string; /** * Describes the Cloud KMS encryption key that will be used to protect destination BigQuery * table. The BigQuery Service Account associated with your project requires access to this * encryption key. i.e. projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{cryptoKey}. * See https://cloud.google.com/bigquery/docs/customer-managed-encryption for more information. */ kmsKeyName?: string; /** * The geographic location where the dataset should reside. * See https://cloud.google.com/bigquery/docs/locations for supported locations. */ location: string; } interface StreamDestinationConfigGcsDestinationConfig { /** * AVRO file format configuration. */ avroFileFormat?: outputs.datastream.StreamDestinationConfigGcsDestinationConfigAvroFileFormat; /** * The maximum duration for which new events are added before a file is closed and a new file is created. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s. */ fileRotationInterval: string; /** * The maximum file size to be saved in the bucket. */ fileRotationMb: number; /** * JSON file format configuration. * Structure is documented below. */ jsonFileFormat?: outputs.datastream.StreamDestinationConfigGcsDestinationConfigJsonFileFormat; /** * Path inside the Cloud Storage bucket to write data to. */ path?: string; } interface StreamDestinationConfigGcsDestinationConfigAvroFileFormat { } interface StreamDestinationConfigGcsDestinationConfigJsonFileFormat { /** * Compression of the loaded JSON file. * Possible values are: `NO_COMPRESSION`, `GZIP`. */ compression?: string; /** * The schema file format along JSON data files. 
* Possible values are: `NO_SCHEMA_FILE`, `AVRO_SCHEMA_FILE`. */ schemaFileFormat?: string; } interface StreamRuleSet { /** * List of customization rules to apply. * Structure is documented below. */ customizationRules: outputs.datastream.StreamRuleSetCustomizationRule[]; /** * Object filter to apply the customization rules to. * Structure is documented below. */ objectFilter: outputs.datastream.StreamRuleSetObjectFilter; } interface StreamRuleSetCustomizationRule { /** * BigQuery clustering rule. * Structure is documented below. */ bigqueryClustering?: outputs.datastream.StreamRuleSetCustomizationRuleBigqueryClustering; /** * BigQuery partitioning rule. * Structure is documented below. */ bigqueryPartitioning?: outputs.datastream.StreamRuleSetCustomizationRuleBigqueryPartitioning; } interface StreamRuleSetCustomizationRuleBigqueryClustering { /** * Column names to set as clustering columns. */ columns: string[]; } interface StreamRuleSetCustomizationRuleBigqueryPartitioning { /** * A nested object resource. * Structure is documented below. */ ingestionTimePartition?: outputs.datastream.StreamRuleSetCustomizationRuleBigqueryPartitioningIngestionTimePartition; /** * A nested object resource. * Structure is documented below. */ integerRangePartition?: outputs.datastream.StreamRuleSetCustomizationRuleBigqueryPartitioningIntegerRangePartition; /** * If true, queries over the table require a partition filter. */ requirePartitionFilter?: boolean; /** * A nested object resource. * Structure is documented below. */ timeUnitPartition?: outputs.datastream.StreamRuleSetCustomizationRuleBigqueryPartitioningTimeUnitPartition; } interface StreamRuleSetCustomizationRuleBigqueryPartitioningIngestionTimePartition { /** * Partition granularity. * Possible values are: `PARTITIONING_TIME_GRANULARITY_UNSPECIFIED`, `PARTITIONING_TIME_GRANULARITY_HOUR`, `PARTITIONING_TIME_GRANULARITY_DAY`, `PARTITIONING_TIME_GRANULARITY_MONTH`, `PARTITIONING_TIME_GRANULARITY_YEAR`. 
*/ partitioningTimeGranularity?: string; } interface StreamRuleSetCustomizationRuleBigqueryPartitioningIntegerRangePartition { /** * The partitioning column. */ column: string; /** * The ending value for range partitioning (exclusive). */ end: number; /** * The interval of each range within the partition. */ interval: number; /** * The starting value for range partitioning (inclusive). */ start: number; } interface StreamRuleSetCustomizationRuleBigqueryPartitioningTimeUnitPartition { /** * The partitioning column. */ column: string; /** * Partition granularity. * Possible values are: `PARTITIONING_TIME_GRANULARITY_UNSPECIFIED`, `PARTITIONING_TIME_GRANULARITY_HOUR`, `PARTITIONING_TIME_GRANULARITY_DAY`, `PARTITIONING_TIME_GRANULARITY_MONTH`, `PARTITIONING_TIME_GRANULARITY_YEAR`. */ partitioningTimeGranularity?: string; } interface StreamRuleSetObjectFilter { /** * Specific source object identifier. * Structure is documented below. */ sourceObjectIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifier; } interface StreamRuleSetObjectFilterSourceObjectIdentifier { /** * A nested object resource. * Structure is documented below. */ mongodbIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierMongodbIdentifier; /** * A nested object resource. * Structure is documented below. */ mysqlIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierMysqlIdentifier; /** * A nested object resource. * Structure is documented below. */ oracleIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierOracleIdentifier; /** * A nested object resource. * Structure is documented below. */ postgresqlIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierPostgresqlIdentifier; /** * A nested object resource. * Structure is documented below. 
*/ salesforceIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierSalesforceIdentifier; /** * A nested object resource. * Structure is documented below. */ spannerIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierSpannerIdentifier; /** * A nested object resource. * Structure is documented below. */ sqlServerIdentifier?: outputs.datastream.StreamRuleSetObjectFilterSourceObjectIdentifierSqlServerIdentifier; } interface StreamRuleSetObjectFilterSourceObjectIdentifierMongodbIdentifier { /** * The MongoDB collection name. */ collection: string; /** * The MongoDB database name. */ database: string; } interface StreamRuleSetObjectFilterSourceObjectIdentifierMysqlIdentifier { /** * The database name. */ database: string; /** * The table name. */ table: string; } interface StreamRuleSetObjectFilterSourceObjectIdentifierOracleIdentifier { /** * The schema name. */ schema: string; /** * The table name. */ table: string; } interface StreamRuleSetObjectFilterSourceObjectIdentifierPostgresqlIdentifier { /** * The schema name. */ schema: string; /** * The table name. */ table: string; } interface StreamRuleSetObjectFilterSourceObjectIdentifierSalesforceIdentifier { /** * The Salesforce object name. */ objectName: string; } interface StreamRuleSetObjectFilterSourceObjectIdentifierSpannerIdentifier { /** * The schema name. */ schema?: string; /** * The table name. */ table: string; } interface StreamRuleSetObjectFilterSourceObjectIdentifierSqlServerIdentifier { /** * The schema name. */ schema: string; /** * The table name. */ table: string; } interface StreamSourceConfig { /** * MongoDB source configuration. * Structure is documented below. */ mongodbSourceConfig?: outputs.datastream.StreamSourceConfigMongodbSourceConfig; /** * MySQL data source configuration. * Structure is documented below. */ mysqlSourceConfig?: outputs.datastream.StreamSourceConfigMysqlSourceConfig; /** * Oracle data source configuration. 
* Structure is documented below. */ oracleSourceConfig?: outputs.datastream.StreamSourceConfigOracleSourceConfig; /** * PostgreSQL data source configuration. * Structure is documented below. */ postgresqlSourceConfig?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfig; /** * Salesforce data source configuration. * Structure is documented below. */ salesforceSourceConfig?: outputs.datastream.StreamSourceConfigSalesforceSourceConfig; /** * Source connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name} */ sourceConnectionProfile: string; /** * Spanner data source configuration. * Structure is documented below. */ spannerSourceConfig?: outputs.datastream.StreamSourceConfigSpannerSourceConfig; /** * SQL Server data source configuration. * Structure is documented below. */ sqlServerSourceConfig?: outputs.datastream.StreamSourceConfigSqlServerSourceConfig; } interface StreamSourceConfigMongodbSourceConfig { /** * MongoDB collections to exclude from the stream. * Structure is documented below. */ excludeObjects?: outputs.datastream.StreamSourceConfigMongodbSourceConfigExcludeObjects; /** * MongoDB collections to include in the stream. * Structure is documented below. */ includeObjects?: outputs.datastream.StreamSourceConfigMongodbSourceConfigIncludeObjects; /** * Optional. Maximum number of concurrent backfill tasks. The number * should be non-negative and less than or equal to 50. If not set * (or set to 0), the system's default value is used */ maxConcurrentBackfillTasks?: number; } interface StreamSourceConfigMongodbSourceConfigExcludeObjects { /** * MongoDB databases in the cluster. * Structure is documented below. */ databases?: outputs.datastream.StreamSourceConfigMongodbSourceConfigExcludeObjectsDatabase[]; } interface StreamSourceConfigMongodbSourceConfigExcludeObjectsDatabase { /** * Collections in the database. * Structure is documented below. 
*/ collections?: outputs.datastream.StreamSourceConfigMongodbSourceConfigExcludeObjectsDatabaseCollection[]; /** * Database name. */ database?: string; } interface StreamSourceConfigMongodbSourceConfigExcludeObjectsDatabaseCollection { /** * Collection name. */ collection?: string; /** * Fields in the collection. * Structure is documented below. */ fields?: outputs.datastream.StreamSourceConfigMongodbSourceConfigExcludeObjectsDatabaseCollectionField[]; } interface StreamSourceConfigMongodbSourceConfigExcludeObjectsDatabaseCollectionField { /** * Field name. */ field?: string; } interface StreamSourceConfigMongodbSourceConfigIncludeObjects { /** * MongoDB databases in the cluster. * Structure is documented below. */ databases?: outputs.datastream.StreamSourceConfigMongodbSourceConfigIncludeObjectsDatabase[]; } interface StreamSourceConfigMongodbSourceConfigIncludeObjectsDatabase { /** * Collections in the database. * Structure is documented below. */ collections?: outputs.datastream.StreamSourceConfigMongodbSourceConfigIncludeObjectsDatabaseCollection[]; /** * Database name. */ database?: string; } interface StreamSourceConfigMongodbSourceConfigIncludeObjectsDatabaseCollection { /** * Collection name. */ collection?: string; /** * Fields in the collection. * Structure is documented below. */ fields?: outputs.datastream.StreamSourceConfigMongodbSourceConfigIncludeObjectsDatabaseCollectionField[]; } interface StreamSourceConfigMongodbSourceConfigIncludeObjectsDatabaseCollectionField { /** * Field name. */ field?: string; } interface StreamSourceConfigMysqlSourceConfig { /** * CDC reader reads from binary logs replication cdc method. */ binaryLogPosition?: outputs.datastream.StreamSourceConfigMysqlSourceConfigBinaryLogPosition; /** * MySQL objects to exclude from the stream. * Structure is documented below. */ excludeObjects?: outputs.datastream.StreamSourceConfigMysqlSourceConfigExcludeObjects; /** * CDC reader reads from gtid based replication. 
*/ gtid?: outputs.datastream.StreamSourceConfigMysqlSourceConfigGtid; /** * MySQL objects to retrieve from the source. * Structure is documented below. */ includeObjects?: outputs.datastream.StreamSourceConfigMysqlSourceConfigIncludeObjects; /** * Maximum number of concurrent backfill tasks. The number should be non negative. * If not set (or set to 0), the system's default value will be used. */ maxConcurrentBackfillTasks: number; /** * Maximum number of concurrent CDC tasks. The number should be non negative. * If not set (or set to 0), the system's default value will be used. */ maxConcurrentCdcTasks: number; } interface StreamSourceConfigMysqlSourceConfigBinaryLogPosition { } interface StreamSourceConfigMysqlSourceConfigExcludeObjects { /** * MySQL databases on the server * Structure is documented below. */ mysqlDatabases: outputs.datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabase[]; } interface StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabase { /** * Database name. */ database: string; /** * Tables in the database. * Structure is documented below. */ mysqlTables?: outputs.datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTable[]; } interface StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTable { /** * MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ mysqlColumns?: outputs.datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumn[]; /** * Table name. */ table: string; } interface StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumn { /** * Column collation. */ collation?: string; /** * Column name. */ column?: string; /** * The MySQL data type. Full data types list can be found here: * https://dev.mysql.com/doc/refman/8.0/en/data-types.html */ dataType?: string; /** * (Output) * Column length. 
*/ length: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; } interface StreamSourceConfigMysqlSourceConfigGtid { } interface StreamSourceConfigMysqlSourceConfigIncludeObjects { /** * MySQL databases on the server * Structure is documented below. */ mysqlDatabases: outputs.datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabase[]; } interface StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabase { /** * Database name. */ database: string; /** * Tables in the database. * Structure is documented below. */ mysqlTables?: outputs.datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTable[]; } interface StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTable { /** * MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ mysqlColumns?: outputs.datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumn[]; /** * Table name. */ table: string; } interface StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumn { /** * Column collation. */ collation?: string; /** * Column name. */ column?: string; /** * The MySQL data type. Full data types list can be found here: * https://dev.mysql.com/doc/refman/8.0/en/data-types.html */ dataType?: string; /** * (Output) * Column length. */ length: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; } interface StreamSourceConfigOracleSourceConfig { /** * Configuration to drop large object values. 
*/ dropLargeObjects?: outputs.datastream.StreamSourceConfigOracleSourceConfigDropLargeObjects; /** * Oracle objects to exclude from the stream. * Structure is documented below. */ excludeObjects?: outputs.datastream.StreamSourceConfigOracleSourceConfigExcludeObjects; /** * Oracle objects to retrieve from the source. * Structure is documented below. */ includeObjects?: outputs.datastream.StreamSourceConfigOracleSourceConfigIncludeObjects; /** * Maximum number of concurrent backfill tasks. The number should be non negative. * If not set (or set to 0), the system's default value will be used. */ maxConcurrentBackfillTasks: number; /** * Maximum number of concurrent CDC tasks. The number should be non negative. * If not set (or set to 0), the system's default value will be used. */ maxConcurrentCdcTasks: number; /** * Configuration to drop large object values. */ streamLargeObjects?: outputs.datastream.StreamSourceConfigOracleSourceConfigStreamLargeObjects; } interface StreamSourceConfigOracleSourceConfigDropLargeObjects { } interface StreamSourceConfigOracleSourceConfigExcludeObjects { /** * Oracle schemas/databases in the database server * Structure is documented below. */ oracleSchemas: outputs.datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchema[]; } interface StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchema { /** * Tables in the database. * Structure is documented below. */ oracleTables?: outputs.datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTable[]; /** * Schema name. */ schema: string; } interface StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTable { /** * Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ oracleColumns?: outputs.datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumn[]; /** * Table name. 
*/ table: string; } interface StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumn { /** * Column name. */ column?: string; /** * The Oracle data type. Full data types list can be found here: * https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html */ dataType?: string; /** * (Output) * Column encoding. */ encoding: string; /** * (Output) * Column length. */ length: number; /** * (Output) * Whether or not the column can accept a null value. */ nullable: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; /** * (Output) * Column precision. */ precision: number; /** * (Output) * Whether or not the column represents a primary key. */ primaryKey: boolean; /** * (Output) * Column scale. */ scale: number; } interface StreamSourceConfigOracleSourceConfigIncludeObjects { /** * Oracle schemas/databases in the database server * Structure is documented below. */ oracleSchemas: outputs.datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchema[]; } interface StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchema { /** * Tables in the database. * Structure is documented below. */ oracleTables?: outputs.datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTable[]; /** * Schema name. */ schema: string; } interface StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTable { /** * Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ oracleColumns?: outputs.datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumn[]; /** * Table name. */ table: string; } interface StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumn { /** * Column name. */ column?: string; /** * The Oracle data type. 
Full data types list can be found here: * https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html */ dataType?: string; /** * (Output) * Column encoding. */ encoding: string; /** * (Output) * Column length. */ length: number; /** * (Output) * Whether or not the column can accept a null value. */ nullable: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; /** * (Output) * Column precision. */ precision: number; /** * (Output) * Whether or not the column represents a primary key. */ primaryKey: boolean; /** * (Output) * Column scale. */ scale: number; } interface StreamSourceConfigOracleSourceConfigStreamLargeObjects { } interface StreamSourceConfigPostgresqlSourceConfig { /** * PostgreSQL objects to exclude from the stream. * Structure is documented below. */ excludeObjects?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjects; /** * PostgreSQL objects to retrieve from the source. * Structure is documented below. */ includeObjects?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjects; /** * Maximum number of concurrent backfill tasks. The number should be non * negative. If not set (or set to 0), the system's default value will be used. */ maxConcurrentBackfillTasks: number; /** * The name of the publication that includes the set of all tables * that are defined in the stream's include_objects. */ publication: string; /** * The name of the logical replication slot that's configured with * the pgoutput plugin. */ replicationSlot: string; } interface StreamSourceConfigPostgresqlSourceConfigExcludeObjects { /** * PostgreSQL schemas on the server * Structure is documented below. */ postgresqlSchemas: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchema[]; } interface StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchema { /** * Tables in the schema. * Structure is documented below. 
*/
        postgresqlTables?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTable[];
        /**
         * Schema name.
         */
        schema: string;
    }
    interface StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTable {
        /**
         * PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
         * Structure is documented below.
         */
        postgresqlColumns?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn[];
        /**
         * Table name.
         */
        table: string;
    }
    interface StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn {
        /**
         * Column name.
         */
        column?: string;
        /**
         * The PostgreSQL data type. Full data types list can be found here:
         * https://www.postgresql.org/docs/current/datatype.html
         */
        dataType?: string;
        /**
         * (Output)
         * Column length.
         */
        length: number;
        /**
         * Whether or not the column can accept a null value.
         */
        nullable?: boolean;
        /**
         * The ordinal position of the column in the table.
         */
        ordinalPosition?: number;
        /**
         * (Output)
         * Column precision.
         */
        precision: number;
        /**
         * Whether or not the column represents a primary key.
         */
        primaryKey?: boolean;
        /**
         * (Output)
         * Column scale.
         */
        scale: number;
    }
    interface StreamSourceConfigPostgresqlSourceConfigIncludeObjects {
        /**
         * PostgreSQL schemas on the server
         * Structure is documented below.
         */
        postgresqlSchemas: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchema[];
    }
    interface StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchema {
        /**
         * Tables in the schema.
         * Structure is documented below.
         */
        postgresqlTables?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTable[];
        /**
         * Schema name.
*/ schema: string; } interface StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTable { /** * PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ postgresqlColumns?: outputs.datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn[]; /** * Table name. */ table: string; } interface StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn { /** * Column name. */ column?: string; /** * The PostgreSQL data type. Full data types list can be found here: * https://www.postgresql.org/docs/current/datatype.html */ dataType?: string; /** * (Output) * Column length. */ length: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * (Output) * Column precision. */ precision: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; /** * (Output) * Column scale. */ scale: number; } interface StreamSourceConfigSalesforceSourceConfig { /** * Salesforce objects to exclude from the stream. * Structure is documented below. */ excludeObjects?: outputs.datastream.StreamSourceConfigSalesforceSourceConfigExcludeObjects; /** * Salesforce objects to retrieve from the source. * Structure is documented below. */ includeObjects?: outputs.datastream.StreamSourceConfigSalesforceSourceConfigIncludeObjects; /** * Salesforce objects polling interval. The interval at which new changes will be polled for each object. The duration must be between 5 minutes and 24 hours. */ pollingInterval: string; } interface StreamSourceConfigSalesforceSourceConfigExcludeObjects { /** * Salesforce objects in data source. * Structure is documented below. 
*/
        objects: outputs.datastream.StreamSourceConfigSalesforceSourceConfigExcludeObjectsObject[];
    }
    interface StreamSourceConfigSalesforceSourceConfigExcludeObjectsObject {
        /**
         * Fields in the Salesforce object. When unspecified as part of include/exclude objects, includes/excludes everything/nothing.
         * Structure is documented below.
         */
        fields?: outputs.datastream.StreamSourceConfigSalesforceSourceConfigExcludeObjectsObjectField[];
        /**
         * Name of object in Salesforce Org.
         */
        objectName?: string;
    }
    interface StreamSourceConfigSalesforceSourceConfigExcludeObjectsObjectField {
        /**
         * Field name.
         */
        name?: string;
    }
    interface StreamSourceConfigSalesforceSourceConfigIncludeObjects {
        /**
         * Salesforce objects in Salesforce Org.
         * Structure is documented below.
         */
        objects: outputs.datastream.StreamSourceConfigSalesforceSourceConfigIncludeObjectsObject[];
    }
    interface StreamSourceConfigSalesforceSourceConfigIncludeObjectsObject {
        /**
         * Fields in the Salesforce object. When unspecified as part of include/exclude objects, includes/excludes everything/nothing.
         * Structure is documented below.
         */
        fields?: outputs.datastream.StreamSourceConfigSalesforceSourceConfigIncludeObjectsObjectField[];
        /**
         * Name of object in Salesforce Org.
         */
        objectName?: string;
    }
    interface StreamSourceConfigSalesforceSourceConfigIncludeObjectsObjectField {
        /**
         * Field name.
         */
        name?: string;
    }
    interface StreamSourceConfigSpannerSourceConfig {
        /**
         * Whether to use DataBoost for backfill queries.
         */
        backfillDataBoostEnabled?: boolean;
        /**
         * The Spanner change stream name to use.
         */
        changeStreamName?: string;
        /**
         * Spanner objects to exclude from the stream.
         * Structure is documented below.
         */
        excludeObjects?: outputs.datastream.StreamSourceConfigSpannerSourceConfigExcludeObjects;
        /**
         * The FGAC role to use for Spanner queries.
         */
        fgacRole?: string;
        /**
         * Spanner objects to retrieve from the source.
         * Structure is documented below.
*/ includeObjects?: outputs.datastream.StreamSourceConfigSpannerSourceConfigIncludeObjects; /** * Max concurrent backfill tasks. */ maxConcurrentBackfillTasks: number; /** * Max concurrent CDC tasks. */ maxConcurrentCdcTasks: number; /** * The RPC priority to use for Spanner queries. * Possible values are: `LOW`, `MEDIUM`, `HIGH`. */ spannerRpcPriority?: string; } interface StreamSourceConfigSpannerSourceConfigExcludeObjects { /** * Spanner schemas in the database * Structure is documented below. */ schemas: outputs.datastream.StreamSourceConfigSpannerSourceConfigExcludeObjectsSchema[]; } interface StreamSourceConfigSpannerSourceConfigExcludeObjectsSchema { /** * Schema name. */ schema: string; /** * Tables in the schema. * Structure is documented below. */ tables?: outputs.datastream.StreamSourceConfigSpannerSourceConfigExcludeObjectsSchemaTable[]; } interface StreamSourceConfigSpannerSourceConfigExcludeObjectsSchemaTable { /** * Spanner columns in the table. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ columns?: outputs.datastream.StreamSourceConfigSpannerSourceConfigExcludeObjectsSchemaTableColumn[]; /** * Table name. */ table: string; } interface StreamSourceConfigSpannerSourceConfigExcludeObjectsSchemaTableColumn { /** * Column name. */ column?: string; /** * (Output) * The Spanner data type. Full data types list can be found here: * https://docs.cloud.google.com/spanner/docs/reference/standard-sql/data-types */ dataType: string; /** * (Output) * Whether the column is a primary key. */ isPrimaryKey: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; } interface StreamSourceConfigSpannerSourceConfigIncludeObjects { /** * Spanner schemas in the database * Structure is documented below. 
*/ schemas: outputs.datastream.StreamSourceConfigSpannerSourceConfigIncludeObjectsSchema[]; } interface StreamSourceConfigSpannerSourceConfigIncludeObjectsSchema { /** * Schema name. */ schema: string; /** * Tables in the schema. * Structure is documented below. */ tables?: outputs.datastream.StreamSourceConfigSpannerSourceConfigIncludeObjectsSchemaTable[]; } interface StreamSourceConfigSpannerSourceConfigIncludeObjectsSchemaTable { /** * Spanner columns in the table. When unspecified as part of include/exclude objects, includes/excludes everything. * Structure is documented below. */ columns?: outputs.datastream.StreamSourceConfigSpannerSourceConfigIncludeObjectsSchemaTableColumn[]; /** * Table name. */ table: string; } interface StreamSourceConfigSpannerSourceConfigIncludeObjectsSchemaTableColumn { /** * Column name. */ column?: string; /** * (Output) * The Spanner data type. Full data types list can be found here: * https://docs.cloud.google.com/spanner/docs/reference/standard-sql/data-types */ dataType: string; /** * (Output) * Whether the column is a primary key. */ isPrimaryKey: boolean; /** * (Output) * The ordinal position of the column in the table. */ ordinalPosition: number; } interface StreamSourceConfigSqlServerSourceConfig { /** * CDC reader reads from change tables. */ changeTables?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigChangeTables; /** * SQL Server objects to exclude from the stream. * Structure is documented below. */ excludeObjects?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigExcludeObjects; /** * SQL Server objects to retrieve from the source. * Structure is documented below. */ includeObjects?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjects; /** * Max concurrent backfill tasks. */ maxConcurrentBackfillTasks: number; /** * Max concurrent CDC tasks. */ maxConcurrentCdcTasks: number; /** * CDC reader reads from transaction logs. 
*/
        transactionLogs?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigTransactionLogs;
    }
    interface StreamSourceConfigSqlServerSourceConfigChangeTables {
    }
    interface StreamSourceConfigSqlServerSourceConfigExcludeObjects {
        /**
         * SQL Server schemas in the database
         * Structure is documented below.
         */
        schemas: outputs.datastream.StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchema[];
    }
    interface StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchema {
        /**
         * Schema name.
         */
        schema: string;
        /**
         * Tables in the schema.
         * Structure is documented below.
         */
        tables?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaTable[];
    }
    interface StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaTable {
        /**
         * SQL Server columns in the table. When unspecified as part of include/exclude objects, includes/excludes everything.
         * Structure is documented below.
         */
        columns?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaTableColumn[];
        /**
         * Table name.
         */
        table: string;
    }
    interface StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaTableColumn {
        /**
         * Column name.
         */
        column?: string;
        /**
         * (Output)
         * The SQL Server data type. Full data types list can be found here:
         * https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql
         */
        dataType?: string;
        /**
         * (Output)
         * Column length.
         */
        length: number;
        /**
         * (Output)
         * Whether or not the column can accept a null value.
         */
        nullable: boolean;
        /**
         * (Output)
         * The ordinal position of the column in the table.
         */
        ordinalPosition: number;
        /**
         * (Output)
         * Column precision.
         */
        precision: number;
        /**
         * (Output)
         * Whether or not the column represents a primary key.
         */
        primaryKey: boolean;
        /**
         * (Output)
         * Column scale.
         */
        scale: number;
    }
    interface StreamSourceConfigSqlServerSourceConfigIncludeObjects {
        /**
         * SQL Server schemas in the database
         * Structure is documented below.
*/
        schemas: outputs.datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchema[];
    }
    interface StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchema {
        /**
         * Schema name.
         */
        schema: string;
        /**
         * Tables in the schema.
         * Structure is documented below.
         */
        tables?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTable[];
    }
    interface StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTable {
        /**
         * SQL Server columns in the table. When unspecified as part of include/exclude objects, includes/excludes everything.
         * Structure is documented below.
         */
        columns?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumn[];
        /**
         * Table name.
         */
        table: string;
    }
    interface StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumn {
        /**
         * Column name.
         */
        column?: string;
        /**
         * (Output)
         * The SQL Server data type. Full data types list can be found here:
         * https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql
         */
        dataType?: string;
        /**
         * (Output)
         * Column length.
         */
        length: number;
        /**
         * (Output)
         * Whether or not the column can accept a null value.
         */
        nullable: boolean;
        /**
         * (Output)
         * The ordinal position of the column in the table.
         */
        ordinalPosition: number;
        /**
         * (Output)
         * Column precision.
         */
        precision: number;
        /**
         * (Output)
         * Whether or not the column represents a primary key.
         */
        primaryKey: boolean;
        /**
         * (Output)
         * Column scale.
         */
        scale: number;
    }
    interface StreamSourceConfigSqlServerSourceConfigTransactionLogs {
    }
}
export declare namespace deploymentmanager {
    interface DeploymentLabel {
        /**
         * Key for label.
         */
        key?: string;
        /**
         * Value of label.
         */
        value?: string;
    }
    interface DeploymentTarget {
        /**
         * The root configuration file to use for this deployment.
         * Structure is documented below.
         */
        config: outputs.deploymentmanager.DeploymentTargetConfig;
        /**
         * Specifies import files for this configuration. This can be
         * used to import templates or other files.
For example, you might * import a text file in order to use the file in a template. * Structure is documented below. */ imports?: outputs.deploymentmanager.DeploymentTargetImport[]; } interface DeploymentTargetConfig { /** * The full YAML contents of your configuration file. */ content: string; } interface DeploymentTargetImport { /** * The full contents of the template that you want to import. */ content?: string; /** * The name of the template to import, as declared in the YAML * configuration. */ name?: string; } } export declare namespace developerconnect { interface AccountConnectorProviderOauthConfig { /** * Required. User selected scopes to apply to the Oauth config * In the event of changing scopes, user records under AccountConnector will * be deleted and users will re-auth again. */ scopes: string[]; /** * List of providers that are owned by Developer Connect. * Possible values: * GITHUB * GITLAB * GOOGLE * SENTRY * ROVO * NEW_RELIC * DATASTAX */ systemProviderId?: string; } interface ConnectionBitbucketCloudConfig { /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ authorizerCredential: outputs.developerconnect.ConnectionBitbucketCloudConfigAuthorizerCredential; /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ readAuthorizerCredential: outputs.developerconnect.ConnectionBitbucketCloudConfigReadAuthorizerCredential; /** * Required. Immutable. SecretManager resource containing the webhook secret used to verify webhook * events, formatted as `projects/*/secrets/*/versions/*`. This is used to * validate and create webhooks. */ webhookSecretSecretVersion: string; /** * Required. The Bitbucket Cloud Workspace ID to be connected to Google Cloud Platform. */ workspace: string; } interface ConnectionBitbucketCloudConfigAuthorizerCredential { /** * Required. 
A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionBitbucketCloudConfigReadAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionBitbucketDataCenterConfig { /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ authorizerCredential: outputs.developerconnect.ConnectionBitbucketDataCenterConfigAuthorizerCredential; /** * Required. The URI of the Bitbucket Data Center host this connection is for. */ hostUri: string; /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ readAuthorizerCredential: outputs.developerconnect.ConnectionBitbucketDataCenterConfigReadAuthorizerCredential; /** * (Output) * Output only. Version of the Bitbucket Data Center server running on the `hostUri`. */ serverVersion: string; /** * ServiceDirectoryConfig represents Service Directory configuration for a * connection. * Structure is documented below. */ serviceDirectoryConfig?: outputs.developerconnect.ConnectionBitbucketDataCenterConfigServiceDirectoryConfig; /** * Optional. SSL certificate authority to trust when making requests to Bitbucket Data * Center. */ sslCaCertificate?: string; /** * Required. Immutable. SecretManager resource containing the webhook secret used to verify webhook * events, formatted as `projects/*/secrets/*/versions/*`. This is used to * validate webhooks. 
*/ webhookSecretSecretVersion: string; } interface ConnectionBitbucketDataCenterConfigAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionBitbucketDataCenterConfigReadAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionBitbucketDataCenterConfigServiceDirectoryConfig { /** * Required. The Service Directory service name. * Format: * projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. */ service: string; } interface ConnectionCryptoKeyConfig { /** * Required. The name of the key which is used to encrypt/decrypt customer data. For key * in Cloud KMS, the key should be in the format of * `projects/*/locations/*/keyRings/*/cryptoKeys/*`. */ keyReference: string; } interface ConnectionGithubConfig { /** * Optional. GitHub App installation id. */ appInstallationId: string; /** * Represents an OAuth token of the account that authorized the Connection, * and associated metadata. * Structure is documented below. */ authorizerCredential: outputs.developerconnect.ConnectionGithubConfigAuthorizerCredential; /** * Required. Immutable. The GitHub Application that was installed to the GitHub user or * organization. * Possible values: * GIT_HUB_APP_UNSPECIFIED * DEVELOPER_CONNECT * FIREBASE */ githubApp: string; /** * (Output) * Output only. The URI to navigate to in order to manage the installation associated * with this GitHubConfig. 
*/ installationUri: string; } interface ConnectionGithubConfigAuthorizerCredential { /** * Required. A SecretManager resource containing the OAuth token that authorizes * the connection. Format: `projects/*/secrets/*/versions/*`. */ oauthTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionGithubEnterpriseConfig { /** * Optional. ID of the GitHub App created from the manifest. */ appId?: string; /** * Optional. ID of the installation of the GitHub App. */ appInstallationId?: string; /** * (Output) * Output only. The URL-friendly name of the GitHub App. */ appSlug: string; /** * Required. The URI of the GitHub Enterprise host this connection is for. */ hostUri: string; /** * (Output) * Output only. The URI to navigate to in order to manage the installation associated * with this GitHubEnterpriseConfig. */ installationUri: string; /** * Optional. SecretManager resource containing the private key of the GitHub App, * formatted as `projects/*/secrets/*/versions/*`. */ privateKeySecretVersion?: string; /** * (Output) * Output only. GitHub Enterprise version installed at the host_uri. */ serverVersion: string; /** * ServiceDirectoryConfig represents Service Directory configuration for a * connection. * Structure is documented below. */ serviceDirectoryConfig?: outputs.developerconnect.ConnectionGithubEnterpriseConfigServiceDirectoryConfig; /** * Optional. SSL certificate to use for requests to GitHub Enterprise. */ sslCaCertificate?: string; /** * Optional. SecretManager resource containing the webhook secret of the GitHub App, * formatted as `projects/*/secrets/*/versions/*`. */ webhookSecretSecretVersion?: string; } interface ConnectionGithubEnterpriseConfigServiceDirectoryConfig { /** * Required. The Service Directory service name. * Format: * projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. 
*/ service: string; } interface ConnectionGitlabConfig { /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ authorizerCredential: outputs.developerconnect.ConnectionGitlabConfigAuthorizerCredential; /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ readAuthorizerCredential: outputs.developerconnect.ConnectionGitlabConfigReadAuthorizerCredential; /** * Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, * formatted as `projects/*/secrets/*/versions/*`. This is used to validate * webhooks. */ webhookSecretSecretVersion: string; } interface ConnectionGitlabConfigAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionGitlabConfigReadAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionGitlabEnterpriseConfig { /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. */ authorizerCredential: outputs.developerconnect.ConnectionGitlabEnterpriseConfigAuthorizerCredential; /** * Required. The URI of the GitLab Enterprise host this connection is for. */ hostUri: string; /** * Represents a personal access token that authorized the Connection, * and associated metadata. * Structure is documented below. 
*/ readAuthorizerCredential: outputs.developerconnect.ConnectionGitlabEnterpriseConfigReadAuthorizerCredential; /** * (Output) * Output only. Version of the GitLab Enterprise server running on the `hostUri`. */ serverVersion: string; /** * ServiceDirectoryConfig represents Service Directory configuration for a * connection. * Structure is documented below. */ serviceDirectoryConfig?: outputs.developerconnect.ConnectionGitlabEnterpriseConfigServiceDirectoryConfig; /** * Optional. SSL Certificate Authority certificate to use for requests to GitLab * Enterprise instance. */ sslCaCertificate?: string; /** * Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, * formatted as `projects/*/secrets/*/versions/*`. This is used to validate * webhooks. */ webhookSecretSecretVersion: string; } interface ConnectionGitlabEnterpriseConfigAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionGitlabEnterpriseConfigReadAuthorizerCredential { /** * Required. A SecretManager resource containing the user token that authorizes * the Developer Connect connection. Format: * `projects/*/secrets/*/versions/*`. */ userTokenSecretVersion: string; /** * (Output) * Output only. The username associated with this token. */ username: string; } interface ConnectionGitlabEnterpriseConfigServiceDirectoryConfig { /** * Required. The Service Directory service name. * Format: * projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. */ service: string; } interface ConnectionInstallationState { /** * Output only. Link to follow for next action. Empty string if the installation is already * complete. */ actionUri?: string; /** * Output only. 
Message of what the user should do next to continue the installation. * Empty string if the installation is already complete. */ message?: string; /** * (Output) * Output only. Current step of the installation process. * Possible values: * STAGE_UNSPECIFIED * PENDING_CREATE_APP * PENDING_USER_OAUTH * PENDING_INSTALL_APP * COMPLETE */ stage: string; } interface InsightsConfigArtifactConfig { /** * Google Artifact Analysis configurations. * Structure is documented below. */ googleArtifactAnalysis?: outputs.developerconnect.InsightsConfigArtifactConfigGoogleArtifactAnalysis; /** * Google Artifact Registry configurations. * Structure is documented below. */ googleArtifactRegistry?: outputs.developerconnect.InsightsConfigArtifactConfigGoogleArtifactRegistry; /** * The URI of the artifact that is deployed. * e.g. `us-docker.pkg.dev/my-project/my-repo/image`. * The URI does not include the tag / digest because it captures a lineage of * artifacts. */ uri?: string; } interface InsightsConfigArtifactConfigGoogleArtifactAnalysis { /** * The project id of the project where the provenance is stored. */ projectId: string; } interface InsightsConfigArtifactConfigGoogleArtifactRegistry { /** * The name of the artifact registry package. */ artifactRegistryPackage: string; /** * The host project of Artifact Registry. */ projectId: string; } interface InsightsConfigError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details: outputs.developerconnect.InsightsConfigErrorDetail[]; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. 
*/ message: string; } interface InsightsConfigErrorDetail { /** * (Output) * A message with details about the error. */ detailMessage: string; } interface InsightsConfigRuntimeConfig { /** * AppHubWorkload represents the App Hub Workload. * Structure is documented below. */ appHubWorkload?: outputs.developerconnect.InsightsConfigRuntimeConfigAppHubWorkload; /** * GKEWorkload represents the Google Kubernetes Engine runtime. * Structure is documented below. */ gkeWorkload?: outputs.developerconnect.InsightsConfigRuntimeConfigGkeWorkload; /** * (Output) * The state of the Runtime. * Possible values: * STATE_UNSPECIFIED * LINKED * UNLINKED */ state: string; /** * The URI of the runtime configuration. * For GKE, this is the cluster name. * For Cloud Run, this is the service name. */ uri: string; } interface InsightsConfigRuntimeConfigAppHubWorkload { /** * (Output) * The criticality of the App Hub Workload. */ criticality: string; /** * (Output) * The environment of the App Hub Workload. */ environment: string; /** * (Output) * Output only. The name of the App Hub Workload. * Format: * `projects/{project}/locations/{location}/applications/{application}/workloads/{workload}`. */ workload: string; } interface InsightsConfigRuntimeConfigGkeWorkload { /** * The name of the GKE cluster. * Format: * `projects/{project}/locations/{location}/clusters/{cluster}`. */ cluster: string; /** * (Output) * The name of the GKE deployment. * Format: * `projects/{project}/locations/{location}/clusters/{cluster}/namespaces/{namespace}/deployments/{deployment}`. */ deployment: string; } interface InsightsConfigTargetProjects { /** * The project IDs. Format {project}. */ projectIds?: string[]; } } export declare namespace diagflow { interface ConversationProfileAutomatedAgentConfig { /** * ID of the Dialogflow agent environment to use. * Expects the format "projects//locations//agent/environments/" */ agent: string; /** * Configure lifetime of the Dialogflow session. 
*/ sessionTtl?: string; } interface ConversationProfileHumanAgentAssistantConfig { /** * Configuration for agent assistance of end user participant. * Structure is documented below. */ endUserSuggestionConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfig; /** * Configuration for agent assistance of human agent participant. * Structure is documented below. */ humanAgentSuggestionConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfig; /** * desc * Structure is documented below. */ messageAnalysisConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigMessageAnalysisConfig; /** * Pub/Sub topic on which to publish new agent assistant events. * Expects the format "projects//locations//topics/" * Structure is documented below. */ notificationConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigNotificationConfig; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfig { /** * When disableHighLatencyFeaturesSyncDelivery is true and using the AnalyzeContent API, we will not deliver the responses from high latency features in the API response. The humanAgentAssistantConfig.notification_config must be configured and enableEventBasedSuggestion must be set to true to receive the responses from high latency features in Pub/Sub. High latency feature(s): KNOWLEDGE_ASSIST */ disableHighLatencyFeaturesSyncDelivery?: boolean; /** * Configuration of different suggestion features. One feature can have only one config. * Structure is documented below. */ featureConfigs?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfig[]; /** * List of various generator resource names used in the conversation profile. 
*/ generators?: string[]; /** * If groupSuggestionResponses is false, and there are multiple featureConfigs in event based suggestion or StreamingAnalyzeContent, we will try to deliver suggestions to customers as soon as we get new suggestion. Different type of suggestions based on the same context will be in separate Pub/Sub event or StreamingAnalyzeContentResponse. * If groupSuggestionResponses set to true. All the suggestions to the same participant based on the same context will be grouped into a single Pub/Sub event or StreamingAnalyzeContentResponse. */ groupSuggestionResponses?: boolean; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfig { /** * Configs of custom conversation model. * Structure is documented below. */ conversationModelConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigConversationModelConfig; /** * Config to process conversation. * Structure is documented below. */ conversationProcessConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigConversationProcessConfig; /** * Disable the logging of search queries sent by human agents. It can prevent those queries from being stored at answer records. * This feature is only supported for types: KNOWLEDGE_SEARCH. */ disableAgentQueryLogging?: boolean; /** * Enable including conversation context during query answer generation. * This feature is only supported for types: KNOWLEDGE_SEARCH. */ enableConversationAugmentedQuery?: boolean; /** * Automatically iterates all participants and tries to compile suggestions. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ, DIALOGFLOW_ASSIST, KNOWLEDGE_ASSIST. */ enableEventBasedSuggestion?: boolean; /** * Enable query suggestion only. * This feature is only supported for types: KNOWLEDGE_ASSIST */ enableQuerySuggestionOnly?: boolean; /** * Enable query suggestion even if we can't find its answer. 
By default, queries are suggested only if we find its answer. * This feature is only supported for types: KNOWLEDGE_ASSIST. */ enableQuerySuggestionWhenNoAnswer?: boolean; /** * Configs of query. * Structure is documented below. */ queryConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfig; /** * The suggestion feature. * Structure is documented below. */ suggestionFeature?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigSuggestionFeature; /** * Settings of suggestion trigger. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ. * Structure is documented below. */ suggestionTriggerSettings?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigSuggestionTriggerSettings; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigConversationModelConfig { /** * Version of current baseline model. It will be ignored if model is set. Valid versions are: Article Suggestion baseline model: - 0.9 - 1.0 (default) Summarization baseline model: - 1.0 */ baselineModelVersion?: string; /** * Conversation model resource name. Format: projects//conversationModels/. */ model?: string; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigConversationProcessConfig { /** * Number of recent non-small-talk sentences to use as context for article and FAQ suggestion */ recentSentencesCount?: number; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfig { /** * Confidence threshold of query result. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ, SMART_REPLY, SMART_COMPOSE, KNOWLEDGE_SEARCH, KNOWLEDGE_ASSIST, ENTITY_EXTRACTION. */ confidenceThreshold?: number; /** * Determines how recent conversation context is filtered when generating suggestions. 
If unspecified, no messages will be dropped. * Structure is documented below. */ contextFilterSettings?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigContextFilterSettings; /** * Query from Dialogflow agent. * This feature is supported for types: DIALOGFLOW_ASSIST. * Structure is documented below. */ dialogflowQuerySource?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySource; /** * Query from knowledge base document. * This feature is supported for types: SMART_REPLY, SMART_COMPOSE. * Structure is documented below. */ documentQuerySource?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigDocumentQuerySource; /** * Query from knowledgebase. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ. * Structure is documented below. */ knowledgeBaseQuerySource?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigKnowledgeBaseQuerySource; /** * Maximum number of results to return. */ maxResults?: number; /** * he customized sections chosen to return when requesting a summary of a conversation. * Structure is documented below. */ sections?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigSections; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigContextFilterSettings { /** * If set to true, the last message from virtual agent (hand off message) and the message before it (trigger message of hand off) are dropped. */ dropHandoffMessages?: boolean; /** * If set to true, all messages from ivr stage are dropped. */ dropIvrMessages?: boolean; /** * If set to true, all messages from virtual agent are dropped. 
*/ dropVirtualAgentMessages?: boolean; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySource { /** * he name of a Dialogflow virtual agent used for end user side intent detection and suggestion. Format: projects//locations//agent. */ agent: string; /** * The Dialogflow assist configuration for human agent. * Structure is documented below. */ humanAgentSideConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySourceHumanAgentSideConfig; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySourceHumanAgentSideConfig { /** * The name of a dialogflow virtual agent used for intent detection and suggestion triggered by human agent. Format: projects//locations//agent. */ agent?: string; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigDocumentQuerySource { /** * Knowledge documents to query from. Format: projects//locations//knowledgeBases//documents/. */ documents: string[]; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigKnowledgeBaseQuerySource { /** * Knowledge bases to query. Format: projects//locations//knowledgeBases/. */ knowledgeBases: string[]; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigQueryConfigSections { /** * The selected sections chosen to return when requesting a summary of a conversation * If not provided the default selection will be "{SITUATION, ACTION, RESULT}". * Each value may be one of: `SECTION_TYPE_UNSPECIFIED`, `SITUATION`, `ACTION`, `RESOLUTION`, `REASON_FOR_CANCELLATION`, `CUSTOMER_SATISFACTION`, `ENTITIES`. 
*/ sectionTypes?: string[]; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigSuggestionFeature { /** * Type of Human Agent Assistant API feature to request. */ type?: string; } interface ConversationProfileHumanAgentAssistantConfigEndUserSuggestionConfigFeatureConfigSuggestionTriggerSettings { /** * Do not trigger if last utterance is small talk. */ noSmallTalk?: boolean; /** * Only trigger suggestion if participant role of last utterance is END_USER. */ onlyEndUser?: boolean; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfig { /** * When disableHighLatencyFeaturesSyncDelivery is true and using the AnalyzeContent API, we will not deliver the responses from high latency features in the API response. The humanAgentAssistantConfig.notification_config must be configured and enableEventBasedSuggestion must be set to true to receive the responses from high latency features in Pub/Sub. High latency feature(s): KNOWLEDGE_ASSIST */ disableHighLatencyFeaturesSyncDelivery?: boolean; /** * Configuration of different suggestion features. One feature can have only one config. * Structure is documented below. */ featureConfigs?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfig[]; /** * List of various generator resource names used in the conversation profile. */ generators?: string[]; /** * If groupSuggestionResponses is false, and there are multiple featureConfigs in event based suggestion or StreamingAnalyzeContent, we will try to deliver suggestions to customers as soon as we get new suggestion. Different type of suggestions based on the same context will be in separate Pub/Sub event or StreamingAnalyzeContentResponse. * If groupSuggestionResponses set to true. All the suggestions to the same participant based on the same context will be grouped into a single Pub/Sub event or StreamingAnalyzeContentResponse. 
*/ groupSuggestionResponses?: boolean; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfig { /** * Configs of custom conversation model. * Structure is documented below. */ conversationModelConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigConversationModelConfig; /** * Config to process conversation. * Structure is documented below. */ conversationProcessConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigConversationProcessConfig; /** * Disable the logging of search queries sent by human agents. It can prevent those queries from being stored at answer records. * This feature is only supported for types: KNOWLEDGE_SEARCH. */ disableAgentQueryLogging?: boolean; /** * Enable including conversation context during query answer generation. * This feature is only supported for types: KNOWLEDGE_SEARCH. */ enableConversationAugmentedQuery?: boolean; /** * Automatically iterates all participants and tries to compile suggestions. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ, DIALOGFLOW_ASSIST, KNOWLEDGE_ASSIST. */ enableEventBasedSuggestion?: boolean; /** * Enable query suggestion only. * This feature is only supported for types: KNOWLEDGE_ASSIST */ enableQuerySuggestionOnly?: boolean; /** * Enable query suggestion even if we can't find its answer. By default, queries are suggested only if we find its answer. * This feature is only supported for types: KNOWLEDGE_ASSIST. */ enableQuerySuggestionWhenNoAnswer?: boolean; /** * Configs of query. * Structure is documented below. */ queryConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfig; /** * The suggestion feature. * Structure is documented below. 
*/ suggestionFeature?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigSuggestionFeature; /** * Settings of suggestion trigger. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ. * Structure is documented below. */ suggestionTriggerSettings?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigSuggestionTriggerSettings; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigConversationModelConfig { /** * Version of current baseline model. It will be ignored if model is set. Valid versions are: Article Suggestion baseline model: - 0.9 - 1.0 (default) Summarization baseline model: - 1.0 */ baselineModelVersion?: string; /** * Conversation model resource name. Format: projects//conversationModels/. */ model?: string; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigConversationProcessConfig { /** * Number of recent non-small-talk sentences to use as context for article and FAQ suggestion */ recentSentencesCount?: number; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfig { /** * Confidence threshold of query result. * This feature is only supported for types: ARTICLE_SUGGESTION, FAQ, SMART_REPLY, SMART_COMPOSE, KNOWLEDGE_SEARCH, KNOWLEDGE_ASSIST, ENTITY_EXTRACTION. */ confidenceThreshold?: number; /** * Determines how recent conversation context is filtered when generating suggestions. If unspecified, no messages will be dropped. * Structure is documented below. */ contextFilterSettings?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigContextFilterSettings; /** * Query from Dialogflow agent. * This feature is supported for types: DIALOGFLOW_ASSIST. * Structure is documented below. 
*/ dialogflowQuerySource?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySource; /** * Maximum number of results to return. */ maxResults?: number; /** * he customized sections chosen to return when requesting a summary of a conversation. * Structure is documented below. */ sections?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigSections; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigContextFilterSettings { /** * If set to true, the last message from virtual agent (hand off message) and the message before it (trigger message of hand off) are dropped. */ dropHandoffMessages?: boolean; /** * If set to true, all messages from ivr stage are dropped. */ dropIvrMessages?: boolean; /** * If set to true, all messages from virtual agent are dropped. */ dropVirtualAgentMessages?: boolean; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySource { /** * he name of a Dialogflow virtual agent used for end user side intent detection and suggestion. Format: projects//locations//agent. */ agent: string; /** * The Dialogflow assist configuration for human agent. * Structure is documented below. */ humanAgentSideConfig?: outputs.diagflow.ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySourceHumanAgentSideConfig; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigDialogflowQuerySourceHumanAgentSideConfig { /** * The name of a dialogflow virtual agent used for intent detection and suggestion triggered by human agent. Format: projects//locations//agent. 
*/ agent?: string; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigQueryConfigSections { /** * The selected sections chosen to return when requesting a summary of a conversation * If not provided the default selection will be "{SITUATION, ACTION, RESULT}". * Each value may be one of: `SECTION_TYPE_UNSPECIFIED`, `SITUATION`, `ACTION`, `RESOLUTION`, `REASON_FOR_CANCELLATION`, `CUSTOMER_SATISFACTION`, `ENTITIES`. */ sectionTypes?: string[]; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigSuggestionFeature { /** * Type of Human Agent Assistant API feature to request. */ type?: string; } interface ConversationProfileHumanAgentAssistantConfigHumanAgentSuggestionConfigFeatureConfigSuggestionTriggerSettings { /** * Do not trigger if last utterance is small talk. */ noSmallTalk?: boolean; /** * Only trigger suggestion if participant role of last utterance is END_USER. */ onlyEndUser?: boolean; } interface ConversationProfileHumanAgentAssistantConfigMessageAnalysisConfig { /** * Enable entity extraction in conversation messages on agent assist stage. */ enableEntityExtraction?: boolean; /** * Enable sentiment analysis in conversation messages on agent assist stage. Sentiment analysis inspects user input and identifies the prevailing subjective opinion, especially to determine a user's attitude as positive, negative, or neutral. */ enableSentimentAnalysis?: boolean; } interface ConversationProfileHumanAgentAssistantConfigNotificationConfig { /** * Format of the message * Possible values are: `MESSAGE_FORMAT_UNSPECIFIED`, `PROTO`, `JSON`. */ messageFormat?: string; /** * Name of the Pub/Sub topic to publish conversation events */ topic?: string; } interface ConversationProfileHumanAgentHandoffConfig { /** * Config for using LivePerson. * Structure is documented below. 
*/ livePersonConfig?: outputs.diagflow.ConversationProfileHumanAgentHandoffConfigLivePersonConfig; } interface ConversationProfileHumanAgentHandoffConfigLivePersonConfig { /** * Account number of the LivePerson account to connect. */ accountNumber: string; } interface ConversationProfileLoggingConfig { /** * Whether to log conversation events */ enableStackdriverLogging?: boolean; } interface ConversationProfileNewMessageEventNotificationConfig { /** * Format of the message * Possible values are: `MESSAGE_FORMAT_UNSPECIFIED`, `PROTO`, `JSON`. */ messageFormat?: string; /** * Name of the Pub/Sub topic to publish conversation events */ topic?: string; } interface ConversationProfileNewRecognitionResultNotificationConfig { /** * Format of message. * Possible values are: `MESSAGE_FORMAT_UNSPECIFIED`, `PROTO`, `JSON`. */ messageFormat?: string; /** * Name of the Pub/Sub topic to publish conversation events like CONVERSATION_STARTED as serialized ConversationEvent protos. * For telephony integration to receive notification, make sure either this topic is in the same project as the conversation or you grant service-@gcp-sa-dialogflow.iam.gserviceaccount.com the Dialogflow Service Agent role in the topic project. * For chat integration to receive notification, make sure API caller has been granted the Dialogflow Service Agent role for the topic. * Format: projects//locations//topics/. */ topic?: string; } interface ConversationProfileNotificationConfig { /** * Format of the message * Possible values are: `MESSAGE_FORMAT_UNSPECIFIED`, `PROTO`, `JSON`. */ messageFormat?: string; /** * Name of the Pub/Sub topic to publish conversation events */ topic?: string; } interface ConversationProfileSttConfig { /** * Audio encoding of the audio content to process. 
* Possible values are: `AUDIO_ENCODING_UNSPECIFIED`, `AUDIO_ENCODING_LINEAR_16`, `AUDIO_ENCODING_FLAC`, `AUDIO_ENCODING_MULAW`, `AUDIO_ENCODING_AMR`, `AUDIO_ENCODING_AMR_WB`, `AUDIO_ENCODING_OGG_OPUS`, `AUDIOENCODING_SPEEX_WITH_HEADER_BYTE`. */ audioEncoding?: string; /** * If true, Dialogflow returns SpeechWordInfo in StreamingRecognitionResult with information about the recognized speech words. */ enableWordInfo?: boolean; /** * The language of the supplied audio. */ languageCode: string; /** * Which Speech model to select. * Leave this field unspecified to use Agent Speech settings for model selection. */ model?: string; /** * Sample rate (in Hertz) of the audio content sent in the query. */ sampleRateHertz?: number; /** * The speech model used in speech to text. * Possible values are: `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE`, `USE_STANDARD`, `USE_ENHANCED`. */ speechModelVariant?: string; /** * Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value. */ useTimeoutBasedEndpointing?: boolean; } interface ConversationProfileTtsConfig { /** * An identifier which selects 'audio effects' profiles that are applied on (post synthesized) text to speech. Effects are applied on top of each other in the order they are given. */ effectsProfileIds?: string[]; /** * Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20 semitones from the original pitch. -20 means decrease 20 semitones from the original pitch. */ pitch?: number; /** * Speaking rate/speed, in the range [0.25, 4.0]. */ speakingRate?: number; /** * The desired voice of the synthesized audio. * Structure is documented below. */ voice?: outputs.diagflow.ConversationProfileTtsConfigVoice; /** * Volume gain (in dB) of the normal native volume supported by the specific voice. */ volumeGainDb?: number; } interface ConversationProfileTtsConfigVoice { /** * The name of the voice. */ name?: string; /** * The preferred gender of the voice. 
* Possible values are: `SSML_VOICE_GENDER_UNSPECIFIED`, `SSML_VOICE_GENDER_MALE`, `SSML_VOICE_GENDER_FEMALE`, `SSML_VOICE_GENDER_NEUTRAL`. */ ssmlGender?: string; } interface CxAgentAdvancedSettings { /** * If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: * * Agent level * * Flow level * Structure is documented below. */ audioExportGcsDestination?: outputs.diagflow.CxAgentAdvancedSettingsAudioExportGcsDestination; /** * Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ dtmfSettings?: outputs.diagflow.CxAgentAdvancedSettingsDtmfSettings; /** * Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: * * Agent level * Structure is documented below. */ loggingSettings?: outputs.diagflow.CxAgentAdvancedSettingsLoggingSettings; /** * Settings for speech to text detection. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ speechSettings?: outputs.diagflow.CxAgentAdvancedSettingsSpeechSettings; } interface CxAgentAdvancedSettingsAudioExportGcsDestination { /** * The Google Cloud Storage URI for the exported objects. Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. * Format: gs://bucket/object-name-or-prefix */ uri?: string; } interface CxAgentAdvancedSettingsDtmfSettings { /** * If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. 
a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). */ enabled?: boolean; /** * The digit that terminates a DTMF digit sequence. */ finishDigit?: string; /** * Max length of DTMF digits. */ maxDigits?: number; } interface CxAgentAdvancedSettingsLoggingSettings { /** * Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted. */ enableConsentBasedRedaction?: boolean; /** * Enables DF Interaction logging. */ enableInteractionLogging?: boolean; /** * Enables Google Cloud Logging. */ enableStackdriverLogging?: boolean; } interface CxAgentAdvancedSettingsSpeechSettings { /** * Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. */ endpointerSensitivity?: number; /** * Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). * An object containing a list of **"key": value** pairs. Example: **{ "name": "wrench", "mass": "1.3kg", "count": "3" }**. */ models?: { [key: string]: string; }; /** * Timeout before detecting no speech. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ noSpeechTimeout?: string; /** * Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. */ useTimeoutBasedEndpointing?: boolean; } interface CxAgentAnswerFeedbackSettings { /** * If enabled, end users will be able to provide [answer feedback](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.sessions/submitAnswerFeedback#body.AnswerFeedback) * to Dialogflow responses. 
Feature works only if interaction logging is enabled in the Dialogflow agent. */ enableAnswerFeedback?: boolean; } interface CxAgentClientCertificateSettings { /** * The name of the SecretManager secret version resource storing the passphrase. 'passphrase' should be left unset if the private key is not encrypted. Format: **projects/{project}/secrets/{secret}/versions/{version}** */ passphrase?: string; /** * The name of the SecretManager secret version resource storing the private key encoded in PEM format. Format: **projects/{project}/secrets/{secret}/versions/{version}** */ privateKey: string; /** * The ssl certificate encoded in PEM format. This string must include the begin header and end footer lines. */ sslCertificate: string; } interface CxAgentGenAppBuilderSettings { /** * The full name of the Gen App Builder engine related to this agent if there is one. * Format: projects/{Project ID}/locations/{Location ID}/collections/{Collection ID}/engines/{Engine ID} */ engine: string; } interface CxAgentGitIntegrationSettings { /** * Settings of integration with GitHub. * Structure is documented below. */ githubSettings?: outputs.diagflow.CxAgentGitIntegrationSettingsGithubSettings; } interface CxAgentGitIntegrationSettingsGithubSettings { /** * The access token used to authenticate the access to the GitHub repository. * **Note**: This property is sensitive and will not be displayed in the plan. */ accessToken?: string; /** * A list of branches configured to be used from Dialogflow. */ branches?: string[]; /** * The unique repository display name for the GitHub repository. */ displayName?: string; /** * The GitHub repository URI related to the agent. */ repositoryUri?: string; /** * The branch of the GitHub repository tracked for this agent. */ trackingBranch?: string; } interface CxAgentPersonalizationSettings { /** * Default end user metadata, used when processing DetectIntent requests. 
Recommended to be filled as a template instead of hard-coded value, for example { "age": "$session.params.age" }. * The data will be merged with the [QueryParameters.end_user_metadata](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/QueryParameters#FIELDS.end_user_metadata) * in [DetectIntentRequest.query_params](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.sessions/detectIntent#body.request_body.FIELDS.query_params) during query processing. * This field uses JSON data as a string. The value provided must be a valid JSON representation documented in [Struct](https://protobuf.dev/reference/protobuf/google.protobuf/#struct). */ defaultEndUserMetadata?: string; } interface CxAgentSpeechToTextSettings { /** * Whether to use speech adaptation for speech recognition. */ enableSpeechAdaptation?: boolean; } interface CxAgentTextToSpeechSettings { /** * Configuration of how speech should be synthesized, mapping from [language](https://cloud.google.com/dialogflow/cx/docs/reference/language) to [SynthesizeSpeechConfig](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents#synthesizespeechconfig). * These settings affect: * * The phone gateway synthesize configuration set via Agent.text_to_speech_settings. * * How speech is synthesized when invoking session APIs. `Agent.text_to_speech_settings` only applies if `OutputAudioConfig.synthesize_speech_config` is not specified. */ synthesizeSpeechConfigs?: string; } interface CxEntityTypeEntity { /** * A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym could be green onions. * For KIND_LIST entity types: This collection must contain exactly one synonym equal to value. */ synonyms?: string[]; /** * The primary value associated with this entity entry. For example, if the entity type is vegetable, the value could be scallions. 
* For KIND_MAP entity types: A canonical value to be used in place of synonyms. * For KIND_LIST entity types: A string that can contain references to other entity types (with or without aliases). */ value?: string; } interface CxEntityTypeExcludedPhrase { /** * The word or phrase to be excluded. */ value?: string; } interface CxEnvironmentVersionConfig { /** * Format: projects/{{project}}/locations/{{location}}/agents/{{agent}}/flows/{{flow}}/versions/{{version}}. */ version: string; } interface CxFlowAdvancedSettings { /** * If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: * * Agent level * * Flow level * Structure is documented below. */ audioExportGcsDestination?: outputs.diagflow.CxFlowAdvancedSettingsAudioExportGcsDestination; /** * Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ dtmfSettings?: outputs.diagflow.CxFlowAdvancedSettingsDtmfSettings; /** * Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: * * Agent level * Structure is documented below. */ loggingSettings?: outputs.diagflow.CxFlowAdvancedSettingsLoggingSettings; /** * Settings for speech to text detection. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ speechSettings?: outputs.diagflow.CxFlowAdvancedSettingsSpeechSettings; } interface CxFlowAdvancedSettingsAudioExportGcsDestination { /** * The Google Cloud Storage URI for the exported objects. Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. 
* Format: gs://bucket/object-name-or-prefix */ uri?: string; } interface CxFlowAdvancedSettingsDtmfSettings { /** * If true, incoming audio is processed for DTMF (dual tone multi frequtectency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will de the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). */ enabled?: boolean; /** * The digit that terminates a DTMF digit sequence. */ finishDigit?: string; /** * Max length of DTMF digits. */ maxDigits?: number; } interface CxFlowAdvancedSettingsLoggingSettings { /** * Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted. */ enableConsentBasedRedaction?: boolean; /** * Enables DF Interaction logging. */ enableInteractionLogging?: boolean; /** * Enables Google Cloud Logging. */ enableStackdriverLogging?: boolean; } interface CxFlowAdvancedSettingsSpeechSettings { /** * Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. */ endpointerSensitivity?: number; /** * Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). * An object containing a list of **"key": value** pairs. Example: **{ "name": "wrench", "mass": "1.3kg", "count": "3" }**. */ models?: { [key: string]: string; }; /** * Timeout before detecting no speech. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". */ noSpeechTimeout?: string; /** * Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. 
*/ useTimeoutBasedEndpointing?: boolean; } interface CxFlowEventHandler { /** * The name of the event to handle. */ event?: string; /** * (Output) * The unique identifier of this event handler. */ name: string; /** * The target flow to transition to. * Format: projects//locations//agents//flows/. */ targetFlow?: string; /** * The target page to transition to. * Format: projects//locations//agents//flows//pages/. */ targetPage?: string; /** * The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillment; } interface CxFlowEventHandlerTriggerFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentConditionalCase[]; /** * If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. */ enableGenerativeFallback?: boolean; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. 
*/ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxFlowEventHandlerTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxFlowEventHandlerTriggerFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ conversationSuccess?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ liveAgentHandoff?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxFlowEventHandlerTriggerFulfillmentMessageText; } interface CxFlowEventHandlerTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxFlowEventHandlerTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxFlowEventHandlerTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. 
* This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxFlowEventHandlerTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxFlowEventHandlerTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxFlowEventHandlerTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxFlowEventHandlerTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxFlowKnowledgeConnectorSettings { /** * Optional. List of related data store connections. * Structure is documented below. */ dataStoreConnections?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsDataStoreConnection[]; /** * Whether Knowledge Connector is enabled or not. */ enabled?: boolean; /** * The target flow to transition to. Format: projects//locations//agents//flows/. 
* This field is part of a union field `target`: Only one of `targetPage` or `targetFlow` may be set. */ targetFlow?: string; /** * The target page to transition to. Format: projects//locations//agents//flows//pages/. * The page must be in the same host flow (the flow that owns this `KnowledgeConnectorSettings`). * This field is part of a union field `target`: Only one of `targetPage` or `targetFlow` may be set. */ targetPage?: string; /** * The fulfillment to be triggered. * When the answers from the Knowledge Connector are selected by Dialogflow, you can utitlize the request scoped parameter $request.knowledge.answers (contains up to the 5 highest confidence answers) and $request.knowledge.questions (contains the corresponding questions) to construct the fulfillment. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillment; } interface CxFlowKnowledgeConnectorSettingsDataStoreConnection { /** * The full name of the referenced data store. Formats: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} projects/{project}/locations/{location}/dataStores/{dataStore} */ dataStore?: string; /** * The type of the connected data store. * * PUBLIC_WEB: A data store that contains public web content. * * UNSTRUCTURED: A data store that contains unstructured private data. * * STRUCTURED: A data store that contains structured data (for example FAQ). * Possible values are: `PUBLIC_WEB`, `UNSTRUCTURED`, `STRUCTURED`. */ dataStoreType?: string; /** * The document processing mode for the data store connection. Should only be set for PUBLIC_WEB and UNSTRUCTURED data stores. If not set it is considered as DOCUMENTS, as this is the legacy mode. * * DOCUMENTS: Documents are processed as documents. * * CHUNKS: Documents are converted to chunks. * Possible values are: `DOCUMENTS`, `CHUNKS`. 
*/ documentProcessingMode?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillment { /** * Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playbackInterruptionSettings at fulfillment level only overrides the playbackInterruptionSettings at the agent level, leaving other settings at the agent level unchanged. * DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. * Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. * Structure is documented below. */ advancedSettings?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettings; /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentConditionalCase[]; /** * If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. */ enableGenerativeFallback?: boolean; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. 
*/ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettings { /** * Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ dtmfSettings?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsDtmfSettings; /** * Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: * * Agent level * Structure is documented below. */ loggingSettings?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsLoggingSettings; /** * Settings for speech to text detection. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ speechSettings?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsSpeechSettings; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsDtmfSettings { /** * If true, incoming audio is processed for DTMF (dual tone multi frequtectency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will de the event (e.g. 
a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). */ enabled?: boolean; /** * Endpoint timeout setting for matching dtmf input to regex. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". */ endpointingTimeoutDuration?: string; /** * The digit that terminates a DTMF digit sequence. */ finishDigit?: string; /** * Interdigit timeout setting for matching dtmf input to regex. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". */ interdigitTimeoutDuration?: string; /** * Max length of DTMF digits. */ maxDigits?: number; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsLoggingSettings { /** * Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted. */ enableConsentBasedRedaction?: boolean; /** * Enables DF Interaction logging. */ enableInteractionLogging?: boolean; /** * Enables Google Cloud Logging. */ enableStackdriverLogging?: boolean; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsSpeechSettings { /** * Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. */ endpointerSensitivity?: number; /** * Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). * An object containing a list of **"key": value** pairs. Example: **{ "name": "wrench", "mass": "1.3kg", "count": "3" }**. */ models?: { [key: string]: string; }; /** * Timeout before detecting no speech. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". 
*/ noSpeechTimeout?: string; /** * Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. */ useTimeoutBasedEndpointing?: boolean; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageConversationSuccess; /** * (Output) * This type has no fields. * Indicates that interaction with the Dialogflow agent has ended. 
This message is generated by Dialogflow only and not supposed to be defined by the user. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ endInteractions: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageEndInteraction[]; /** * This type has no fields. * Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. * Otherwise, the info card response is skipped. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ knowledgeInfoCard?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageKnowledgeInfoCard; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ liveAgentHandoff?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageLiveAgentHandoff; /** * (Output) * Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via playAudio. This message is generated by Dialogflow only and not supposed to be defined by the user. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ mixedAudios: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudio[]; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageText; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageEndInteraction { } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageKnowledgeInfoCard { } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudio { /** * Segments this audio response is composed of. 
*/ segments?: outputs.diagflow.CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudioSegment[]; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudioSegment { /** * (Output) * Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. * A base64-encoded string. * This field is part of a union field `content`: Only one of `audio` or `uri` may be set. */ audio?: string; /** * Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. * This field is part of a union field `content`: Only one of `audio` or `uri` may be set. */ uri?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. 
*/ audioUri: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxFlowKnowledgeConnectorSettingsTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxFlowNluSettings { /** * To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. * If the returned score value is less than the threshold value, then a no-match event will be triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used. */ classificationThreshold?: number; /** * Indicates NLU model training mode. * * MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. * * MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. * Possible values are: `MODEL_TRAINING_MODE_AUTOMATIC`, `MODEL_TRAINING_MODE_MANUAL`. */ modelTrainingMode?: string; /** * Indicates the type of NLU model. * * MODEL_TYPE_STANDARD: Use standard NLU model. * * MODEL_TYPE_ADVANCED: Use advanced NLU model. 
* Possible values are: `MODEL_TYPE_STANDARD`, `MODEL_TYPE_ADVANCED`. */ modelType?: string; } interface CxFlowTransitionRoute { /** * The condition to evaluate against form parameters or session parameters. * At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. */ condition?: string; /** * The unique identifier of an Intent. * Format: projects//locations//agents//intents/. Indicates that the transition can only happen when the given intent is matched. At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. */ intent?: string; /** * (Output) * The unique identifier of this transition route. */ name: string; /** * The target flow to transition to. * Format: projects//locations//agents//flows/. */ targetFlow?: string; /** * The target page to transition to. * Format: projects//locations//agents//flows//pages/. */ targetPage?: string; /** * The fulfillment to call when the condition is satisfied. At least one of triggerFulfillment and target must be specified. When both are defined, triggerFulfillment is executed first. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillment; } interface CxFlowTransitionRouteTriggerFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentConditionalCase[]; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. 
Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. */ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxFlowTransitionRouteTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxFlowTransitionRouteTriggerFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ liveAgentHandoff?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxFlowTransitionRouteTriggerFulfillmentMessageText; } interface CxFlowTransitionRouteTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxFlowTransitionRouteTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. 
*/ metadata?: string; } interface CxFlowTransitionRouteTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxFlowTransitionRouteTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxFlowTransitionRouteTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxFlowTransitionRouteTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxFlowTransitionRouteTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. 
*/ value?: string; } interface CxGenerativeSettingsFallbackSettings { /** * Stored prompts that can be selected, for example default templates like "conservative" or "chatty", or user defined ones. * Structure is documented below. */ promptTemplates?: outputs.diagflow.CxGenerativeSettingsFallbackSettingsPromptTemplate[]; /** * Display name of the selected prompt. */ selectedPrompt?: string; } interface CxGenerativeSettingsFallbackSettingsPromptTemplate { /** * Prompt name. */ displayName?: string; /** * If the flag is true, the prompt is frozen and cannot be modified by users. */ frozen?: boolean; /** * Prompt text that is sent to a LLM on no-match default, placeholders are filled downstream. For example: "Here is a conversation $conversation, a response is: " */ promptText?: string; } interface CxGenerativeSettingsGenerativeSafetySettings { /** * Banned phrases for generated text. * Structure is documented below. */ bannedPhrases?: outputs.diagflow.CxGenerativeSettingsGenerativeSafetySettingsBannedPhrase[]; /** * Optional. Default phrase match strategy for banned phrases. * See [PhraseMatchStrategy](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/GenerativeSettings#phrasematchstrategy) for valid values. */ defaultBannedPhraseMatchStrategy?: string; } interface CxGenerativeSettingsGenerativeSafetySettingsBannedPhrase { /** * Language code of the phrase. */ languageCode: string; /** * Text input which can be used for prompt or banned phrases. */ text: string; } interface CxGenerativeSettingsKnowledgeConnectorSettings { /** * Name of the virtual agent. Used for LLM prompt. Can be left empty. */ agent?: string; /** * Identity of the agent, e.g. "virtual agent", "AI assistant". */ agentIdentity?: string; /** * Agent scope, e.g. "Example company website", "internal Example company website for employees", "manual of car owner". */ agentScope?: string; /** * Name of the company, organization or other entity that the agent represents. 
Used for knowledge connector LLM prompt and for knowledge search. */ business?: string; /** * Company description, used for LLM prompt, e.g. "a family company selling freshly roasted coffee beans". */ businessDescription?: string; /** * Whether to disable fallback to Data Store search results (in case the LLM couldn't pick a proper answer). By default the feature is enabled. */ disableDataStoreFallback?: boolean; } interface CxGenerativeSettingsLlmModelSettings { /** * The selected LLM model. */ model?: string; /** * The custom prompt to use. */ promptText?: string; } interface CxGeneratorLlmModelSettings { /** * The selected LLM model. */ model?: string; /** * The custom prompt to use. */ promptText?: string; } interface CxGeneratorModelParameter { /** * The maximum number of tokens to generate. */ maxDecodeSteps?: number; /** * The temperature used for sampling. Temperature sampling occurs after both topP and topK have been applied. * Valid range: [0.0, 1.0] Low temperature = less random. High temperature = more random. */ temperature?: number; /** * If set, the sampling process in each step is limited to the topK tokens with highest probabilities. * Valid range: [1, 40] or 1000+. Small topK = less random. Large topK = more random. */ topK?: number; /** * If set, only the tokens comprising the top topP probability mass are considered. * If both topP and topK are set, topP will be used for further refining candidates selected with topK. * Valid range: (0.0, 1.0]. Small topP = less random. Large topP = more random. */ topP?: number; } interface CxGeneratorPlaceholder { /** * Unique ID used to map custom placeholder to parameters in fulfillment. */ id?: string; /** * Custom placeholder value in the prompt text. */ name?: string; } interface CxGeneratorPromptText { /** * Text input which can be used for prompt or banned phrases. */ text?: string; } interface CxIntentParameter { /** * The entity type of the parameter. 
* Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types. */ entityType: string; /** * The unique identifier of the parameter. This field is used by training phrases to annotate their parts. */ id: string; /** * Indicates whether the parameter represents a list of values. */ isList?: boolean; /** * Indicates whether the parameter content should be redacted in log. If redaction is enabled, the parameter content will be replaced by parameter name during logging. * Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled. */ redact?: boolean; } interface CxIntentTrainingPhrase { /** * (Output) * The unique identifier of the training phrase. */ id: string; /** * The ordered list of training phrase parts. The parts are concatenated in order to form the training phrase. * Note: The API does not automatically annotate training phrases like the Dialogflow Console does. * Note: Do not forget to include whitespace at part boundaries, so the training phrase is well formatted when the parts are concatenated. * If the training phrase does not need to be annotated with parameters, you just need a single part with only the Part.text field set. * If you want to annotate the training phrase, you must create multiple parts, where the fields of each part are populated in one of two ways: * Part.text is set to a part of the phrase that has no parameters. * Part.text is set to a part of the phrase that you want to annotate, and the parameterId field is set. * Structure is documented below. */ parts: outputs.diagflow.CxIntentTrainingPhrasePart[]; /** * Indicates how many times this example was added to the intent. */ repeatCount?: number; } interface CxIntentTrainingPhrasePart { /** * The parameter used to annotate this part of the training phrase. 
This field is required for annotated parts of the training phrase. */ parameterId?: string; /** * The text for this part. */ text: string; } interface CxPageAdvancedSettings { /** * Define behaviors for DTMF (dual tone multi frequency). DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ dtmfSettings?: outputs.diagflow.CxPageAdvancedSettingsDtmfSettings; } interface CxPageAdvancedSettingsDtmfSettings { /** * If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). */ enabled?: boolean; /** * The digit that terminates a DTMF digit sequence. */ finishDigit?: string; /** * Max length of DTMF digits. */ maxDigits?: number; } interface CxPageEntryFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxPageEntryFulfillmentConditionalCase[]; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxPageEntryFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. 
*/ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxPageEntryFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxPageEntryFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxPageEntryFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ conversationSuccess?: outputs.diagflow.CxPageEntryFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ liveAgentHandoff?: outputs.diagflow.CxPageEntryFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxPageEntryFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxPageEntryFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxPageEntryFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxPageEntryFulfillmentMessageText; } interface CxPageEntryFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageEntryFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageEntryFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. 
*/ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxPageEntryFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxPageEntryFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxPageEntryFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxPageEntryFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxPageEventHandler { /** * The name of the event to handle. */ event?: string; /** * (Output) * The unique identifier of this event handler. */ name: string; /** * The target flow to transition to. * Format: projects//locations//agents//flows/. */ targetFlow?: string; /** * The target page to transition to. * Format: projects//locations//agents//flows//pages/. */ targetPage?: string; /** * The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. 
It is invalid to specify such fulfillment for a handler handling webhooks. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxPageEventHandlerTriggerFulfillment; } interface CxPageEventHandlerTriggerFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentConditionalCase[]; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. */ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxPageEventHandlerTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxPageEventHandlerTriggerFulfillmentMessage { /** * The channel which the response is associated with. 
Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ liveAgentHandoff?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxPageEventHandlerTriggerFulfillmentMessageText; } interface CxPageEventHandlerTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageEventHandlerTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageEventHandlerTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxPageEventHandlerTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxPageEventHandlerTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. 
*/ phoneNumber: string; } interface CxPageEventHandlerTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxPageEventHandlerTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxPageForm { /** * Parameters to collect from the user. * Structure is documented below. */ parameters?: outputs.diagflow.CxPageFormParameter[]; } interface CxPageFormParameter { /** * Hierarchical advanced settings for this parameter. The settings exposed at the lower level overrides the settings exposed at the higher level. * Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. * Structure is documented below. */ advancedSettings?: outputs.diagflow.CxPageFormParameterAdvancedSettings; /** * The default value of an optional parameter. If the parameter is required, the default value will be ignored. */ defaultValue?: string; /** * The human-readable name of the parameter, unique within the form. */ displayName?: string; /** * The entity type of the parameter. * Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types. */ entityType?: string; /** * Defines fill behavior for the parameter. * Structure is documented below. */ fillBehavior?: outputs.diagflow.CxPageFormParameterFillBehavior; /** * Indicates whether the parameter represents a list of values. 
*/ isList?: boolean; /** * Indicates whether the parameter content should be redacted in log. * If redaction is enabled, the parameter content will be replaced by parameter name during logging. Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled. */ redact?: boolean; /** * Indicates whether the parameter is required. Optional parameters will not trigger prompts; however, they are filled if the user specifies them. * Required parameters must be filled before form filling concludes. */ required?: boolean; } interface CxPageFormParameterAdvancedSettings { /** * Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ dtmfSettings?: outputs.diagflow.CxPageFormParameterAdvancedSettingsDtmfSettings; } interface CxPageFormParameterAdvancedSettingsDtmfSettings { /** * If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). */ enabled?: boolean; /** * The digit that terminates a DTMF digit sequence. */ finishDigit?: string; /** * Max length of DTMF digits. */ maxDigits?: number; } interface CxPageFormParameterFillBehavior { /** * The fulfillment to provide the initial prompt that the agent can present to the user in order to fill the parameter. * Structure is documented below. 
*/ initialPromptFulfillment?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillment; /** * The handlers for parameter-level events, used to provide reprompt for the parameter or transition to a different page/flow. The supported events are: * * sys.no-match-, where N can be from 1 to 6 * * sys.no-match-default * * sys.no-input-, where N can be from 1 to 6 * * sys.no-input-default * * sys.invalid-parameter * [initialPromptFulfillment][initialPromptFulfillment] provides the first prompt for the parameter. * If the user's response does not fill the parameter, a no-match/no-input event will be triggered, and the fulfillment associated with the sys.no-match-1/sys.no-input-1 handler (if defined) will be called to provide a prompt. The sys.no-match-2/sys.no-input-2 handler (if defined) will respond to the next no-match/no-input event, and so on. * A sys.no-match-default or sys.no-input-default handler will be used to handle all following no-match/no-input events after all numbered no-match/no-input handlers for the parameter are consumed. * A sys.invalid-parameter handler can be defined to handle the case where the parameter values have been invalidated by webhook. For example, if the user's response fill the parameter, however the parameter was invalidated by webhook, the fulfillment associated with the sys.invalid-parameter handler (if defined) will be called to provide a prompt. * If the event handler for the corresponding event can't be found on the parameter, initialPromptFulfillment will be re-prompted. * Structure is documented below. */ repromptEventHandlers?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandler[]; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. 
*/ conditionalCases?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentConditionalCase[]; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. */ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. 
*/ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ liveAgentHandoff?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ telephonyTransferCall?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageText; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. 
*/ audioUri: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxPageFormParameterFillBehaviorInitialPromptFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandler { /** * The name of the event to handle. */ event?: string; /** * (Output) * The unique identifier of this event handler. */ name: string; /** * The target flow to transition to. * Format: projects//locations//agents//flows/. */ targetFlow?: string; /** * The target page to transition to. * Format: projects//locations//agents//flows//pages/. */ targetPage?: string; /** * The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillment; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. 
*/ conditionalCases?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentConditionalCase[]; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. */ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. 
*/ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ liveAgentHandoff?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. 
*/ telephonyTransferCall?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageText; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. 
Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxPageFormParameterFillBehaviorRepromptEventHandlerTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxPageKnowledgeConnectorSettings { /** * Optional. List of related data store connections. * Structure is documented below. */ dataStoreConnections?: outputs.diagflow.CxPageKnowledgeConnectorSettingsDataStoreConnection[]; /** * Whether Knowledge Connector is enabled or not. */ enabled?: boolean; /** * The target flow to transition to. Format: projects//locations//agents//flows/. * This field is part of a union field `target`: Only one of `targetPage` or `targetFlow` may be set. */ targetFlow?: string; /** * The target page to transition to. Format: projects//locations//agents//flows//pages/. * The page must be in the same host flow (the flow that owns this `KnowledgeConnectorSettings`). * This field is part of a union field `target`: Only one of `targetPage` or `targetFlow` may be set. */ targetPage?: string; /** * The fulfillment to be triggered. 
* When the answers from the Knowledge Connector are selected by Dialogflow, you can utilize the request scoped parameter $request.knowledge.answers (contains up to the 5 highest confidence answers) and $request.knowledge.questions (contains the corresponding questions) to construct the fulfillment. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillment; } interface CxPageKnowledgeConnectorSettingsDataStoreConnection { /** * The full name of the referenced data store. Formats: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} projects/{project}/locations/{location}/dataStores/{dataStore} */ dataStore?: string; /** * The type of the connected data store. * * PUBLIC_WEB: A data store that contains public web content. * * UNSTRUCTURED: A data store that contains unstructured private data. * * STRUCTURED: A data store that contains structured data (for example FAQ). * Possible values are: `PUBLIC_WEB`, `UNSTRUCTURED`, `STRUCTURED`. */ dataStoreType?: string; /** * The document processing mode for the data store connection. Should only be set for PUBLIC_WEB and UNSTRUCTURED data stores. If not set it is considered as DOCUMENTS, as this is the legacy mode. * * DOCUMENTS: Documents are processed as documents. * * CHUNKS: Documents are converted to chunks. * Possible values are: `DOCUMENTS`, `CHUNKS`. */ documentProcessingMode?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillment { /** * Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playbackInterruptionSettings at fulfillment level only overrides the playbackInterruptionSettings at the agent level, leaving other settings at the agent level unchanged. * DTMF settings does not override each other. 
DTMF settings set at different levels define DTMF detections running in parallel. * Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. * Structure is documented below. */ advancedSettings?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettings; /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentConditionalCase[]; /** * If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. */ enableGenerativeFallback?: boolean; /** * The list of rich message responses to present to the user. * Structure is documented below. */ messages?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. */ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. 
*/ webhook?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettings { /** * Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ dtmfSettings?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsDtmfSettings; /** * Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: * * Agent level * Structure is documented below. */ loggingSettings?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsLoggingSettings; /** * Settings for speech to text detection. Exposed at the following levels: * * Agent level * * Flow level * * Page level * * Parameter level * Structure is documented below. */ speechSettings?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsSpeechSettings; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsDtmfSettings { /** * If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). */ enabled?: boolean; /** * Endpoint timeout setting for matching dtmf input to regex. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". */ endpointingTimeoutDuration?: string; /** * The digit that terminates a DTMF digit sequence. */ finishDigit?: string; /** * Interdigit timeout setting for matching dtmf input to regex. 
* A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". */ interdigitTimeoutDuration?: string; /** * Max length of DTMF digits. */ maxDigits?: number; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsLoggingSettings { /** * Enables consent-based end-user input redaction, if true, a pre-defined session parameter **$session.params.conversation-redaction** will be used to determine if the utterance should be redacted. */ enableConsentBasedRedaction?: boolean; /** * Enables DF Interaction logging. */ enableInteractionLogging?: boolean; /** * Enables Google Cloud Logging. */ enableStackdriverLogging?: boolean; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentAdvancedSettingsSpeechSettings { /** * Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. */ endpointerSensitivity?: number; /** * Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). * An object containing a list of **"key": value** pairs. Example: **{ "name": "wrench", "mass": "1.3kg", "count": "3" }**. */ models?: { [key: string]: string; }; /** * Timeout before detecting no speech. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.500s". */ noSpeechTimeout?: string; /** * Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. */ useTimeoutBasedEndpointing?: boolean; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. 
*/ cases?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageConversationSuccess; /** * (Output) * This type has no fields. * Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ endInteractions: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageEndInteraction[]; /** * This type has no fields. * Represents info card response. 
If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. * Otherwise, the info card response is skipped. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ knowledgeInfoCard?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageKnowledgeInfoCard; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ liveAgentHandoff?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageLiveAgentHandoff; /** * (Output) * Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via playAudio. This message is generated by Dialogflow only and not supposed to be defined by the user. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ mixedAudios: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudio[]; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ text?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageText; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageEndInteraction { } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageKnowledgeInfoCard { } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudio { /** * Segments this audio response is composed of. */ segments?: outputs.diagflow.CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudioSegment[]; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageMixedAudioSegment { /** * (Output) * Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. 
* A base64-encoded string. * This field is part of a union field `content`: Only one of `audio` or `uri` may be set. */ audio?: string; /** * Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. * This field is part of a union field `content`: Only one of `audio` or `uri` may be set. */ uri?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. 
If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxPageKnowledgeConnectorSettingsTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxPageTransitionRoute { /** * The condition to evaluate against form parameters or session parameters. * At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. */ condition?: string; /** * The unique identifier of an Intent. * Format: projects//locations//agents//intents/. Indicates that the transition can only happen when the given intent is matched. At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. */ intent?: string; /** * (Output) * The unique identifier of this transition route. */ name: string; /** * The target flow to transition to. * Format: projects//locations//agents//flows/. */ targetFlow?: string; /** * The target page to transition to. * Format: projects//locations//agents//flows//pages/. */ targetPage?: string; /** * The fulfillment to call when the condition is satisfied. At least one of triggerFulfillment and target must be specified. When both are defined, triggerFulfillment is executed first. * Structure is documented below. */ triggerFulfillment?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillment; } interface CxPageTransitionRouteTriggerFulfillment { /** * Conditional cases for this fulfillment. * Structure is documented below. */ conditionalCases?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentConditionalCase[]; /** * The list of rich message responses to present to the user. * Structure is documented below. 
*/ messages?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessage[]; /** * Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. */ returnPartialResponses?: boolean; /** * Set parameter values before executing the webhook. * Structure is documented below. */ setParameterActions?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentSetParameterAction[]; /** * The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. */ tag?: string; /** * The webhook to call. Format: projects//locations//agents//webhooks/. */ webhook?: string; } interface CxPageTransitionRouteTriggerFulfillmentConditionalCase { /** * A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. * See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. */ cases?: string; } interface CxPageTransitionRouteTriggerFulfillmentMessage { /** * The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. */ channel?: string; /** * Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. * Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. 
Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. * * In a webhook response when you determine that you handled the customer issue. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ conversationSuccess?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessageConversationSuccess; /** * Indicates that the conversation should be handed off to a live agent. * Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. * You may set this, for example: * * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * * In a webhook response when you determine that the customer issue can only be handled by a human. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ liveAgentHandoff?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessageLiveAgentHandoff; /** * A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. 
* This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ outputAudioText?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessageOutputAudioText; /** * Returns a response containing a custom, platform-specific payload. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. */ payload?: string; /** * Specifies an audio clip to be played by the client as part of the response. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ playAudio?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessagePlayAudio; /** * Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. * Structure is documented below. */ telephonyTransferCall?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessageTelephonyTransferCall; /** * The text response message. * This field is part of a union field `message`: Only one of `text`, `payload`, `conversationSuccess`, `outputAudioText`, `liveAgentHandoff`, `endInteraction`, `playAudio`, `mixedAudio`, `telephonyTransferCall`, or `knowledgeInfoCard` may be set. 
* Structure is documented below. */ text?: outputs.diagflow.CxPageTransitionRouteTriggerFulfillmentMessageText; } interface CxPageTransitionRouteTriggerFulfillmentMessageConversationSuccess { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageTransitionRouteTriggerFulfillmentMessageLiveAgentHandoff { /** * Custom metadata. Dialogflow doesn't impose any structure on this. */ metadata?: string; } interface CxPageTransitionRouteTriggerFulfillmentMessageOutputAudioText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * The SSML text to be synthesized. For more information, see SSML. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ ssml?: string; /** * The raw text to be synthesized. * This field is part of a union field `source`: Only one of `text` or `ssml` may be set. */ text?: string; } interface CxPageTransitionRouteTriggerFulfillmentMessagePlayAudio { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. * * The `mixedAudio` block contains: */ allowPlaybackInterruption: boolean; /** * URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. */ audioUri: string; } interface CxPageTransitionRouteTriggerFulfillmentMessageTelephonyTransferCall { /** * Transfer the call to a phone number in E.164 format. */ phoneNumber: string; } interface CxPageTransitionRouteTriggerFulfillmentMessageText { /** * (Output) * Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. */ allowPlaybackInterruption: boolean; /** * A collection of text response variants. 
If multiple variants are defined, only one text response variant is returned at runtime. * required: true */ texts?: string[]; } interface CxPageTransitionRouteTriggerFulfillmentSetParameterAction { /** * Display name of the parameter. */ parameter?: string; /** * The new JSON-encoded value of the parameter. A null value clears the parameter. */ value?: string; } interface CxPlaybookInstruction { /** * General guidelines for the playbook. These are unstructured instructions that are not directly part of the goal, e.g. "Always be polite". It's valid for this text to be long and used instead of steps altogether. */ guidelines?: string; /** * Ordered list of step by step execution instructions to accomplish target goal. * Structure is documented below. */ steps?: outputs.diagflow.CxPlaybookInstructionStep[]; } interface CxPlaybookInstructionStep { /** * Sub-processing needed to execute the current step. * This field uses JSON data as a string. The value provided must be a valid JSON representation documented in [Step](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.playbooks#step). */ steps?: string; /** * Step instruction in text format. */ text?: string; } interface CxPlaybookLlmModelSettings { /** * The selected LLM model. */ model?: string; /** * The custom prompt to use. */ promptText?: string; } interface CxSecuritySettingsAudioExportSettings { /** * Filename pattern for exported audio. */ audioExportPattern?: string; /** * File format for exported audio file. Currently only in telephony recordings. * * MULAW: G.711 mu-law PCM with 8kHz sample rate. * * MP3: MP3 file format. * * OGG: OGG Vorbis. * Possible values are: `MULAW`, `MP3`, `OGG`. */ audioFormat?: string; /** * Enable audio redaction if it is true. */ enableAudioRedaction?: boolean; /** * Cloud Storage bucket to export audio record to. Setting this field would grant the Storage Object Creator role to the Dialogflow Service Agent. 
API caller that tries to modify this field should have the permission of storage.buckets.setIamPolicy. */ gcsBucket?: string; } interface CxSecuritySettingsInsightsExportSettings { /** * If enabled, we will automatically export conversations to Insights and Insights runs its analyzers. */ enableInsightsExport: boolean; } interface CxTestCaseLastTestResult { /** * The conversation turns uttered during the test case replay in chronological order. * Structure is documented below. */ conversationTurns?: outputs.diagflow.CxTestCaseLastTestResultConversationTurn[]; /** * Environment where the test was run. If not set, it indicates the draft environment. */ environment?: string; /** * The unique identifier of the page. * Format: projects//locations//agents//flows//pages/. */ name?: string; /** * Whether the test case passed in the agent environment. * * PASSED: The test passed. * * FAILED: The test did not pass. * Possible values are: `PASSED`, `FAILED`. */ testResult?: string; /** * The time that the test was run. A timestamp in RFC3339 text format. */ testTime?: string; } interface CxTestCaseLastTestResultConversationTurn { /** * The user input. * Structure is documented below. */ userInput?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnUserInput; /** * The virtual agent output. * Structure is documented below. */ virtualAgentOutput?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnVirtualAgentOutput; } interface CxTestCaseLastTestResultConversationTurnUserInput { /** * Whether sentiment analysis is enabled. */ enableSentimentAnalysis?: boolean; /** * Parameters that need to be injected into the conversation during intent detection. */ injectedParameters?: string; /** * User input. Supports text input, event input, dtmf input in the test case. * Structure is documented below. */ input?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnUserInputInput; /** * If webhooks should be allowed to trigger in response to the user utterance. 
Often if parameters are injected, webhooks should not be enabled. */ isWebhookEnabled?: boolean; } interface CxTestCaseLastTestResultConversationTurnUserInputInput { /** * The DTMF event to be handled. * Structure is documented below. */ dtmf?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnUserInputInputDtmf; /** * The event to be triggered. * Structure is documented below. */ event?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnUserInputInputEvent; /** * The language of the input. See [Language Support](https://cloud.google.com/dialogflow/cx/docs/reference/language) for a list of the currently supported language codes. * Note that queries in the same session do not necessarily need to specify the same language. */ languageCode?: string; /** * The natural language text to be processed. * Structure is documented below. */ text?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnUserInputInputText; } interface CxTestCaseLastTestResultConversationTurnUserInputInputDtmf { /** * The dtmf digits. */ digits?: string; /** * The finish digit (if any). */ finishDigit?: string; } interface CxTestCaseLastTestResultConversationTurnUserInputInputEvent { /** * Name of the event. */ event: string; } interface CxTestCaseLastTestResultConversationTurnUserInputInputText { /** * The natural language text to be processed. Text length must not exceed 256 characters. */ text: string; } interface CxTestCaseLastTestResultConversationTurnVirtualAgentOutput { /** * The [Page](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.flows.pages#Page) on which the utterance was spoken. * Structure is documented below. */ currentPage?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnVirtualAgentOutputCurrentPage; /** * The list of differences between the original run and the replay for this output, if any. * Structure is documented below. 
*/ differences?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnVirtualAgentOutputDifference[]; /** * The session parameters available to the bot at this point. */ sessionParameters?: string; /** * Response error from the agent in the test result. If set, other output is empty. * Structure is documented below. */ status?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnVirtualAgentOutputStatus; /** * The text responses from the agent for the turn. * Structure is documented below. */ textResponses?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnVirtualAgentOutputTextResponse[]; /** * The [Intent](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.intents#Intent) that triggered the response. * Structure is documented below. */ triggeredIntent?: outputs.diagflow.CxTestCaseLastTestResultConversationTurnVirtualAgentOutputTriggeredIntent; } interface CxTestCaseLastTestResultConversationTurnVirtualAgentOutputCurrentPage { /** * (Output) * The human-readable name of the page, unique within the flow. */ displayName?: string; /** * The unique identifier of the page. * Format: projects//locations//agents//flows//pages/. */ name?: string; } interface CxTestCaseLastTestResultConversationTurnVirtualAgentOutputDifference { /** * A human readable description of the diff, showing the actual output vs expected output. */ description?: string; /** * The type of diff. * * INTENT: The intent. * * PAGE: The page. * * PARAMETERS: The parameters. * * UTTERANCE: The message utterance. * * FLOW: The flow. * Possible values are: `INTENT`, `PAGE`, `PARAMETERS`, `UTTERANCE`, `FLOW`. */ type?: string; } interface CxTestCaseLastTestResultConversationTurnVirtualAgentOutputStatus { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A JSON encoded list of messages that carry the error details. */ details?: string; /** * A developer-facing error message. 
*/ message?: string; } interface CxTestCaseLastTestResultConversationTurnVirtualAgentOutputTextResponse { /** * A collection of text responses. */ texts?: string[]; } interface CxTestCaseLastTestResultConversationTurnVirtualAgentOutputTriggeredIntent { /** * (Output) * The human-readable name of the intent, unique within the agent. */ displayName?: string; /** * The unique identifier of the intent. * Format: projects//locations//agents//intents/. */ name?: string; } interface CxTestCaseTestCaseConversationTurn { /** * The user input. * Structure is documented below. */ userInput?: outputs.diagflow.CxTestCaseTestCaseConversationTurnUserInput; /** * The virtual agent output. * Structure is documented below. */ virtualAgentOutput?: outputs.diagflow.CxTestCaseTestCaseConversationTurnVirtualAgentOutput; } interface CxTestCaseTestCaseConversationTurnUserInput { /** * Whether sentiment analysis is enabled. */ enableSentimentAnalysis?: boolean; /** * Parameters that need to be injected into the conversation during intent detection. */ injectedParameters?: string; /** * User input. Supports text input, event input, dtmf input in the test case. * Structure is documented below. */ input?: outputs.diagflow.CxTestCaseTestCaseConversationTurnUserInputInput; /** * If webhooks should be allowed to trigger in response to the user utterance. Often if parameters are injected, webhooks should not be enabled. */ isWebhookEnabled?: boolean; } interface CxTestCaseTestCaseConversationTurnUserInputInput { /** * The DTMF event to be handled. * Structure is documented below. */ dtmf?: outputs.diagflow.CxTestCaseTestCaseConversationTurnUserInputInputDtmf; /** * The event to be triggered. * Structure is documented below. */ event?: outputs.diagflow.CxTestCaseTestCaseConversationTurnUserInputInputEvent; /** * The language of the input. See [Language Support](https://cloud.google.com/dialogflow/cx/docs/reference/language) for a list of the currently supported language codes. 
* Note that queries in the same session do not necessarily need to specify the same language. */ languageCode?: string; /** * The natural language text to be processed. * Structure is documented below. */ text?: outputs.diagflow.CxTestCaseTestCaseConversationTurnUserInputInputText; } interface CxTestCaseTestCaseConversationTurnUserInputInputDtmf { /** * The dtmf digits. */ digits?: string; /** * The finish digit (if any). */ finishDigit?: string; } interface CxTestCaseTestCaseConversationTurnUserInputInputEvent { /** * Name of the event. */ event: string; } interface CxTestCaseTestCaseConversationTurnUserInputInputText { /** * The natural language text to be processed. Text length must not exceed 256 characters. */ text: string; } interface CxTestCaseTestCaseConversationTurnVirtualAgentOutput { /** * The [Page](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.flows.pages#Page) on which the utterance was spoken. * Structure is documented below. */ currentPage?: outputs.diagflow.CxTestCaseTestCaseConversationTurnVirtualAgentOutputCurrentPage; /** * The session parameters available to the bot at this point. */ sessionParameters?: string; /** * The text responses from the agent for the turn. * Structure is documented below. */ textResponses?: outputs.diagflow.CxTestCaseTestCaseConversationTurnVirtualAgentOutputTextResponse[]; /** * The [Intent](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.intents#Intent) that triggered the response. * Structure is documented below. */ triggeredIntent?: outputs.diagflow.CxTestCaseTestCaseConversationTurnVirtualAgentOutputTriggeredIntent; } interface CxTestCaseTestCaseConversationTurnVirtualAgentOutputCurrentPage { /** * (Output) * The human-readable name of the page, unique within the flow. */ displayName: string; /** * The unique identifier of the page. * Format: projects//locations//agents//flows//pages/. 
*/ name?: string; } interface CxTestCaseTestCaseConversationTurnVirtualAgentOutputTextResponse { /** * A collection of text responses. */ texts?: string[]; } interface CxTestCaseTestCaseConversationTurnVirtualAgentOutputTriggeredIntent { /** * (Output) * The human-readable name of the intent, unique within the agent. */ displayName: string; /** * The unique identifier of the intent. * Format: projects//locations//agents//intents/. */ name?: string; } interface CxTestCaseTestConfig { /** * Flow name to start the test case with. * Format: projects//locations//agents//flows/. * Only one of flow and page should be set to indicate the starting point of the test case. If neither is set, the test case will start with start page on the default start flow. */ flow?: string; /** * The page to start the test case with. * Format: projects//locations//agents//flows//pages/. * Only one of flow and page should be set to indicate the starting point of the test case. If neither is set, the test case will start with start page on the default start flow. */ page?: string; /** * Session parameters to be compared when calculating differences. */ trackingParameters?: string[]; } interface CxToolConnectorSpec { /** * Actions for the tool to use. * Structure is documented below. */ actions: outputs.diagflow.CxToolConnectorSpecAction[]; /** * Integration Connectors end-user authentication configuration. * If configured, the end-user authentication fields will be passed in the Integration Connectors API request * and override the admin, default authentication configured for the Connection. * Note: The Connection must have authentication override enabled in order to specify an EUC configuration here - otherwise, * the ConnectorTool creation will fail. * See: https://cloud.google.com/application-integration/docs/configure-connectors-task#configure-authentication-override properties: * Structure is documented below. 
*/ endUserAuthConfig?: outputs.diagflow.CxToolConnectorSpecEndUserAuthConfig; /** * The full resource name of the referenced Integration Connectors Connection. * Format: projects/*&#47;locations/*&#47;connections/* */ name: string; } interface CxToolConnectorSpecAction { /** * ID of a Connection action for the tool to use. This field is part of a required union field `actionSpec`. */ connectionActionId?: string; /** * Entity operation configuration for the tool to use. This field is part of a required union field `actionSpec`. * Structure is documented below. */ entityOperation?: outputs.diagflow.CxToolConnectorSpecActionEntityOperation; /** * Entity fields to use as inputs for the operation. * If no fields are specified, all fields of the Entity will be used. */ inputFields?: string[]; /** * Entity fields to return from the operation. * If no fields are specified, all fields of the Entity will be returned. */ outputFields?: string[]; } interface CxToolConnectorSpecActionEntityOperation { /** * ID of the entity. */ entityId: string; /** * The operation to perform on the entity. * Possible values are: `LIST`, `CREATE`, `UPDATE`, `DELETE`, `GET`. */ operation: string; } interface CxToolConnectorSpecEndUserAuthConfig { /** * Oauth 2.0 Authorization Code authentication. This field is part of a union field `endUserAuthConfig`. Only one of `oauth2AuthCodeConfig` or `oauth2JwtBearerConfig` may be set. * Structure is documented below. */ oauth2AuthCodeConfig?: outputs.diagflow.CxToolConnectorSpecEndUserAuthConfigOauth2AuthCodeConfig; /** * JWT Profile Oauth 2.0 Authorization Grant authentication. This field is part of a union field `endUserAuthConfig`. Only one of `oauth2AuthCodeConfig` or `oauth2JwtBearerConfig` may be set. * Structure is documented below. 
* * * The `oauth2AuthCodeConfig` block supports: */ oauth2JwtBearerConfig?: outputs.diagflow.CxToolConnectorSpecEndUserAuthConfigOauth2JwtBearerConfig; } interface CxToolConnectorSpecEndUserAuthConfigOauth2AuthCodeConfig { /** * Oauth token value or parameter name to pass it through. */ oauthToken: string; } interface CxToolConnectorSpecEndUserAuthConfigOauth2JwtBearerConfig { /** * Client key value or parameter name to pass it through. */ clientKey: string; /** * Issuer value or parameter name to pass it through. */ issuer: string; /** * Subject value or parameter name to pass it through. */ subject: string; } interface CxToolDataStoreSpec { /** * List of data stores to search. * Structure is documented below. */ dataStoreConnections: outputs.diagflow.CxToolDataStoreSpecDataStoreConnection[]; /** * Fallback prompt configurations to use. */ fallbackPrompt: outputs.diagflow.CxToolDataStoreSpecFallbackPrompt; } interface CxToolDataStoreSpecDataStoreConnection { /** * The full name of the referenced data store. Formats: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} projects/{project}/locations/{location}/dataStores/{dataStore} */ dataStore?: string; /** * The type of the connected data store. * See [DataStoreType](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/DataStoreConnection#datastoretype) for valid values. */ dataStoreType?: string; /** * The document processing mode for the data store connection. Should only be set for PUBLIC_WEB and UNSTRUCTURED data stores. If not set it is considered as DOCUMENTS, as this is the legacy mode. * See [DocumentProcessingMode](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/DataStoreConnection#documentprocessingmode) for valid values. */ documentProcessingMode?: string; } interface CxToolDataStoreSpecFallbackPrompt { } interface CxToolFunctionSpec { /** * Optional. 
The JSON schema is encapsulated in a [google.protobuf.Struct](https://protobuf.dev/reference/protobuf/google.protobuf/#struct) to describe the input of the function. * This input is a JSON object that contains the function's parameters as properties of the object */ inputSchema?: string; /** * Optional. The JSON schema is encapsulated in a [google.protobuf.Struct](https://protobuf.dev/reference/protobuf/google.protobuf/#struct) to describe the output of the function. * This output is a JSON object that contains the function's parameters as properties of the object */ outputSchema?: string; } interface CxToolOpenApiSpec { /** * Optional. Authentication information required by the API. * Structure is documented below. */ authentication?: outputs.diagflow.CxToolOpenApiSpecAuthentication; /** * Optional. Service Directory configuration. * Structure is documented below. */ serviceDirectoryConfig?: outputs.diagflow.CxToolOpenApiSpecServiceDirectoryConfig; /** * The OpenAPI schema specified as a text. * This field is part of a union field `schema`: only one of `textSchema` may be set. */ textSchema: string; /** * Optional. TLS configuration for the HTTPS verification. * Structure is documented below. */ tlsConfig?: outputs.diagflow.CxToolOpenApiSpecTlsConfig; } interface CxToolOpenApiSpecAuthentication { /** * Config for API key auth. * This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ apiKeyConfig?: outputs.diagflow.CxToolOpenApiSpecAuthenticationApiKeyConfig; /** * Config for bearer token auth. * This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ bearerTokenConfig?: outputs.diagflow.CxToolOpenApiSpecAuthenticationBearerTokenConfig; /** * Config for OAuth. 
* This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ oauthConfig?: outputs.diagflow.CxToolOpenApiSpecAuthenticationOauthConfig; /** * Config for [Dialogflow service agent](https://cloud.google.com/iam/docs/service-agents#dialogflow-service-agent) auth. * This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ serviceAgentAuthConfig?: outputs.diagflow.CxToolOpenApiSpecAuthenticationServiceAgentAuthConfig; } interface CxToolOpenApiSpecAuthenticationApiKeyConfig { /** * Optional. The API key. If the `secretVersionForApiKey` field is set, this field will be ignored. * **Note**: This property is sensitive and will not be displayed in the plan. */ apiKey?: string; /** * The parameter name or the header name of the API key. * E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * Key location in the request. * See [RequestLocation](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.tools#requestlocation) for valid values. */ requestLocation: string; /** * Optional. The name of the SecretManager secret version resource storing the API key. * If this field is set, the apiKey field will be ignored. * Format: projects/{project}/secrets/{secret}/versions/{version} */ secretVersionForApiKey?: string; } interface CxToolOpenApiSpecAuthenticationBearerTokenConfig { /** * Optional. The name of the SecretManager secret version resource storing the Bearer token. If this field is set, the `token` field will be ignored. * Format: projects/{project}/secrets/{secret}/versions/{version} */ secretVersionForToken?: string; /** * Optional. 
The text token appended to the text Bearer to the request Authorization header. * [Session parameters reference](https://cloud.google.com/dialogflow/cx/docs/concept/parameter#session-ref) can be used to pass the token dynamically, e.g. `$session.params.parameter-id`. * **Note**: This property is sensitive and will not be displayed in the plan. */ token?: string; } interface CxToolOpenApiSpecAuthenticationOauthConfig { /** * The client ID from the OAuth provider. */ clientId: string; /** * Optional. The client secret from the OAuth provider. If the `secretVersionForClientSecret` field is set, this field will be ignored. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientSecret?: string; /** * OAuth grant types. * See [OauthGrantType](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.tools#oauthgranttype) for valid values */ oauthGrantType: string; /** * Optional. The OAuth scopes to grant. */ scopes?: string[]; /** * Optional. The name of the SecretManager secret version resource storing the client secret. * If this field is set, the clientSecret field will be ignored. * Format: projects/{project}/secrets/{secret}/versions/{version} */ secretVersionForClientSecret?: string; /** * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface CxToolOpenApiSpecAuthenticationServiceAgentAuthConfig { /** * Optional. Indicate the auth token type generated from the Dialogflow service agent. * The generated token is sent in the Authorization header. * See [ServiceAgentAuth](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.tools#serviceagentauth) for valid values. */ serviceAgentAuth?: string; } interface CxToolOpenApiSpecServiceDirectoryConfig { /** * The name of [Service Directory](https://cloud.google.com/service-directory/docs) service. * Format: projects//locations//namespaces//services/. 
LocationID of the service directory must be the same as the location of the agent. */ service: string; } interface CxToolOpenApiSpecTlsConfig { /** * Specifies a list of allowed custom CA certificates for HTTPS verification. * Structure is documented below. */ caCerts: outputs.diagflow.CxToolOpenApiSpecTlsConfigCaCert[]; } interface CxToolOpenApiSpecTlsConfigCaCert { /** * The allowed custom CA certificates (in DER format) for HTTPS verification. This overrides the default SSL trust store. * If this is empty or unspecified, Dialogflow will use Google's default trust store to verify certificates. * N.B. Make sure the HTTPS server certificates are signed with "subject alt name". * For instance a certificate can be self-signed using the following command: * ``` * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") * ``` * A base64-encoded string. */ cert: string; /** * The name of the allowed custom CA certificates. This can be used to disambiguate the custom CA certificates. */ displayName: string; } interface CxToolVersionTool { /** * (Optional, Beta) * Integration connectors tool specification. * This field is part of a union field `specification`: Only one of `openApiSpec`, `dataStoreSpec`, `functionSpec`, or `connectorSpec` may be set. * Structure is documented below. */ connectorSpec?: outputs.diagflow.CxToolVersionToolConnectorSpec; /** * Data store search tool specification. * This field is part of a union field `specification`: Only one of `openApiSpec`, `dataStoreSpec`, or `functionSpec` may be set. * Structure is documented below. */ dataStoreSpec?: outputs.diagflow.CxToolVersionToolDataStoreSpec; /** * High level description of the Tool and its usage. */ description: string; /** * The human-readable name of the tool, unique within the agent. */ displayName: string; /** * Client side executed function specification. 
* This field is part of a union field `specification`: Only one of `openApiSpec`, `dataStoreSpec`, or `functionSpec` may be set. * Structure is documented below. */ functionSpec?: outputs.diagflow.CxToolVersionToolFunctionSpec; /** * (Output) * The unique identifier of the Tool. * Format: projects//locations//agents//tools/. */ name: string; /** * OpenAPI specification of the Tool. * This field is part of a union field `specification`: Only one of `openApiSpec`, `dataStoreSpec`, or `functionSpec` may be set. * Structure is documented below. */ openApiSpec?: outputs.diagflow.CxToolVersionToolOpenApiSpec; /** * (Output) * The tool type. */ toolType: string; } interface CxToolVersionToolConnectorSpec { /** * Actions for the tool to use. * Structure is documented below. */ actions: outputs.diagflow.CxToolVersionToolConnectorSpecAction[]; /** * Integration Connectors end-user authentication configuration. * If configured, the end-user authentication fields will be passed in the Integration Connectors API request * and override the admin, default authentication configured for the Connection. * Note: The Connection must have authentication override enabled in order to specify an EUC configuration here - otherwise, * the ConnectorTool creation will fail. * See: https://cloud.google.com/application-integration/docs/configure-connectors-task#configure-authentication-override properties: * Structure is documented below. */ endUserAuthConfig?: outputs.diagflow.CxToolVersionToolConnectorSpecEndUserAuthConfig; /** * The full resource name of the referenced Integration Connectors Connection. * Format: projects/*&#47;locations/*&#47;connections/* */ name: string; } interface CxToolVersionToolConnectorSpecAction { /** * ID of a Connection action for the tool to use. This field is part of a required union field `actionSpec`. */ connectionActionId?: string; /** * Entity operation configuration for the tool to use. This field is part of a required union field `actionSpec`. 
* Structure is documented below. */ entityOperation?: outputs.diagflow.CxToolVersionToolConnectorSpecActionEntityOperation; /** * Entity fields to use as inputs for the operation. * If no fields are specified, all fields of the Entity will be used. */ inputFields?: string[]; /** * Entity fields to return from the operation. * If no fields are specified, all fields of the Entity will be returned. */ outputFields?: string[]; } interface CxToolVersionToolConnectorSpecActionEntityOperation { /** * ID of the entity. */ entityId: string; /** * The operation to perform on the entity. * Possible values are: `LIST`, `CREATE`, `UPDATE`, `DELETE`, `GET`. */ operation: string; } interface CxToolVersionToolConnectorSpecEndUserAuthConfig { /** * Oauth 2.0 Authorization Code authentication. This field is part of a union field `endUserAuthConfig`. Only one of `oauth2AuthCodeConfig` or `oauth2JwtBearerConfig` may be set. * Structure is documented below. */ oauth2AuthCodeConfig?: outputs.diagflow.CxToolVersionToolConnectorSpecEndUserAuthConfigOauth2AuthCodeConfig; /** * JWT Profile Oauth 2.0 Authorization Grant authentication. This field is part of a union field `endUserAuthConfig`. Only one of `oauth2AuthCodeConfig` or `oauth2JwtBearerConfig` may be set. * Structure is documented below. * * * The `oauth2AuthCodeConfig` block supports: */ oauth2JwtBearerConfig?: outputs.diagflow.CxToolVersionToolConnectorSpecEndUserAuthConfigOauth2JwtBearerConfig; } interface CxToolVersionToolConnectorSpecEndUserAuthConfigOauth2AuthCodeConfig { /** * Oauth token value or parameter name to pass it through. */ oauthToken: string; } interface CxToolVersionToolConnectorSpecEndUserAuthConfigOauth2JwtBearerConfig { /** * Client key value or parameter name to pass it through. */ clientKey: string; /** * Issuer value or parameter name to pass it through. */ issuer: string; /** * Subject value or parameter name to pass it through. 
*/ subject: string; } interface CxToolVersionToolDataStoreSpec { /** * List of data stores to search. * Structure is documented below. */ dataStoreConnections: outputs.diagflow.CxToolVersionToolDataStoreSpecDataStoreConnection[]; /** * Fallback prompt configurations to use. */ fallbackPrompt: outputs.diagflow.CxToolVersionToolDataStoreSpecFallbackPrompt; } interface CxToolVersionToolDataStoreSpecDataStoreConnection { /** * The full name of the referenced data store. Formats: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} projects/{project}/locations/{location}/dataStores/{dataStore} */ dataStore?: string; /** * The type of the connected data store. * See [DataStoreType](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/DataStoreConnection#datastoretype) for valid values. */ dataStoreType?: string; /** * The document processing mode for the data store connection. Should only be set for PUBLIC_WEB and UNSTRUCTURED data stores. If not set it is considered as DOCUMENTS, as this is the legacy mode. * See [DocumentProcessingMode](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/DataStoreConnection#documentprocessingmode) for valid values. */ documentProcessingMode?: string; } interface CxToolVersionToolDataStoreSpecFallbackPrompt { } interface CxToolVersionToolFunctionSpec { /** * Optional. The JSON schema is encapsulated in a [google.protobuf.Struct](https://protobuf.dev/reference/protobuf/google.protobuf/#struct) to describe the input of the function. * This input is a JSON object that contains the function's parameters as properties of the object */ inputSchema?: string; /** * Optional. The JSON schema is encapsulated in a [google.protobuf.Struct](https://protobuf.dev/reference/protobuf/google.protobuf/#struct) to describe the output of the function. 
* This output is a JSON object that contains the function's parameters as properties of the object */ outputSchema?: string; } interface CxToolVersionToolOpenApiSpec { /** * Optional. Authentication information required by the API. * Structure is documented below. */ authentication?: outputs.diagflow.CxToolVersionToolOpenApiSpecAuthentication; /** * Optional. Service Directory configuration. * Structure is documented below. */ serviceDirectoryConfig?: outputs.diagflow.CxToolVersionToolOpenApiSpecServiceDirectoryConfig; /** * The OpenAPI schema specified as a text. * This field is part of a union field `schema`: only one of `textSchema` may be set. */ textSchema: string; /** * Optional. TLS configuration for the HTTPS verification. * Structure is documented below. */ tlsConfig?: outputs.diagflow.CxToolVersionToolOpenApiSpecTlsConfig; } interface CxToolVersionToolOpenApiSpecAuthentication { /** * Config for API key auth. * This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ apiKeyConfig?: outputs.diagflow.CxToolVersionToolOpenApiSpecAuthenticationApiKeyConfig; /** * Config for bearer token auth. * This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ bearerTokenConfig?: outputs.diagflow.CxToolVersionToolOpenApiSpecAuthenticationBearerTokenConfig; /** * Config for OAuth. * This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ oauthConfig?: outputs.diagflow.CxToolVersionToolOpenApiSpecAuthenticationOauthConfig; /** * Config for [Dialogflow service agent](https://cloud.google.com/iam/docs/service-agents#dialogflow-service-agent) auth. 
* This field is part of a union field `authConfig`: Only one of `apiKeyConfig`, `oauthConfig`, `serviceAgentAuthConfig`, or `bearerTokenConfig` may be set. * Structure is documented below. */ serviceAgentAuthConfig?: outputs.diagflow.CxToolVersionToolOpenApiSpecAuthenticationServiceAgentAuthConfig; } interface CxToolVersionToolOpenApiSpecAuthenticationApiKeyConfig { /** * Optional. The API key. If the `secretVersionForApiKey` field is set, this field will be ignored. * **Note**: This property is sensitive and will not be displayed in the plan. */ apiKey?: string; /** * The parameter name or the header name of the API key. * E.g., If the API request is "https://example.com/act?X-Api-Key=", "X-Api-Key" would be the parameter name. */ keyName: string; /** * Key location in the request. * See [RequestLocation](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.tools#requestlocation) for valid values. */ requestLocation: string; /** * Optional. The name of the SecretManager secret version resource storing the API key. * If this field is set, the apiKey field will be ignored. * Format: projects/{project}/secrets/{secret}/versions/{version} */ secretVersionForApiKey?: string; } interface CxToolVersionToolOpenApiSpecAuthenticationBearerTokenConfig { /** * Optional. The name of the SecretManager secret version resource storing the Bearer token. If this field is set, the `token` field will be ignored. * Format: projects/{project}/secrets/{secret}/versions/{version} */ secretVersionForToken?: string; /** * Optional. The text token appended to the text Bearer to the request Authorization header. * [Session parameters reference](https://cloud.google.com/dialogflow/cx/docs/concept/parameter#session-ref) can be used to pass the token dynamically, e.g. `$session.params.parameter-id`. * **Note**: This property is sensitive and will not be displayed in the plan. 
*/ token?: string; } interface CxToolVersionToolOpenApiSpecAuthenticationOauthConfig { /** * The client ID from the OAuth provider. */ clientId: string; /** * Optional. The client secret from the OAuth provider. If the `secretVersionForClientSecret` field is set, this field will be ignored. * **Note**: This property is sensitive and will not be displayed in the plan. */ clientSecret?: string; /** * OAuth grant types. * See [OauthGrantType](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.tools#oauthgranttype) for valid values */ oauthGrantType: string; /** * Optional. The OAuth scopes to grant. */ scopes?: string[]; /** * Optional. The name of the SecretManager secret version resource storing the client secret. * If this field is set, the clientSecret field will be ignored. * Format: projects/{project}/secrets/{secret}/versions/{version} */ secretVersionForClientSecret?: string; /** * The token endpoint in the OAuth provider to exchange for an access token. */ tokenEndpoint: string; } interface CxToolVersionToolOpenApiSpecAuthenticationServiceAgentAuthConfig { /** * Optional. Indicate the auth token type generated from the Dialogflow service agent. * The generated token is sent in the Authorization header. * See [ServiceAgentAuth](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.tools#serviceagentauth) for valid values. */ serviceAgentAuth?: string; } interface CxToolVersionToolOpenApiSpecServiceDirectoryConfig { /** * The name of [Service Directory](https://cloud.google.com/service-directory/docs) service. * Format: projects//locations//namespaces//services/. LocationID of the service directory must be the same as the location of the agent. */ service: string; } interface CxToolVersionToolOpenApiSpecTlsConfig { /** * Specifies a list of allowed custom CA certificates for HTTPS verification. * Structure is documented below. 
*/ caCerts: outputs.diagflow.CxToolVersionToolOpenApiSpecTlsConfigCaCert[]; } interface CxToolVersionToolOpenApiSpecTlsConfigCaCert { /** * The allowed custom CA certificates (in DER format) for HTTPS verification. This overrides the default SSL trust store. * If this is empty or unspecified, Dialogflow will use Google's default trust store to verify certificates. * N.B. Make sure the HTTPS server certificates are signed with "subject alt name". * For instance a certificate can be self-signed using the following command: * ``` * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") * ``` * A base64-encoded string. */ cert: string; /** * The name of the allowed custom CA certificates. This can be used to disambiguate the custom CA certificates. */ displayName: string; } interface CxVersionNluSetting { /** * To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a no-match event will be triggered. * The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used. */ classificationThreshold?: number; /** * Indicates NLU model training mode. * * MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. * * MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. * Possible values are: `MODEL_TRAINING_MODE_AUTOMATIC`, `MODEL_TRAINING_MODE_MANUAL`. */ modelTrainingMode?: string; /** * Indicates the type of NLU model. * * MODEL_TYPE_STANDARD: Use standard NLU model. * * MODEL_TYPE_ADVANCED: Use advanced NLU model. 
* Possible values are: `MODEL_TYPE_STANDARD`, `MODEL_TYPE_ADVANCED`. */ modelType?: string; } interface CxWebhookGenericWebService { /** * Specifies a list of allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. If this * is empty or unspecified, Dialogflow will use Google's default trust store * to verify certificates. * N.B. Make sure the HTTPS server certificates are signed with "subject alt * name". For instance a certificate can be self-signed using the following * command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") */ allowedCaCerts?: string[]; /** * HTTP method for the flexible webhook calls. Standard webhook always uses * POST. * Possible values are: `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. */ httpMethod?: string; /** * Represents configuration of OAuth client credential flow for 3rd party * API authentication. * Structure is documented below. */ oauthConfig?: outputs.diagflow.CxWebhookGenericWebServiceOauthConfig; /** * Maps the values extracted from specific fields of the flexible webhook * response into session parameters. * - Key: session parameter name * - Value: field path in the webhook response */ parameterMapping?: { [key: string]: string; }; /** * Defines a custom JSON object as request body to send to flexible webhook. */ requestBody?: string; /** * The HTTP request headers to send together with webhook requests. */ requestHeaders?: { [key: string]: string; }; /** * The SecretManager secret version resource storing the username:password * pair for HTTP Basic authentication. * Format: `projects/{project}/secrets/{secret}/versions/{version}` */ secretVersionForUsernamePassword?: string; /** * The HTTP request headers to send together with webhook requests. Header * values are stored in SecretManager secret versions. 
* When the same header name is specified in both `requestHeaders` and * `secretVersionsForRequestHeaders`, the value in * `secretVersionsForRequestHeaders` will be used. * Structure is documented below. */ secretVersionsForRequestHeaders?: outputs.diagflow.CxWebhookGenericWebServiceSecretVersionsForRequestHeader[]; /** * Configuration for authentication using a service account. * Structure is documented below. */ serviceAccountAuthConfig?: outputs.diagflow.CxWebhookGenericWebServiceServiceAccountAuthConfig; /** * Indicate the auth token type generated from the [Dialogflow service * agent](https://cloud.google.com/iam/docs/service-agents#dialogflow-service-agent). * The generated token is sent in the Authorization header. * Possible values are: `NONE`, `ID_TOKEN`, `ACCESS_TOKEN`. */ serviceAgentAuth?: string; /** * The webhook URI for receiving POST requests. It must use https protocol. */ uri: string; /** * Type of the webhook. * Possible values are: `STANDARD`, `FLEXIBLE`. */ webhookType?: string; } interface CxWebhookGenericWebServiceOauthConfig { /** * The client ID provided by the 3rd party platform. */ clientId: string; /** * The client secret provided by the 3rd party platform. If the * `secretVersionForClientSecret` field is set, this field will be * ignored. */ clientSecret?: string; /** * The OAuth scopes to grant. */ scopes?: string[]; /** * The name of the SecretManager secret version resource storing the * client secret. If this field is set, the `clientSecret` field will be * ignored. * Format: `projects/{project}/secrets/{secret}/versions/{version}` */ secretVersionForClientSecret?: string; /** * The token endpoint provided by the 3rd party platform to exchange an * access token. */ tokenEndpoint: string; } interface CxWebhookGenericWebServiceSecretVersionsForRequestHeader { /** * The identifier for this object. Format specified above. */ key: string; /** * The SecretManager secret version resource storing the header value. 
* Format: `projects/{project}/secrets/{secret}/versions/{version}` */ secretVersion: string; } interface CxWebhookGenericWebServiceServiceAccountAuthConfig { /** * The email address of the service account used to authenticate the webhook call. * Dialogflow uses this service account to exchange an access token and the access * token is then sent in the **Authorization** header of the webhook request. * The service account must have the **roles/iam.serviceAccountTokenCreator** role * granted to the * [Dialogflow service agent](https://cloud.google.com/iam/docs/service-agents?_gl=1*1jsujvh*_ga*NjYxMzU3OTg2LjE3Njc3MzQ4NjM.*_ga_WH2QY8WWF5*czE3Njc3MzQ2MjgkbzIkZzEkdDE3Njc3MzQ3NzQkajYwJGwwJGgw#dialogflow-service-agent). */ serviceAccount: string; } interface CxWebhookServiceDirectory { /** * Represents configuration for a generic web service. * Structure is documented below. */ genericWebService?: outputs.diagflow.CxWebhookServiceDirectoryGenericWebService; /** * The name of Service Directory service. */ service: string; } interface CxWebhookServiceDirectoryGenericWebService { /** * Specifies a list of allowed custom CA certificates (in DER format) for * HTTPS verification. This overrides the default SSL trust store. If this * is empty or unspecified, Dialogflow will use Google's default trust store * to verify certificates. * N.B. Make sure the HTTPS server certificates are signed with "subject alt * name". For instance a certificate can be self-signed using the following * command, * openssl x509 -req -days 200 -in example.com.csr \ * -signkey example.com.key \ * -out example.com.crt \ * -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") */ allowedCaCerts?: string[]; /** * HTTP method for the flexible webhook calls. Standard webhook always uses * POST. * Possible values are: `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. */ httpMethod?: string; /** * Represents configuration of OAuth client credential flow for 3rd party * API authentication. 
* Structure is documented below. */ oauthConfig?: outputs.diagflow.CxWebhookServiceDirectoryGenericWebServiceOauthConfig; /** * Maps the values extracted from specific fields of the flexible webhook * response into session parameters. * - Key: session parameter name * - Value: field path in the webhook response */ parameterMapping?: { [key: string]: string; }; /** * Defines a custom JSON object as request body to send to flexible webhook. */ requestBody?: string; /** * The HTTP request headers to send together with webhook requests. */ requestHeaders?: { [key: string]: string; }; /** * The SecretManager secret version resource storing the username:password * pair for HTTP Basic authentication. * Format: `projects/{project}/secrets/{secret}/versions/{version}` */ secretVersionForUsernamePassword?: string; /** * The HTTP request headers to send together with webhook requests. Header * values are stored in SecretManager secret versions. * When the same header name is specified in both `requestHeaders` and * `secretVersionsForRequestHeaders`, the value in * `secretVersionsForRequestHeaders` will be used. * Structure is documented below. */ secretVersionsForRequestHeaders?: outputs.diagflow.CxWebhookServiceDirectoryGenericWebServiceSecretVersionsForRequestHeader[]; /** * Configuration for authentication using a service account. * Structure is documented below. */ serviceAccountAuthConfig?: outputs.diagflow.CxWebhookServiceDirectoryGenericWebServiceServiceAccountAuthConfig; /** * Indicate the auth token type generated from the [Dialogflow service * agent](https://cloud.google.com/iam/docs/service-agents#dialogflow-service-agent). * The generated token is sent in the Authorization header. * Possible values are: `NONE`, `ID_TOKEN`, `ACCESS_TOKEN`. */ serviceAgentAuth?: string; /** * The webhook URI for receiving POST requests. It must use https protocol. */ uri: string; /** * Type of the webhook. * Possible values are: `STANDARD`, `FLEXIBLE`. 
*/ webhookType?: string; } interface CxWebhookServiceDirectoryGenericWebServiceOauthConfig { /** * The client ID provided by the 3rd party platform. */ clientId: string; /** * The client secret provided by the 3rd party platform. If the * `secretVersionForClientSecret` field is set, this field will be * ignored. */ clientSecret?: string; /** * The OAuth scopes to grant. */ scopes?: string[]; /** * The name of the SecretManager secret version resource storing the * client secret. If this field is set, the `clientSecret` field will be * ignored. * Format: `projects/{project}/secrets/{secret}/versions/{version}` */ secretVersionForClientSecret?: string; /** * The token endpoint provided by the 3rd party platform to exchange an * access token. */ tokenEndpoint: string; } interface CxWebhookServiceDirectoryGenericWebServiceSecretVersionsForRequestHeader { /** * The identifier for this object. Format specified above. */ key: string; /** * The SecretManager secret version resource storing the header value. * Format: `projects/{project}/secrets/{secret}/versions/{version}` */ secretVersion: string; } interface CxWebhookServiceDirectoryGenericWebServiceServiceAccountAuthConfig { /** * The email address of the service account used to authenticate the webhook call. * Dialogflow uses this service account to exchange an access token and the access * token is then sent in the **Authorization** header of the webhook request. * The service account must have the **roles/iam.serviceAccountTokenCreator** role * granted to the * [Dialogflow service agent](https://cloud.google.com/iam/docs/service-agents?_gl=1*1jsujvh*_ga*NjYxMzU3OTg2LjE3Njc3MzQ4NjM.*_ga_WH2QY8WWF5*czE3Njc3MzQ2MjgkbzIkZzEkdDE3Njc3MzQ3NzQkajYwJGwwJGgw#dialogflow-service-agent). */ serviceAccount: string; } interface EncryptionSpecEncryptionSpec { /** * The name of customer-managed encryption key that is used to secure a resource and its sub-resources. 
* If empty, the resource is secured by the default Google encryption key. * Only the key in the same location as this resource is allowed to be used for encryption. * Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key} */ kmsKey: string; } interface EntityTypeEntity { /** * A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym * could be green onions. * For KIND_LIST entity types: * * This collection must contain exactly one synonym equal to value. */ synonyms: string[]; /** * The primary value associated with this entity entry. For example, if the entity type is vegetable, the value * could be scallions. * For KIND_MAP entity types: * * A reference value to be used in place of synonyms. * For KIND_LIST entity types: * * A string that can contain references to other entity types (with or without aliases). */ value: string; } interface FulfillmentFeature { /** * The type of the feature that enabled for fulfillment. * * SMALLTALK: Fulfillment is enabled for SmallTalk. * Possible values are: `SMALLTALK`. */ type: string; } interface FulfillmentGenericWebService { /** * The password for HTTP Basic authentication. */ password?: string; /** * The HTTP request headers to send together with fulfillment requests. */ requestHeaders?: { [key: string]: string; }; /** * The fulfillment URI for receiving POST requests. It must use https protocol. */ uri: string; /** * The user name for HTTP Basic authentication. */ username?: string; } interface GeneratorInferenceParameter { /** * Optional. Maximum number of the output tokens for the generator. */ maxOutputTokens?: number; /** * Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0. */ temperature?: number; /** * Optional. Top-k changes how the model selects tokens for output. 
A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40. */ topK?: number; /** * Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95. */ topP?: number; } interface GeneratorSummarizationContext { /** * Optional. List of few shot examples. * Structure is documented below. */ fewShotExamples?: outputs.diagflow.GeneratorSummarizationContextFewShotExample[]; /** * Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions. */ outputLanguageCode?: string; /** * Optional. List of sections. Note it contains both predefined section sand customer defined sections. * Structure is documented below. */ summarizationSections?: outputs.diagflow.GeneratorSummarizationContextSummarizationSection[]; /** * Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"]. 
*/ version: string; } interface GeneratorSummarizationContextFewShotExample { /** * Optional. Conversation transcripts. * Structure is documented below. */ conversationContext?: outputs.diagflow.GeneratorSummarizationContextFewShotExampleConversationContext; /** * Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10"> */ extraInfo?: { [key: string]: string; }; /** * Required. Example output of the model. * Structure is documented below. */ output: outputs.diagflow.GeneratorSummarizationContextFewShotExampleOutput; /** * Summarization sections. * Structure is documented below. */ summarizationSectionList?: outputs.diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionList; } interface GeneratorSummarizationContextFewShotExampleConversationContext { /** * Optional. List of message transcripts in the conversation. * Structure is documented below. */ messageEntries?: outputs.diagflow.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry[]; } interface GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry { /** * Optional. Create time of the message entry. */ createTime?: string; /** * Optional. The language of the text. */ languageCode?: string; /** * Optional. Participant role of the message. * Possible values are: `HUMAN_AGENT`, `AUTOMATED_AGENT`, `END_USER`. */ role?: string; /** * Optional. Transcript content of the message. */ text?: string; } interface GeneratorSummarizationContextFewShotExampleOutput { /** * Optional. Suggested summary. * Structure is documented below. */ summarySuggestion?: outputs.diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion; } interface GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion { /** * Required. All the parts of generated summary. * Structure is documented below. 
*/ summarySections: outputs.diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection[]; } interface GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection { /** * Required. Name of the section. */ section: string; /** * Required. Summary text for the section. */ summary: string; } interface GeneratorSummarizationContextFewShotExampleSummarizationSectionList { /** * Optional. Summarization sections. * Structure is documented below. */ summarizationSections?: outputs.diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSection[]; } interface GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSection { /** * Optional. Definition of the section, for example, "what the customer needs help with or has question about." */ definition?: string; /** * Optional. Name of the section, for example, "situation". */ key?: string; /** * Optional. Type of the summarization section. * Possible values are: `SITUATION`, `ACTION`, `RESOLUTION`, `REASON_FOR_CANCELLATION`, `CUSTOMER_SATISFACTION`, `ENTITIES`, `CUSTOMER_DEFINED`, `SITUATION_CONCISE`, `ACTION_CONCISE`. */ type?: string; } interface GeneratorSummarizationContextSummarizationSection { /** * Optional. Definition of the section, for example, "what the customer needs help with or has question about." */ definition?: string; /** * Optional. Name of the section, for example, "situation". */ key?: string; /** * Optional. Type of the summarization section. * Possible values are: `SITUATION`, `ACTION`, `RESOLUTION`, `REASON_FOR_CANCELLATION`, `CUSTOMER_SATISFACTION`, `ENTITIES`, `CUSTOMER_DEFINED`, `SITUATION_CONCISE`, `ACTION_CONCISE`. */ type?: string; } interface IntentFollowupIntentInfo { /** * The unique identifier of the followup intent. * Format: projects//agent/intents/. */ followupIntentName?: string; /** * The unique identifier of the parent intent in the chain of followup intents. 
* Format: projects//agent/intents/. */ parentFollowupIntentName?: string; } } export declare namespace discoveryengine { interface AclConfigIdpConfig { /** * External third party identity provider config. * Structure is documented below. */ externalIdpConfig?: outputs.discoveryengine.AclConfigIdpConfigExternalIdpConfig; /** * Identity provider type. * Possible values are: `GSUITE`, `THIRD_PARTY`. */ idpType?: string; } interface AclConfigIdpConfigExternalIdpConfig { /** * Workforce pool name: "locations/global/workforcePools/pool_id" */ workforcePoolName?: string; } interface AssistantCustomerPolicy { /** * List of banned phrases. * Structure is documented below. */ bannedPhrases?: outputs.discoveryengine.AssistantCustomerPolicyBannedPhrase[]; /** * Model Armor configuration to be used for sanitizing user prompts and assistant responses. * Structure is documented below. */ modelArmorConfig?: outputs.discoveryengine.AssistantCustomerPolicyModelArmorConfig; } interface AssistantCustomerPolicyBannedPhrase { /** * If true, diacritical marks (e.g., accents, umlauts) are ignored when * matching banned phrases. For example, "cafe" would match "cafĆ©". */ ignoreDiacritics?: boolean; /** * Match type for the banned phrase. * The supported values: 'SIMPLE_STRING_MATCH', 'WORD_BOUNDARY_STRING_MATCH'. */ matchType?: string; /** * The raw string content to be banned. */ phrase: string; } interface AssistantCustomerPolicyModelArmorConfig { /** * Defines the failure mode for Model Armor sanitization. * The supported values: 'FAIL_OPEN', 'FAIL_CLOSED'. */ failureMode?: string; /** * The resource name of the Model Armor template for sanitizing assistant * responses. Format: * `projects/{project}/locations/{location}/templates/{template_id}` * If not specified, no sanitization will be applied to the assistant * response. */ responseTemplate: string; /** * The resource name of the Model Armor template for sanitizing user * prompts. 
Format: * `projects/{project}/locations/{location}/templates/{template_id}` * If not specified, no sanitization will be applied to the user prompt. */ userPromptTemplate: string; } interface AssistantGenerationConfig { /** * The default language to use for the generation of the assistant response. * Use an ISO 639-1 language code such as `en`. * If not specified, the language will be automatically detected. */ defaultLanguage?: string; /** * System instruction, also known as the prompt preamble for LLM calls. * See also https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions * Structure is documented below. */ systemInstruction?: outputs.discoveryengine.AssistantGenerationConfigSystemInstruction; } interface AssistantGenerationConfigSystemInstruction { /** * Additional system instruction that will be added to the default system instruction. */ additionalSystemInstruction?: string; } interface ChatEngineChatEngineConfig { /** * The configuration to generate the Dialogflow agent that is associated to this Engine. * Exactly one of `agentCreationConfig` or `dialogflowAgentToLink` must be set. * Structure is documented below. */ agentCreationConfig?: outputs.discoveryengine.ChatEngineChatEngineConfigAgentCreationConfig; /** * If the flag set to true, we allow the agent and engine are in * different locations, otherwise the agent and engine are required to be * in the same location. The flag is set to false by default. * Note that the `allowCrossRegion` are one-time consumed by and passed * to EngineService.CreateEngine. It means they cannot be retrieved using * EngineService.GetEngine or EngineService.ListEngines API after engine * creation. */ allowCrossRegion?: boolean; /** * The resource name of an existing Dialogflow agent to link to this Chat Engine. Format: `projects//locations//agents/`. * Exactly one of `agentCreationConfig` or `dialogflowAgentToLink` must be set. 
*/ dialogflowAgentToLink?: string; } interface ChatEngineChatEngineConfigAgentCreationConfig { /** * Name of the company, organization or other entity that the agent represents. Used for knowledge connector LLM prompt and for knowledge search. */ business?: string; /** * The default language of the agent as a language tag. See [Language Support](https://cloud.google.com/dialogflow/docs/reference/language) for a list of the currently supported language codes. */ defaultLanguageCode: string; /** * Agent location for Agent creation, currently supported values: global/us/eu, it needs to be the same region as the Chat Engine. */ location?: string; /** * The time zone of the agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, Europe/Paris. */ timeZone: string; } interface ChatEngineChatEngineMetadata { /** * (Output) * The resource name of a Dialogflow agent, that this Chat Engine refers to. */ dialogflowAgent: string; } interface ChatEngineCommonConfig { /** * The name of the company, business or entity that is associated with the engine. Setting this may help improve LLM related features. */ companyName?: string; } interface CmekConfigSingleRegionKey { /** * Single-regional kms key resource name which will be used to encrypt * resources * `projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{keyId}`. */ kmsKey: string; } interface ControlBoostAction { /** * The data store to boost. */ dataStore: string; /** * The filter to apply to the search results. */ filter: string; /** * The fixed boost value to apply to the search results. Positive values will increase the relevance of the results, while negative values will decrease the relevance. The value must be between -100 and 100. */ fixedBoost?: number; /** * The interpolation boost specification to apply to the search results. * Structure is documented below. 
*/ interpolationBoostSpec?: outputs.discoveryengine.ControlBoostActionInterpolationBoostSpec; } interface ControlBoostActionInterpolationBoostSpec { /** * The attribute type to be used to determine the boost amount. * Possible values are: `NUMERICAL`, `FRESHNESS`. */ attributeType?: string; /** * The control points used to define the curve. * Structure is documented below. */ controlPoint?: outputs.discoveryengine.ControlBoostActionInterpolationBoostSpecControlPoint; /** * The name of the field whose value will be used to determine the boost amount. */ fieldName?: string; /** * The interpolation type to be applied to connect the control points. * Possible values are: `LINEAR`. */ interpolationType?: string; } interface ControlBoostActionInterpolationBoostSpecControlPoint { /** * The attribute value of the control point. */ attributeValue?: string; /** * The value between -1 to 1 by which to boost the score if the attributeValue * evaluates to the value specified above. */ boostAmount?: number; } interface ControlCondition { /** * The time range when the condition is active. * Structure is documented below. */ activeTimeRanges?: outputs.discoveryengine.ControlConditionActiveTimeRange[]; /** * The regular expression that the query must match for this condition to be met. */ queryRegex?: string; /** * The query terms that must be present in the search request for this condition to be met. * Structure is documented below. */ queryTerms?: outputs.discoveryengine.ControlConditionQueryTerm[]; } interface ControlConditionActiveTimeRange { /** * The end time of the active time range. */ endTime?: string; /** * The start time of the active time range. */ startTime?: string; } interface ControlConditionQueryTerm { /** * If true, the query term must be an exact match. Otherwise, the query term can be a partial match. */ fullMatch?: boolean; /** * The value of the query term. */ value?: string; } interface ControlFilterAction { /** * The data store to filter. 
*/ dataStore: string; /** * The filter to apply to the search results. */ filter: string; } interface ControlPromoteAction { /** * The data store to promote. */ dataStore: string; /** * The search link promotion to apply to the search results. * Structure is documented below. */ searchLinkPromotion: outputs.discoveryengine.ControlPromoteActionSearchLinkPromotion; } interface ControlPromoteActionSearchLinkPromotion { /** * The description of the promoted link. */ description?: string; /** * The document to promote. */ document?: string; /** * Return promotions for basic site search. */ enabled?: boolean; /** * The image URI of the promoted link. */ imageUri?: string; /** * The title of the promoted link. */ title: string; /** * The URI to promote. */ uri?: string; } interface ControlRedirectAction { /** * The URI to redirect to. */ redirectUri: string; } interface ControlSynonymsAction { /** * The synonyms to apply to the search results. */ synonyms?: string[]; } interface DataConnectorEntity { /** * (Output) * The full resource name of the associated data store for the source * entity. * Format: `projects/*/locations/*/collections/*/dataStores/*`. * When the connector is initialized by the DataConnectorService.SetUpDataConnector * method, a DataStore is automatically created for each source entity. */ dataStore: string; /** * The name of the entity. Supported values by data source: * * Salesforce: `Lead`, `Opportunity`, `Contact`, `Account`, `Case`, `Contract`, `Campaign` * * Jira: project, issue, attachment, comment, worklog * * Confluence: `Content`, `Space` */ entityName?: string; /** * Attributes for indexing. * Key: Field name. * Value: The key property to map a field to, such as `title`, and * `description`. Supported key properties: */ keyPropertyMappings?: { [key: string]: string; }; /** * The parameters for the entity to facilitate data ingestion. 
*/ params?: string; } interface DataConnectorError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A developer-facing error message, which should be in English. */ message: string; } interface DataStoreAdvancedSiteSearchConfig { /** * If set true, automatic refresh is disabled for the DataStore. */ disableAutomaticRefresh?: boolean; /** * If set true, initial indexing is disabled for the DataStore. */ disableInitialIndex?: boolean; } interface DataStoreDocumentProcessingConfig { /** * Whether chunking mode is enabled. * Structure is documented below. */ chunkingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigChunkingConfig; /** * Configurations for default Document parser. If not specified, this resource * will be configured to use a default DigitalParsingConfig, and the default parsing * config will be applied to all file types for Document parsing. * Structure is documented below. */ defaultParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigDefaultParsingConfig; /** * (Output) * The full resource name of the Document Processing Config. Format: * `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/documentProcessingConfig`. */ name: string; /** * Map from file type to override the default parsing configuration based on the file type. Supported keys: */ parsingConfigOverrides?: outputs.discoveryengine.DataStoreDocumentProcessingConfigParsingConfigOverride[]; } interface DataStoreDocumentProcessingConfigChunkingConfig { /** * Configuration for the layout based chunking. * Structure is documented below. */ layoutBasedChunkingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig; } interface DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { /** * The token size limit for each chunk. * Supported values: 100-500 (inclusive). 
Default value: 500. */ chunkSize?: number; /** * Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. * Default value: False. */ includeAncestorHeadings?: boolean; } interface DataStoreDocumentProcessingConfigDefaultParsingConfig { /** * Configurations applied to digital parser. */ digitalParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig; /** * Configurations applied to layout parser. * Structure is documented below. */ layoutParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig; /** * Configurations applied to OCR parser. Currently it only applies to PDFs. * Structure is documented below. */ ocrParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig; } interface DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig { } interface DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { /** * If true, the LLM based annotation is added to the image during parsing. */ enableImageAnnotation?: boolean; /** * If true, the LLM based annotation is added to the table during parsing. */ enableTableAnnotation?: boolean; /** * List of HTML classes to exclude from the parsed content. */ excludeHtmlClasses?: string[]; /** * List of HTML elements to exclude from the parsed content. */ excludeHtmlElements?: string[]; /** * List of HTML ids to exclude from the parsed content. */ excludeHtmlIds?: string[]; /** * Contains the required structure types to extract from the document. Supported values: `shareholder-structure`. */ structuredContentTypes?: string[]; } interface DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig { /** * If true, will use native text instead of OCR text on pages containing native text. 
*/ useNativeText?: boolean; } interface DataStoreDocumentProcessingConfigParsingConfigOverride { /** * Configurations applied to digital parser. */ digitalParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig; /** * The identifier for this object. Format specified above. */ fileType: string; /** * Configurations applied to layout parser. * Structure is documented below. */ layoutParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig; /** * Configurations applied to OCR parser. Currently it only applies to PDFs. * Structure is documented below. */ ocrParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig; } interface DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig { } interface DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig { /** * If true, the LLM based annotation is added to the image during parsing. */ enableImageAnnotation?: boolean; /** * If true, the LLM based annotation is added to the table during parsing. */ enableTableAnnotation?: boolean; /** * List of HTML classes to exclude from the parsed content. */ excludeHtmlClasses?: string[]; /** * List of HTML elements to exclude from the parsed content. */ excludeHtmlElements?: string[]; /** * List of HTML ids to exclude from the parsed content. */ excludeHtmlIds?: string[]; /** * Contains the required structure types to extract from the document. Supported values: `shareholder-structure`. */ structuredContentTypes?: string[]; } interface DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig { /** * If true, will use native text instead of OCR text on pages containing native text. */ useNativeText?: boolean; } interface LicenseConfigEndDate { /** * Day of a month. 
Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface LicenseConfigStartDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */ year?: number; } interface RecommendationEngineCommonConfig { /** * The name of the company, business or entity that is associated with the engine. Setting this may help improve LLM related features.cd */ companyName?: string; } interface RecommendationEngineMediaRecommendationEngineConfig { /** * More feature configs of the selected engine type. * Structure is documented below. */ engineFeaturesConfig?: outputs.discoveryengine.RecommendationEngineMediaRecommendationEngineConfigEngineFeaturesConfig; /** * The optimization objective. e.g., `cvr`. * This field together with MediaRecommendationEngineConfig.type describes * engine metadata to use to control engine training and serving. * Currently supported values: `ctr`, `cvr`. * If not specified, we choose default based on engine type. Default depends on type of recommendation: * `recommended-for-you` => `ctr` * `others-you-may-like` => `ctr` */ optimizationObjective?: string; /** * Name and value of the custom threshold for cvr optimization_objective. * For targetField `watch-time`, targetFieldValue must be an integer * value indicating the media progress time in seconds between (0, 86400] * (excludes 0, includes 86400) (e.g., 90). 
* For targetField `watch-percentage`, the targetFieldValue must be a * valid float value between (0, 1.0] (excludes 0, includes 1.0) (e.g., 0.5). * Structure is documented below. */ optimizationObjectiveConfig?: outputs.discoveryengine.RecommendationEngineMediaRecommendationEngineConfigOptimizationObjectiveConfig; /** * The training state that the engine is in (e.g. `TRAINING` or `PAUSED`). * Since part of the cost of running the service * is frequency of training - this can be used to determine when to train * engine in order to control cost. If not specified: the default value for * `CreateEngine` method is `TRAINING`. The default value for * `UpdateEngine` method is to keep the state the same as before. * Possible values are: `PAUSED`, `TRAINING`. */ trainingState?: string; /** * The type of engine. e.g., `recommended-for-you`. * This field together with MediaRecommendationEngineConfig.optimizationObjective describes * engine metadata to use to control engine training and serving. * Currently supported values: `recommended-for-you`, `others-you-may-like`, * `more-like-this`, `most-popular-items`. */ type?: string; } interface RecommendationEngineMediaRecommendationEngineConfigEngineFeaturesConfig { /** * Feature configurations that are required for creating a Most Popular engine. * Structure is documented below. */ mostPopularConfig?: outputs.discoveryengine.RecommendationEngineMediaRecommendationEngineConfigEngineFeaturesConfigMostPopularConfig; /** * Additional feature configurations for creating a `recommended-for-you` engine. * Structure is documented below. */ recommendedForYouConfig?: outputs.discoveryengine.RecommendationEngineMediaRecommendationEngineConfigEngineFeaturesConfigRecommendedForYouConfig; } interface RecommendationEngineMediaRecommendationEngineConfigEngineFeaturesConfigMostPopularConfig { /** * The time window of which the engine is queried at training and * prediction time. Positive integers only. 
The value translates to the * last X days of events. Currently required for the `most-popular-items` * engine. */ timeWindowDays?: number; } interface RecommendationEngineMediaRecommendationEngineConfigEngineFeaturesConfigRecommendedForYouConfig { /** * The type of event with which the engine is queried at prediction time. * If set to `generic`, only `view-item`, `media-play`,and * `media-complete` will be used as `context-event` in engine training. If * set to `view-home-page`, `view-home-page` will also be used as * `context-events` in addition to `view-item`, `media-play`, and * `media-complete`. Currently supported for the `recommended-for-you` * engine. Currently supported values: `view-home-page`, `generic`. */ contextEventType?: string; } interface RecommendationEngineMediaRecommendationEngineConfigOptimizationObjectiveConfig { /** * The name of the field to target. Currently supported values: `watch-percentage`, `watch-time`. */ targetField?: string; /** * The threshold to be applied to the target (e.g., 0.5). */ targetFieldValueFloat?: number; } interface SearchEngineCommonConfig { /** * The name of the company, business or entity that is associated with the engine. Setting this may help improve LLM related features.cd */ companyName?: string; } interface SearchEngineKnowledgeGraphConfig { /** * Specify entity types to support. */ cloudKnowledgeGraphTypes?: string[]; /** * Whether to enable the Cloud Knowledge Graph for the engine. */ enableCloudKnowledgeGraph: boolean; /** * Whether to enable the Private Knowledge Graph for the engine. */ enablePrivateKnowledgeGraph: boolean; /** * Feature config for the Knowledge Graph. * Structure is documented below. */ featureConfig?: outputs.discoveryengine.SearchEngineKnowledgeGraphConfigFeatureConfig; } interface SearchEngineKnowledgeGraphConfigFeatureConfig { /** * Whether to disable the private KG auto complete for the engine. 
*/ disablePrivateKgAutoComplete?: boolean; /** * Whether to disable the private KG enrichment for the engine. */ disablePrivateKgEnrichment?: boolean; /** * Whether to disable the private KG for query UI chips. */ disablePrivateKgQueryUiChips?: boolean; /** * Whether to disable the private KG query understanding for the engine. */ disablePrivateKgQueryUnderstanding?: boolean; } interface SearchEngineSearchEngineConfig { /** * The add-on that this search engine enables. * Each value may be one of: `SEARCH_ADD_ON_LLM`. */ searchAddOns?: string[]; /** * The search feature tier of this engine. Defaults to SearchTier.SEARCH_TIER_STANDARD if not specified. * Default value is `SEARCH_TIER_STANDARD`. * Possible values are: `SEARCH_TIER_STANDARD`, `SEARCH_TIER_ENTERPRISE`. */ searchTier?: string; } interface TargetSiteFailureReason { /** * Site verification state indicating the ownership and validity. * Structure is documented below. */ quotaFailure?: outputs.discoveryengine.TargetSiteFailureReasonQuotaFailure; } interface TargetSiteFailureReasonQuotaFailure { /** * This number is an estimation on how much total quota this project * needs to successfully complete indexing. */ totalRequiredQuota?: number; } interface TargetSiteSiteVerificationInfo { /** * Site verification state indicating the ownership and validity. * Possible values are: `VERIFIED`, `UNVERIFIED`, `EXEMPTED`. */ siteVerificationState?: string; /** * Latest site verification time. */ verifyTime?: string; } interface WidgetConfigAccessSettings { /** * Whether public unauthenticated access is allowed. */ allowPublicAccess?: boolean; /** * List of domains that are allowed to integrate the search widget. */ allowlistedDomains?: string[]; /** * Whether web app access is enabled. */ enableWebApp?: boolean; /** * Language code for user interface. Use language tags defined by * [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). If unset, the * default language code is "en-US". 
*/ languageCode?: string; /** * The workforce identity pool provider used to access the widget. */ workforceIdentityPoolProvider?: string; } interface WidgetConfigHomepageSetting { /** * The shortcuts to display on the homepage. * Structure is documented below. */ shortcuts?: outputs.discoveryengine.WidgetConfigHomepageSettingShortcut[]; } interface WidgetConfigHomepageSettingShortcut { /** * Destination URL of shortcut. */ destinationUri?: string; /** * Icon URL of shortcut. * Structure is documented below. */ icon?: outputs.discoveryengine.WidgetConfigHomepageSettingShortcutIcon; /** * Title of the shortcut. */ title?: string; } interface WidgetConfigHomepageSettingShortcutIcon { /** * Image URL. */ url?: string; } interface WidgetConfigUiBranding { /** * Logo image. * Structure is documented below. */ logo?: outputs.discoveryengine.WidgetConfigUiBrandingLogo; } interface WidgetConfigUiBrandingLogo { /** * Image URL. */ url?: string; } interface WidgetConfigUiSettings { /** * Per data store configuration. * Structure is documented below. */ dataStoreUiConfigs?: outputs.discoveryengine.WidgetConfigUiSettingsDataStoreUiConfig[]; /** * The default ordering for search results if specified. * Used to set SearchRequest#orderBy on applicable requests. */ defaultSearchRequestOrderBy?: string; /** * If set to true, the widget will not collect user events. */ disableUserEventsCollection?: boolean; /** * Whether or not to enable autocomplete. */ enableAutocomplete?: boolean; /** * If set to true, the widget will enable the create agent button. */ enableCreateAgentButton?: boolean; /** * If set to true, the widget will enable people search. */ enablePeopleSearch?: boolean; /** * Turn on or off collecting the search result quality feedback from end users. */ enableQualityFeedback?: boolean; /** * Whether to enable safe search. */ enableSafeSearch?: boolean; /** * Whether to enable search-as-you-type behavior for the search widget. 
*/ enableSearchAsYouType?: boolean; /** * If set to true, the widget will enable visual content summary on applicable * search requests. Only used by healthcare search. */ enableVisualContentSummary?: boolean; /** * Describes generative answer configuration. * Structure is documented below. */ generativeAnswerConfig?: outputs.discoveryengine.WidgetConfigUiSettingsGenerativeAnswerConfig; /** * Describes widget (or web app) interaction type * Possible values are: `SEARCH_ONLY`, `SEARCH_WITH_ANSWER`, `SEARCH_WITH_FOLLOW_UPS`. */ interactionType?: string; /** * Controls whether result extract is displayed and how (snippet or extractive answer). * Default to no result if unspecified. * Possible values are: `SNIPPET`, `EXTRACTIVE_ANSWER`. */ resultDescriptionType?: string; } interface WidgetConfigUiSettingsDataStoreUiConfig { /** * Structure is documented below. */ facetFields?: outputs.discoveryengine.WidgetConfigUiSettingsDataStoreUiConfigFacetField[]; /** * The key is the UI component. Currently supported `title`, `thumbnail`, * `url`, `custom1`, `custom2`, `custom3`. The value is the name of * the field along with its device visibility. The 3 custom fields are optional * and can be added or removed. * `title`, `thumbnail`, `url` are required UI components that cannot be removed. * Structure is documented below. */ fieldsUiComponentsMaps?: outputs.discoveryengine.WidgetConfigUiSettingsDataStoreUiConfigFieldsUiComponentsMap[]; /** * The name of the data store. It should be data store resource name. Format: * `projects/{project}/locations/{location}/collections/{collectionId}/dataStores/{dataStoreId}`. * For APIs under `WidgetService`, such as [WidgetService.LookUpWidgetConfig][], * the project number and location part is erased in this field. */ name?: string; } interface WidgetConfigUiSettingsDataStoreUiConfigFacetField { /** * The field name that end users will see. */ displayName?: string; /** * Registered field name. The format is `field.abc`. 
*/ field: string; } interface WidgetConfigUiSettingsDataStoreUiConfigFieldsUiComponentsMap { /** * Each value may be one of: `MOBILE`, `DESKTOP`. */ deviceVisibilities?: string[]; /** * The template to customize how the field is displayed. * An example value would be a string that looks like: "Price: {value}". */ displayTemplate?: string; /** * Registered field name. The format is `field.abc`. */ field: string; /** * The identifier for this object. Format specified above. */ uiComponent: string; } interface WidgetConfigUiSettingsGenerativeAnswerConfig { /** * Whether generated answer contains suggested related questions. */ disableRelatedQuestions?: boolean; /** * Specifies whether to filter out queries that are adversarial. */ ignoreAdversarialQuery?: boolean; /** * Specifies whether to filter out queries that are not relevant to the content. */ ignoreLowRelevantContent?: boolean; /** * Specifies whether to filter out queries that are not answer-seeking. * The default value is `false`. No answer is returned if the search query * is classified as a non-answer seeking query. * If this field is set to `true`, we skip generating answers for * non-answer seeking queries and return fallback messages instead. */ ignoreNonAnswerSeekingQuery?: boolean; /** * Source of image returned in the answer. * Possible values are: `ALL_AVAILABLE_SOURCES`, `CORPUS_IMAGE_ONLY`, `FIGURE_GENERATION_ONLY`. */ imageSource?: string; /** * Language code for Summary. Use language tags defined by * [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This * is an experimental feature. */ languageCode?: string; /** * Max rephrase steps. The max number is 5 steps. If not set or * set to < 1, it will be set to 1 by default. */ maxRephraseSteps?: number; /** * Text at the beginning of the prompt that instructs the model that generates the answer. */ modelPromptPreamble?: string; /** * The model version used to generate the answer. 
*/ modelVersion?: string; /** * The number of top results to generate the answer from. Up to 10. */ resultCount?: number; } } export declare namespace dns { interface DnsManagedZoneIamBindingCondition { description?: string; expression: string; title: string; } interface DnsManagedZoneIamMemberCondition { description?: string; expression: string; title: string; } interface GetKeysKeySigningKey { /** * String mnemonic specifying the DNSSEC algorithm of this key. Immutable after creation time. Possible values are `ecdsap256sha256`, `ecdsap384sha384`, `rsasha1`, `rsasha256`, and `rsasha512`. */ algorithm: string; /** * The time that this resource was created in the control plane. This is in RFC3339 text format. */ creationTime: string; /** * A mutable string of at most 1024 characters associated with this resource for the user's convenience. */ description: string; /** * A list of cryptographic hashes of the DNSKEY resource record associated with this DnsKey. These digests are needed to construct a DS record that points at this DNS key. Each contains: */ digests: outputs.dns.GetKeysKeySigningKeyDigest[]; /** * The DS record based on the KSK record. This is used when [delegating](https://cloud.google.com/dns/docs/dnssec-advanced#subdelegation) DNSSEC-signed subdomains. */ dsRecord: string; /** * Unique identifier for the resource; defined by the server. */ id: string; /** * Active keys will be used to sign subsequent changes to the ManagedZone. Inactive keys will still be present as DNSKEY Resource Records for the use of resolvers validating existing signatures. */ isActive: boolean; /** * Length of the key in bits. Specified at creation time then immutable. */ keyLength: number; /** * The key tag is a non-cryptographic hash of the DNSKEY resource record associated with this DnsKey. The key tag can be used to identify a DNSKEY more quickly (but it is not a unique identifier). 
In particular, the key tag is used in a parent zone's DS record to point at the DNSKEY in this child ManagedZone. The key tag is a number in the range [0, 65535] and the algorithm to calculate it is specified in RFC4034 Appendix B. */ keyTag: number; /** * Base64 encoded public half of this key. */ publicKey: string; } interface GetKeysKeySigningKeyDigest { /** * The base-16 encoded bytes of this digest. Suitable for use in a DS resource record. */ digest?: string; /** * Specifies the algorithm used to calculate this digest. Possible values are `sha1`, `sha256` and `sha384` */ type?: string; } interface GetKeysZoneSigningKey { /** * String mnemonic specifying the DNSSEC algorithm of this key. Immutable after creation time. Possible values are `ecdsap256sha256`, `ecdsap384sha384`, `rsasha1`, `rsasha256`, and `rsasha512`. */ algorithm: string; /** * The time that this resource was created in the control plane. This is in RFC3339 text format. */ creationTime: string; /** * A mutable string of at most 1024 characters associated with this resource for the user's convenience. */ description: string; /** * A list of cryptographic hashes of the DNSKEY resource record associated with this DnsKey. These digests are needed to construct a DS record that points at this DNS key. Each contains: */ digests: outputs.dns.GetKeysZoneSigningKeyDigest[]; /** * Unique identifier for the resource; defined by the server. */ id: string; /** * Active keys will be used to sign subsequent changes to the ManagedZone. Inactive keys will still be present as DNSKEY Resource Records for the use of resolvers validating existing signatures. */ isActive: boolean; /** * Length of the key in bits. Specified at creation time then immutable. */ keyLength: number; /** * The key tag is a non-cryptographic hash of the DNSKEY resource record associated with this DnsKey. The key tag can be used to identify a DNSKEY more quickly (but it is not a unique identifier). 
In particular, the key tag is used in a parent zone's DS record to point at the DNSKEY in this child ManagedZone. The key tag is a number in the range [0, 65535] and the algorithm to calculate it is specified in RFC4034 Appendix B. */ keyTag: number; /** * Base64 encoded public half of this key. */ publicKey: string; } interface GetKeysZoneSigningKeyDigest { /** * The base-16 encoded bytes of this digest. Suitable for use in a DS resource record. */ digest?: string; /** * Specifies the algorithm used to calculate this digest. Possible values are `sha1`, `sha256` and `sha384` */ type?: string; } interface GetManagedZonesManagedZone { description: string; dnsName: string; id: string; managedZoneId: string; name?: string; nameServers: string[]; /** * The ID of the project containing Google Cloud DNS zones. If this is not provided the default project will be used. */ project?: string; visibility: string; } interface ManagedZoneCloudLoggingConfig { /** * If set, enable query logging for this ManagedZone. False by default, making logging opt-in. */ enableLogging: boolean; } interface ManagedZoneDnssecConfig { /** * Specifies parameters that will be used for generating initial DnsKeys * for this ManagedZone. If you provide a spec for keySigning or zoneSigning, * you must also provide one for the other. * defaultKeySpecs can only be updated when the state is `off`. * Structure is documented below. */ defaultKeySpecs: outputs.dns.ManagedZoneDnssecConfigDefaultKeySpec[]; /** * Identifies what kind of resource this is */ kind?: string; /** * Specifies the mechanism used to provide authenticated denial-of-existence responses. * nonExistence can only be updated when the state is `off`. * Possible values are: `nsec`, `nsec3`. */ nonExistence: string; /** * Specifies whether DNSSEC is enabled, and what mode it is in * Possible values are: `off`, `on`, `transfer`. 
*/ state?: string; } interface ManagedZoneDnssecConfigDefaultKeySpec { /** * String mnemonic specifying the DNSSEC algorithm of this key * Possible values are: `ecdsap256sha256`, `ecdsap384sha384`, `rsasha1`, `rsasha256`, `rsasha512`. */ algorithm?: string; /** * Length of the keys in bits */ keyLength?: number; /** * Specifies whether this is a key signing key (KSK) or a zone * signing key (ZSK). Key signing keys have the Secure Entry * Point flag set and, when active, will only be used to sign * resource record sets of type DNSKEY. Zone signing keys do * not have the Secure Entry Point flag set and will be used * to sign all other types of resource record sets. * Possible values are: `keySigning`, `zoneSigning`. */ keyType?: string; /** * Identifies what kind of resource this is */ kind?: string; } interface ManagedZoneForwardingConfig { /** * List of target name servers to forward to. Cloud DNS will * select the best available name server if more than * one target is given. * Structure is documented below. */ targetNameServers: outputs.dns.ManagedZoneForwardingConfigTargetNameServer[]; } interface ManagedZoneForwardingConfigTargetNameServer { /** * Fully qualified domain name for the forwarding target. */ domainName?: string; /** * Forwarding path for this TargetNameServer. If unset or `default` * Cloud DNS will make forwarding decision based on address ranges, * i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go * to the Internet. When set to `private`, Cloud DNS will always * send queries through VPC for this target. * Possible values are: `default`, `private`. */ forwardingPath?: string; /** * IPv4 address of a target name server. * Does not accept both fields (ipv4 & ipv6) being populated. */ ipv4Address?: string; /** * IPv6 address of a target name server. * Does not accept both fields (ipv4 & ipv6) being populated. */ ipv6Address?: string; } interface ManagedZonePeeringConfig { /** * The network with which to peer. 
* Structure is documented below. */ targetNetwork: outputs.dns.ManagedZonePeeringConfigTargetNetwork; } interface ManagedZonePeeringConfigTargetNetwork { /** * The id or fully qualified URL of the VPC network to forward queries to. * This should be formatted like `projects/{project}/global/networks/{network}` or * `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` */ networkUrl: string; } interface ManagedZonePrivateVisibilityConfig { /** * The list of Google Kubernetes Engine clusters that can see this zone. * Structure is documented below. */ gkeClusters?: outputs.dns.ManagedZonePrivateVisibilityConfigGkeCluster[]; /** * The list of VPC networks that can see this zone. Until the provider updates to use the Terraform 0.12 SDK in a future release, you * may experience issues with this resource while updating. If you've defined a `networks` block and * add another `networks` block while keeping the old block, Terraform will see an incorrect diff * and apply an incorrect update to the resource. If you encounter this issue, remove all `networks` * blocks in an update and then apply another update adding all of them back simultaneously. * Structure is documented below. */ networks?: outputs.dns.ManagedZonePrivateVisibilityConfigNetwork[]; } interface ManagedZonePrivateVisibilityConfigGkeCluster { /** * The resource name of the cluster to bind this ManagedZone to. * This should be specified in the format like * `projects/*/locations/*/clusters/*` */ gkeClusterName: string; } interface ManagedZonePrivateVisibilityConfigNetwork { /** * The id or fully qualified URL of the VPC network to bind to. * This should be formatted like `projects/{project}/global/networks/{network}` or * `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` */ networkUrl: string; } interface ManagedZoneServiceDirectoryConfig { /** * The namespace associated with the zone. * Structure is documented below. 
*/ namespace: outputs.dns.ManagedZoneServiceDirectoryConfigNamespace; } interface ManagedZoneServiceDirectoryConfigNamespace { /** * The fully qualified or partial URL of the service directory namespace that should be * associated with the zone. This should be formatted like * `https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace_id}` * or simply `projects/{project}/locations/{location}/namespaces/{namespace_id}` * Ignored for `public` visibility zones. */ namespaceUrl: string; } interface PolicyAlternativeNameServerConfig { /** * Sets an alternative name server for the associated networks. When specified, * all DNS queries are forwarded to a name server that you choose. Names such as .internal * are not available when an alternative name server is specified. * Structure is documented below. */ targetNameServers: outputs.dns.PolicyAlternativeNameServerConfigTargetNameServer[]; } interface PolicyAlternativeNameServerConfigTargetNameServer { /** * Forwarding path for this TargetNameServer. If unset or `default` Cloud DNS will make forwarding * decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go * to the Internet. When set to `private`, Cloud DNS will always send queries through VPC for this target * Possible values are: `default`, `private`. * * The `dns64Config` block supports: */ forwardingPath?: string; /** * IPv4 address to forward to. */ ipv4Address: string; } interface PolicyDns64Config { /** * The scope to which DNS64 config will be applied to. */ scope: outputs.dns.PolicyDns64ConfigScope; } interface PolicyDns64ConfigScope { /** * Controls whether DNS64 is enabled globally at the network level. */ allQueries?: boolean; } interface PolicyNetwork { /** * The id or fully qualified URL of the VPC network to forward queries to. 
* This should be formatted like `projects/{project}/global/networks/{network}` or * `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` */ networkUrl: string; } interface RecordSetRoutingPolicy { /** * Specifies whether to enable fencing for geo queries. */ enableGeoFencing?: boolean; /** * The configuration for Geolocation based routing policy. * Structure is documented below. */ geos?: outputs.dns.RecordSetRoutingPolicyGeo[]; /** * Specifies the health check (used with external endpoints). */ healthCheck?: string; /** * The configuration for a failover policy with global to regional failover. Queries are responded to with the global primary targets, but if none of the primary targets are healthy, then we fallback to a regional failover policy. * Structure is documented below. */ primaryBackup?: outputs.dns.RecordSetRoutingPolicyPrimaryBackup; /** * The configuration for Weighted Round Robin based routing policy. * Structure is documented below. */ wrrs?: outputs.dns.RecordSetRoutingPolicyWrr[]; } interface RecordSetRoutingPolicyGeo { /** * For A and AAAA types only. The list of targets to be health checked. These can be specified along with `rrdatas` within this item. * Structure is documented below. */ healthCheckedTargets?: outputs.dns.RecordSetRoutingPolicyGeoHealthCheckedTargets; /** * The location name defined in Google Cloud. */ location: string; /** * Same as `rrdatas` above. */ rrdatas?: string[]; } interface RecordSetRoutingPolicyGeoHealthCheckedTargets { /** * The list of external endpoint addresses to health check. */ externalEndpoints?: string[]; /** * The list of internal load balancers to health check. * Structure is documented below. */ internalLoadBalancers?: outputs.dns.RecordSetRoutingPolicyGeoHealthCheckedTargetsInternalLoadBalancer[]; } interface RecordSetRoutingPolicyGeoHealthCheckedTargetsInternalLoadBalancer { /** * The frontend IP address of the load balancer. 
*/ ipAddress: string; /** * The configured IP protocol of the load balancer. This value is case-sensitive. Possible values: ["tcp", "udp"] */ ipProtocol: string; /** * The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"] */ loadBalancerType?: string; /** * The fully qualified url of the network in which the load balancer belongs. This should be formatted like `projects/{project}/global/networks/{network}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`. */ networkUrl: string; /** * The configured port of the load balancer. */ port: string; /** * The ID of the project in which the load balancer belongs. */ project: string; /** * The region of the load balancer. Only needed for regional load balancers. */ region?: string; } interface RecordSetRoutingPolicyPrimaryBackup { /** * The backup geo targets, which provide a regional failover policy for the otherwise global primary targets. * Structure is document above. */ backupGeos: outputs.dns.RecordSetRoutingPolicyPrimaryBackupBackupGeo[]; /** * Specifies whether to enable fencing for backup geo queries. */ enableGeoFencingForBackups?: boolean; /** * The list of global primary targets to be health checked. * Structure is documented below. */ primary: outputs.dns.RecordSetRoutingPolicyPrimaryBackupPrimary; /** * Specifies the percentage of traffic to send to the backup targets even when the primary targets are healthy. */ trickleRatio?: number; } interface RecordSetRoutingPolicyPrimaryBackupBackupGeo { /** * For A and AAAA types only. The list of targets to be health checked. These can be specified along with `rrdatas` within this item. */ healthCheckedTargets?: outputs.dns.RecordSetRoutingPolicyPrimaryBackupBackupGeoHealthCheckedTargets; /** * The location name defined in Google Cloud. */ location: string; /** * The string data for the records in this record set * whose meaning depends on the DNS type. 
For TXT record, if the string data contains spaces, add surrounding `\"` if you don't want your string to get split on spaces. To specify a single record value longer than 255 characters such as a TXT record for DKIM, add `\" \"` inside the Terraform configuration string (e.g. `"first255characters\" \"morecharacters"`). */ rrdatas?: string[]; } interface RecordSetRoutingPolicyPrimaryBackupBackupGeoHealthCheckedTargets { /** * The list of external endpoint addresses to health check. */ externalEndpoints?: string[]; /** * The list of internal load balancers to health check. * Structure is documented below. */ internalLoadBalancers?: outputs.dns.RecordSetRoutingPolicyPrimaryBackupBackupGeoHealthCheckedTargetsInternalLoadBalancer[]; } interface RecordSetRoutingPolicyPrimaryBackupBackupGeoHealthCheckedTargetsInternalLoadBalancer { /** * The frontend IP address of the load balancer. */ ipAddress: string; /** * The configured IP protocol of the load balancer. This value is case-sensitive. Possible values: ["tcp", "udp"] */ ipProtocol: string; /** * The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"] */ loadBalancerType?: string; /** * The fully qualified url of the network in which the load balancer belongs. This should be formatted like `projects/{project}/global/networks/{network}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`. */ networkUrl: string; /** * The configured port of the load balancer. */ port: string; /** * The ID of the project in which the load balancer belongs. */ project: string; /** * The region of the load balancer. Only needed for regional load balancers. */ region?: string; } interface RecordSetRoutingPolicyPrimaryBackupPrimary { /** * The Internet IP addresses to be health checked. */ externalEndpoints?: string[]; /** * The list of internal load balancers to health check. 
*/ internalLoadBalancers?: outputs.dns.RecordSetRoutingPolicyPrimaryBackupPrimaryInternalLoadBalancer[]; } interface RecordSetRoutingPolicyPrimaryBackupPrimaryInternalLoadBalancer { /** * The frontend IP address of the load balancer. */ ipAddress: string; /** * The configured IP protocol of the load balancer. This value is case-sensitive. Possible values: ["tcp", "udp"] */ ipProtocol: string; /** * The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"] */ loadBalancerType?: string; /** * The fully qualified url of the network in which the load balancer belongs. This should be formatted like `projects/{project}/global/networks/{network}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`. */ networkUrl: string; /** * The configured port of the load balancer. */ port: string; /** * The ID of the project in which the load balancer belongs. */ project: string; /** * The region of the load balancer. Only needed for regional load balancers. */ region?: string; } interface RecordSetRoutingPolicyWrr { /** * The list of targets to be health checked. Note that if DNSSEC is enabled for this zone, only one of `rrdatas` or `healthCheckedTargets` can be set. * Structure is documented below. */ healthCheckedTargets?: outputs.dns.RecordSetRoutingPolicyWrrHealthCheckedTargets; /** * Same as `rrdatas` above. */ rrdatas?: string[]; /** * The ratio of traffic routed to the target. */ weight: number; } interface RecordSetRoutingPolicyWrrHealthCheckedTargets { /** * The list of external endpoint addresses to health check. */ externalEndpoints?: string[]; /** * The list of internal load balancers to health check. * Structure is documented below. */ internalLoadBalancers?: outputs.dns.RecordSetRoutingPolicyWrrHealthCheckedTargetsInternalLoadBalancer[]; } interface RecordSetRoutingPolicyWrrHealthCheckedTargetsInternalLoadBalancer { /** * The frontend IP address of the load balancer. 
*/ ipAddress: string; /** * The configured IP protocol of the load balancer. This value is case-sensitive. Possible values: ["tcp", "udp"] */ ipProtocol: string; /** * The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb", "globalL7ilb"] */ loadBalancerType?: string; /** * The fully qualified url of the network in which the load balancer belongs. This should be formatted like `projects/{project}/global/networks/{network}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`. */ networkUrl: string; /** * The configured port of the load balancer. */ port: string; /** * The ID of the project in which the load balancer belongs. */ project: string; /** * The region of the load balancer. Only needed for regional load balancers. */ region?: string; } interface ResponsePolicyGkeCluster { /** * The resource name of the cluster to bind this ManagedZone to. * This should be specified in the format like * `projects/*/locations/*/clusters/*` */ gkeClusterName: string; } interface ResponsePolicyNetwork { /** * The fully qualified URL of the VPC network to bind to. * This should be formatted like * `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` */ networkUrl: string; } interface ResponsePolicyRuleLocalData { /** * All resource record sets for this selector, one per resource record type. The name must match the dns_name. * Structure is documented below. */ localDatas: outputs.dns.ResponsePolicyRuleLocalDataLocalData[]; } interface ResponsePolicyRuleLocalDataLocalData { /** * For example, www.example.com. */ name: string; /** * As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) */ rrdatas?: string[]; /** * Number of seconds that this ResourceRecordSet can be cached by * resolvers. */ ttl?: number; /** * One of valid DNS resource types. 
* Possible values are: `A`, `AAAA`, `CAA`, `CNAME`, `DNSKEY`, `DS`, `HTTPS`, `IPSECVPNKEY`, `MX`, `NAPTR`, `NS`, `PTR`, `SOA`, `SPF`, `SRV`, `SSHFP`, `SVCB`, `TLSA`, `TXT`. */ type: string; } } export declare namespace edgecontainer { interface ClusterAuthorization { /** * User that will be granted the cluster-admin role on the cluster, providing * full access to the cluster. Currently, this is a singular field, but will * be expanded to allow multiple admins in the future. * Structure is documented below. */ adminUsers: outputs.edgecontainer.ClusterAuthorizationAdminUsers; } interface ClusterAuthorizationAdminUsers { /** * An active Google username. */ username: string; } interface ClusterControlPlane { /** * Local control plane configuration. * Structure is documented below. */ local: outputs.edgecontainer.ClusterControlPlaneLocal; /** * Remote control plane configuration. * Structure is documented below. */ remote: outputs.edgecontainer.ClusterControlPlaneRemote; } interface ClusterControlPlaneEncryption { /** * The Cloud KMS CryptoKey e.g. * projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} * to use for protecting control plane disks. If not specified, a * Google-managed key will be used instead. */ kmsKey: string; /** * (Output) * The Cloud KMS CryptoKeyVersion currently in use for protecting control * plane disks. Only applicable if kmsKey is set. */ kmsKeyActiveVersion: string; /** * (Output) * Availability of the Cloud KMS CryptoKey. If not `KEY_AVAILABLE`, then * nodes may go offline as they cannot access their local data. This can be * caused by a lack of permissions to use the key, or if the key is disabled * or deleted. */ kmsKeyState: string; /** * (Output) * Error status returned by Cloud KMS when using this key. This field may be * populated only if `kmsKeyState` is not `KMS_KEY_STATE_KEY_AVAILABLE`. * If populated, this field contains the error status reported by Cloud KMS. * Structure is documented below. 
* * * The `kmsStatus` block contains: */ kmsStatuses: outputs.edgecontainer.ClusterControlPlaneEncryptionKmsStatus[]; } interface ClusterControlPlaneEncryptionKmsStatus { /** * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ message: string; } interface ClusterControlPlaneLocal { /** * Only machines matching this filter will be allowed to host control * plane nodes. The filtering language accepts strings like "name=", * and is documented here: [AIP-160](https://google.aip.dev/160). */ machineFilter?: string; /** * The number of nodes to serve as replicas of the Control Plane. * Only 1 and 3 are supported. */ nodeCount: number; /** * Name of the Google Distributed Cloud Edge zones where this node pool * will be created. For example: `us-central1-edge-customer-a`. */ nodeLocation: string; /** * Policy configuration about how user applications are deployed. * Possible values are: `SHARED_DEPLOYMENT_POLICY_UNSPECIFIED`, `ALLOWED`, `DISALLOWED`. */ sharedDeploymentPolicy: string; } interface ClusterControlPlaneRemote { /** * Name of the Google Distributed Cloud Edge zones where this node pool * will be created. For example: `us-central1-edge-customer-a`. */ nodeLocation: string; } interface ClusterFleet { /** * (Output) * The name of the managed Hub Membership resource associated to this cluster. * Membership names are formatted as * `projects//locations/global/membership/`. */ membership: string; /** * The name of the Fleet host project where this cluster will be registered. * Project names are formatted as * `projects/`. */ project: string; } interface ClusterMaintenanceEvent { /** * (Output) * The time when the maintenance event request was created. 
*/ createTime: string; /** * (Output) * The time when the maintenance event ended, either successfully or not. If * the maintenance event is split into multiple maintenance windows, * endTime is only updated when the whole flow ends. */ endTime: string; /** * (Output) * The operation for running the maintenance event. Specified in the format * projects/*/locations/*/operations/*. If the maintenance event is split * into multiple operations (e.g. due to maintenance windows), the latest * one is recorded. */ operation: string; /** * (Output) * The schedule of the maintenance event. */ schedule: string; /** * (Output) * The time when the maintenance event started. */ startTime: string; /** * (Output) * Indicates the maintenance event state. */ state: string; /** * The target cluster version. For example: "1.5.0". */ targetVersion: string; /** * (Output) * Indicates the maintenance event type. */ type: string; /** * (Output) * The time when the maintenance event message was updated. */ updateTime: string; /** * (Output) * UUID of the maintenance event. */ uuid: string; } interface ClusterMaintenancePolicy { /** * Exclusions to automatic maintenance. Non-emergency maintenance should not occur * in these windows. Each exclusion has a unique name and may be active or expired. * The max number of maintenance exclusions allowed at a given time is 3. * Structure is documented below. */ maintenanceExclusions?: outputs.edgecontainer.ClusterMaintenancePolicyMaintenanceExclusion[]; /** * Specifies the maintenance window in which maintenance may be performed. * Structure is documented below. */ window: outputs.edgecontainer.ClusterMaintenancePolicyWindow; } interface ClusterMaintenancePolicyMaintenanceExclusion { /** * A unique (per cluster) id for the window. */ id: string; /** * Represents an arbitrary window of time. * Structure is documented below. 
*/ window: outputs.edgecontainer.ClusterMaintenancePolicyMaintenanceExclusionWindow; } interface ClusterMaintenancePolicyMaintenanceExclusionWindow { /** * The time that the window ends. The end time must take place after the * start time. */ endTime: string; /** * The time that the window first starts. */ startTime: string; } interface ClusterMaintenancePolicyWindow { /** * Represents an arbitrary window of time that recurs. * Structure is documented below. */ recurringWindow: outputs.edgecontainer.ClusterMaintenancePolicyWindowRecurringWindow; } interface ClusterMaintenancePolicyWindowRecurringWindow { /** * An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how * this window recurs. They go on for the span of time between the start and * end time. */ recurrence: string; /** * Represents an arbitrary window of time. * Structure is documented below. */ window: outputs.edgecontainer.ClusterMaintenancePolicyWindowRecurringWindowWindow; } interface ClusterMaintenancePolicyWindowRecurringWindowWindow { /** * The time that the window ends. The end time must take place after the * start time. */ endTime: string; /** * The time that the window first starts. */ startTime: string; } interface ClusterNetworking { /** * All pods in the cluster are assigned an RFC1918 IPv4 address from these * blocks. Only a single block is supported. This field cannot be changed * after creation. */ clusterIpv4CidrBlocks: string[]; /** * If specified, dual stack mode is enabled and all pods in the cluster are * assigned an IPv6 address from these blocks alongside from an IPv4 * address. Only a single block is supported. This field cannot be changed * after creation. */ clusterIpv6CidrBlocks?: string[]; /** * (Output) * IP addressing type of this cluster i.e. SINGLESTACK_V4 vs DUALSTACK_V4_V6. */ networkType: string; /** * All services in the cluster are assigned an RFC1918 IPv4 address from these * blocks. Only a single block is supported. 
This field cannot be changed * after creation. */ servicesIpv4CidrBlocks: string[]; /** * If specified, dual stack mode is enabled and all services in the cluster are * assigned an IPv6 address from these blocks alongside from an IPv4 * address. Only a single block is supported. This field cannot be changed * after creation. */ servicesIpv6CidrBlocks?: string[]; } interface ClusterSystemAddonsConfig { /** * Config for the Ingress add-on which allows customers to create an Ingress * object to manage external access to the servers in a cluster. The add-on * consists of istiod and istio-ingress. * Structure is documented below. */ ingress: outputs.edgecontainer.ClusterSystemAddonsConfigIngress; } interface ClusterSystemAddonsConfigIngress { /** * Whether Ingress is disabled. */ disabled: boolean; /** * Ingress VIP. */ ipv4Vip: string; } interface NodePoolLocalDiskEncryption { /** * The Cloud KMS CryptoKey e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} to use for protecting node local disks. * If not specified, a Google-managed key will be used instead. */ kmsKey?: string; /** * (Output) * The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set. */ kmsKeyActiveVersion: string; /** * (Output) * Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. * This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted. */ kmsKeyState: string; } interface NodePoolNodeConfig { /** * "The Kubernetes node labels" */ labels: { [key: string]: string; }; } interface VpnConnectionDetail { /** * (Output) * The Cloud Router info. * Structure is documented below. */ cloudRouters: outputs.edgecontainer.VpnConnectionDetailCloudRouter[]; /** * (Output) * Each connection has multiple Cloud VPN gateways. * Structure is documented below. 
*/ cloudVpns: outputs.edgecontainer.VpnConnectionDetailCloudVpn[]; /** * (Output) * The error message. This is only populated when state=ERROR. */ error: string; /** * (Output) * The current connection state. */ state: string; } interface VpnConnectionDetailCloudRouter { /** * The resource name of VPN connection */ name: string; } interface VpnConnectionDetailCloudVpn { /** * (Output) * The created Cloud VPN gateway name. */ gateway: string; } interface VpnConnectionVpcProject { /** * The project of the VPC to connect to. If not specified, it is the same as the cluster project. */ projectId?: string; } } export declare namespace endpoints { interface ConsumersIamBindingCondition { description?: string; expression: string; title: string; } interface ConsumersIamMemberCondition { description?: string; expression: string; title: string; } interface ServiceApi { /** * A list of Method objects; structure is documented below. */ methods: outputs.endpoints.ServiceApiMethod[]; /** * The simple name of the endpoint as described in the config. */ name: string; /** * `SYNTAX_PROTO2` or `SYNTAX_PROTO3`. */ syntax: string; /** * A version string for this api. If specified, will have the form major-version.minor-version, e.g. `1.10`. */ version: string; } interface ServiceApiMethod { /** * The simple name of the endpoint as described in the config. */ name: string; /** * The type URL for the request to this API. */ requestType: string; /** * The type URL for the response from this API. */ responseType: string; /** * `SYNTAX_PROTO2` or `SYNTAX_PROTO3`. */ syntax: string; } interface ServiceEndpoint { /** * The FQDN of the endpoint as described in the config. */ address: string; /** * The simple name of the endpoint as described in the config. 
*/ name: string; } interface ServiceIamBindingCondition { description?: string; expression: string; title: string; } interface ServiceIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace essentialcontacts { interface DocumentAiWarehouseDocumentSchemaPropertyDefinition { /** * Date time property. Not supported by CMEK compliant deployment. */ dateTimeTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionDateTimeTypeOptions; /** * The display-name for the property, used for front-end. */ displayName?: string; /** * Enum/categorical property. * Structure is documented below. */ enumTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionEnumTypeOptions; /** * Float property. */ floatTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionFloatTypeOptions; /** * Integer property. */ integerTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionIntegerTypeOptions; /** * Whether the property can be filtered. If this is a sub-property, all the parent properties must be marked filterable. */ isFilterable?: boolean; /** * Whether the property is user supplied metadata. */ isMetadata?: boolean; /** * Whether the property can have multiple values. */ isRepeatable?: boolean; /** * Whether the property is mandatory. */ isRequired?: boolean; /** * Indicates that the property should be included in a global search. */ isSearchable?: boolean; /** * Map property. */ mapTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionMapTypeOptions; /** * The name of the metadata property. */ name: string; /** * Nested structured data property. * Structure is documented below. */ propertyTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptions; /** * Stores the retrieval importance. 
* Possible values are: `HIGHEST`, `HIGHER`, `HIGH`, `MEDIUM`, `LOW`, `LOWEST`. */ retrievalImportance?: string; /** * The schema source information. * Structure is documented below. */ schemaSources?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionSchemaSource[]; /** * Text property. */ textTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionTextTypeOptions; /** * Timestamp property. Not supported by CMEK compliant deployment. */ timestampTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionTimestampTypeOptions; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionDateTimeTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionEnumTypeOptions { /** * List of possible enum values. */ possibleValues: string[]; /** * Make sure the enum property value provided in the document is in the possible value list during document creation. The validation check runs by default. */ validationCheckDisabled?: boolean; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionFloatTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionIntegerTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionMapTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptions { /** * Defines the metadata for a schema property. * Structure is documented below. */ propertyDefinitions: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinition[]; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinition { /** * Date time property. Not supported by CMEK compliant deployment. 
*/ dateTimeTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionDateTimeTypeOptions; /** * The display-name for the property, used for front-end. */ displayName?: string; /** * Enum/categorical property. * Structure is documented below. */ enumTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionEnumTypeOptions; /** * Float property. */ floatTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionFloatTypeOptions; /** * Integer property. */ integerTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionIntegerTypeOptions; /** * Whether the property can be filtered. If this is a sub-property, all the parent properties must be marked filterable. */ isFilterable?: boolean; /** * Whether the property is user supplied metadata. */ isMetadata?: boolean; /** * Whether the property can have multiple values. */ isRepeatable?: boolean; /** * Whether the property is mandatory. */ isRequired?: boolean; /** * Indicates that the property should be included in a global search. */ isSearchable?: boolean; /** * Map property. */ mapTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionMapTypeOptions; /** * The name of the metadata property. */ name: string; /** * Stores the retrieval importance. * Possible values are: `HIGHEST`, `HIGHER`, `HIGH`, `MEDIUM`, `LOW`, `LOWEST`. */ retrievalImportance?: string; /** * The schema source information. * Structure is documented below. */ schemaSources?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionSchemaSource[]; /** * Text property. 
*/ textTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionTextTypeOptions; /** * Timestamp property. Not supported by CMEK compliant deployment. */ timestampTypeOptions?: outputs.essentialcontacts.DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionTimestampTypeOptions; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionDateTimeTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionEnumTypeOptions { /** * List of possible enum values. */ possibleValues: string[]; /** * Make sure the enum property value provided in the document is in the possible value list during document creation. The validation check runs by default. */ validationCheckDisabled?: boolean; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionFloatTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionIntegerTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionMapTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionSchemaSource { /** * The schema name in the source. */ name?: string; /** * The Doc AI processor type name. */ processorType?: string; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionTextTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionPropertyTypeOptionsPropertyDefinitionTimestampTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionSchemaSource { /** * The schema name in the source. */ name?: string; /** * The Doc AI processor type name. 
*/ processorType?: string; } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionTextTypeOptions { } interface DocumentAiWarehouseDocumentSchemaPropertyDefinitionTimestampTypeOptions { } } export declare namespace eventarc { interface GoogleApiSourceLoggingConfig { /** * The minimum severity of logs that will be sent to Stackdriver/Platform * Telemetry. Logs at severity ≥ this value will be sent, unless it is NONE. * Possible values are: `NONE`, `DEBUG`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `CRITICAL`, `ALERT`, `EMERGENCY`. */ logSeverity: string; } interface MessageBusLoggingConfig { /** * Optional. The minimum severity of logs that will be sent to Stackdriver/Platform * Telemetry. Logs at severity ≥ this value will be sent, unless it is NONE. * Possible values are: `NONE`, `DEBUG`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `CRITICAL`, `ALERT`, `EMERGENCY`. */ logSeverity: string; } interface PipelineDestination { /** * Represents a config used to authenticate message requests. * Structure is documented below. */ authenticationConfig?: outputs.eventarc.PipelineDestinationAuthenticationConfig; /** * Represents an HTTP endpoint destination. * Structure is documented below. */ httpEndpoint?: outputs.eventarc.PipelineDestinationHttpEndpoint; /** * The resource name of the Message Bus to which events should be * published. The Message Bus resource should exist in the same project as * the Pipeline. Format: * `projects/{project}/locations/{location}/messageBuses/{message_bus}` */ messageBus?: string; /** * Represents a network config to be used for destination resolution and * connectivity. * Structure is documented below. */ networkConfig?: outputs.eventarc.PipelineDestinationNetworkConfig; /** * Represents the format of message data. * Structure is documented below. */ outputPayloadFormat?: outputs.eventarc.PipelineDestinationOutputPayloadFormat; /** * The resource name of the Pub/Sub topic to which events should be * published. 
Format: * `projects/{project}/locations/{location}/topics/{topic}` */ topic?: string; /** * The resource name of the Workflow whose Executions are triggered by * the events. The Workflow resource should be deployed in the same * project as the Pipeline. Format: * `projects/{project}/locations/{location}/workflows/{workflow}` */ workflow?: string; } interface PipelineDestinationAuthenticationConfig { /** * Represents a config used to authenticate with a Google OIDC token using * a GCP service account. Use this authentication method to invoke your * Cloud Run and Cloud Functions destinations or HTTP endpoints that * support Google OIDC. * Structure is documented below. */ googleOidc?: outputs.eventarc.PipelineDestinationAuthenticationConfigGoogleOidc; /** * Contains information needed for generating an * [OAuth token](https://developers.google.com/identity/protocols/OAuth2). * This type of authorization should generally only be used when calling * Google APIs hosted on *.googleapis.com. * Structure is documented below. */ oauthToken?: outputs.eventarc.PipelineDestinationAuthenticationConfigOauthToken; } interface PipelineDestinationAuthenticationConfigGoogleOidc { /** * Audience to be used to generate the OIDC Token. The audience claim * identifies the recipient that the JWT is intended for. If * unspecified, the destination URI will be used. */ audience?: string; /** * Service account email used to generate the OIDC Token. * The principal who calls this API must have * iam.serviceAccounts.actAs permission in the service account. See * https://cloud.google.com/iam/docs/understanding-service-accounts * for more information. Eventarc service agents must have * roles/roles/iam.serviceAccountTokenCreator role to allow the * Pipeline to create OpenID tokens for authenticated requests. */ serviceAccount: string; } interface PipelineDestinationAuthenticationConfigOauthToken { /** * OAuth scope to be used for generating OAuth access token. 
If not * specified, "https://www.googleapis.com/auth/cloud-platform" will be * used. */ scope?: string; /** * Service account email used to generate the [OAuth * token](https://developers.google.com/identity/protocols/OAuth2). * The principal who calls this API must have * iam.serviceAccounts.actAs permission in the service account. See * https://cloud.google.com/iam/docs/understanding-service-accounts * for more information. Eventarc service agents must have * roles/roles/iam.serviceAccountTokenCreator role to allow Pipeline * to create OAuth2 tokens for authenticated requests. */ serviceAccount: string; } interface PipelineDestinationHttpEndpoint { /** * The CEL expression used to modify how the destination-bound HTTP * request is constructed. * If a binding expression is not specified here, the message * is treated as a CloudEvent and is mapped to the HTTP request according * to the CloudEvent HTTP Protocol Binding Binary Content Mode * (https://github.com/cloudevents/spec/blob/main/cloudevents/bindings/http-protocol-binding.md#31-binary-content-mode). * In this representation, all fields except the `data` and * `datacontenttype` field on the message are mapped to HTTP request * headers with a prefix of `ce-`. * To construct the HTTP request payload and the value of the content-type * HTTP header, the payload format is defined as follows: * 1) Use the outputPayloadFormatType on the Pipeline.Destination if it * is set, else: * 2) Use the inputPayloadFormatType on the Pipeline if it is set, * else: * 3) Treat the payload as opaque binary data. * The `data` field of the message is converted to the payload format or * left as-is for case 3) and then attached as the payload of the HTTP * request. The `content-type` header on the HTTP request is set to the * payload format type or left empty for case 3). 
However, if a mediation * has updated the `datacontenttype` field on the message so that it is * not the same as the payload format type but it is still a prefix of the * payload format type, then the `content-type` header on the HTTP request * is set to this `datacontenttype` value. For example, if the * `datacontenttype` is "application/json" and the payload format type is * "application/json; charset=utf-8", then the `content-type` header on * the HTTP request is set to "application/json; charset=utf-8". * If a non-empty binding expression is specified then this expression is * used to modify the default CloudEvent HTTP Protocol Binding Binary * Content representation. * The result of the CEL expression must be a map of key/value pairs * which is used as follows: * - If a map named `headers` exists on the result of the expression, * then its key/value pairs are directly mapped to the HTTP request * headers. The headers values are constructed from the corresponding * value type's canonical representation. If the `headers` field doesn't * exist then the resulting HTTP request will be the headers of the * CloudEvent HTTP Binding Binary Content Mode representation of the final * message. Note: If the specified binding expression, has updated the * `datacontenttype` field on the message so that it is not the same as * the payload format type but it is still a prefix of the payload format * type, then the `content-type` header in the `headers` map is set to * this `datacontenttype` value. * - If a field named `body` exists on the result of the expression then * its value is directly mapped to the body of the request. If the value * of the `body` field is of type bytes or string then it is used for * the HTTP request body as-is, with no conversion. If the body field is * of any other type then it is converted to a JSON string. 
If the body * field does not exist then the resulting payload of the HTTP request * will be data value of the CloudEvent HTTP Binding Binary Content Mode * representation of the final message as described earlier. * - Any other fields in the resulting expression will be ignored. * The CEL expression may access the incoming CloudEvent message in its * definition, as follows: * - The `data` field of the incoming CloudEvent message can be accessed * using the `message.data` value. Subfields of `message.data` may also be * accessed if an inputPayloadFormat has been specified on the Pipeline. * - Each attribute of the incoming CloudEvent message can be accessed * using the `message.` value, where is replaced with the * name of the attribute. * - Existing headers can be accessed in the CEL expression using the * `headers` variable. The `headers` variable defines a map of key/value * pairs corresponding to the HTTP headers of the CloudEvent HTTP Binding * Binary Content Mode representation of the final message as described * earlier. For example, the following CEL expression can be used to * construct an HTTP request by adding an additional header to the HTTP * headers of the CloudEvent HTTP Binding Binary Content Mode * representation of the final message and by overwriting the body of the * request: * ``` * { * "headers": headers.merge({"new-header-key": "new-header-value"}), * "body": "new-body" * } * ``` * - The default binding for the message payload can be accessed using the * `body` variable. It contains a string representation of the message * payload in the format specified by the `outputPayloadFormat` field. * If the `inputPayloadFormat` field is not set, the `body` * variable contains the same message payload bytes that were published. 
* Additionally, the following CEL extension functions are provided for * use in this CEL expression: * - toBase64Url: * map.toBase64Url() > string * - Converts a CelValue to a base64url encoded string * - toJsonString: map.toJsonString() > string * - Converts a CelValue to a JSON string * - merge: * map1.merge(map2) > map3 * - Merges the passed CEL map with the existing CEL map the * function is applied to. * - If the same key exists in both maps, if the key's value is type * map both maps are merged else the value from the passed map is * used. * - denormalize: * map.denormalize() > map * - Denormalizes a CEL map such that every value of type map or key * in the map is expanded to return a single level map. * - The resulting keys are "." separated indices of the map keys. * - For example: * { * "a": 1, * "b": { * "c": 2, * "d": 3 * } * "e": [4, 5] * } * .denormalize() * > { * "a": 1, * "b.c": 2, * "b.d": 3, * "e.0": 4, * "e.1": 5 * } * - setField: * map.setField(key, value) > message * - Sets the field of the message with the given key to the * given value. * - If the field is not present it will be added. * - If the field is present it will be overwritten. * - The key can be a dot separated path to set a field in a nested * message. * - Key must be of type string. * - Value may be any valid type. * - removeFields: * map.removeFields([key1, key2, ...]) > message * - Removes the fields of the map with the given keys. * - The keys can be a dot separated path to remove a field in a * nested message. * - If a key is not found it will be ignored. * - Keys must be of type string. * - toMap: * [map1, map2, ...].toMap() > map * - Converts a CEL list of CEL maps to a single CEL map * - toCloudEventJsonWithPayloadFormat: * message.toCloudEventJsonWithPayloadFormat() > map * - Converts a message to the corresponding structure of JSON * format for CloudEvents. * - It converts `data` to destination payload format * specified in `outputPayloadFormat`. 
If `outputPayloadFormat` is * not set, the data will remain unchanged. * - It also sets the corresponding datacontenttype of * the CloudEvent, as indicated by * `outputPayloadFormat`. If no * `outputPayloadFormat` is set it will use the value of the * "datacontenttype" attribute on the CloudEvent if present, else * remove "datacontenttype" attribute. * - This function expects that the content of the message will * adhere to the standard CloudEvent format. If it doesn't then this * function will fail. * - The result is a CEL map that corresponds to the JSON * representation of the CloudEvent. To convert that data to a JSON * string it can be chained with the toJsonString function. * The Pipeline expects that the message it receives adheres to the * standard CloudEvent format. If it doesn't then the outgoing message * request may fail with a persistent error. */ messageBindingTemplate?: string; /** * The URI of the HTTP endpoint. * The value must be a RFC2396 URI string. * Examples: `https://svc.us-central1.p.local:8080/route`. * Only the HTTPS protocol is supported. */ uri: string; } interface PipelineDestinationNetworkConfig { /** * Name of the NetworkAttachment that allows access to the consumer VPC. * Format: * `projects/{PROJECT_ID}/regions/{REGION}/networkAttachments/{NETWORK_ATTACHMENT_NAME}` * Required for HTTP endpoint destinations. Must not be specified for * Workflows, MessageBus, or Topic destinations. */ networkAttachment?: string; } interface PipelineDestinationOutputPayloadFormat { /** * The format of an AVRO message payload. * Structure is documented below. */ avro?: outputs.eventarc.PipelineDestinationOutputPayloadFormatAvro; /** * The format of a JSON message payload. */ json?: outputs.eventarc.PipelineDestinationOutputPayloadFormatJson; /** * The format of a Protobuf message payload. * Structure is documented below. 
*/ protobuf?: outputs.eventarc.PipelineDestinationOutputPayloadFormatProtobuf; } interface PipelineDestinationOutputPayloadFormatAvro { /** * The entire schema definition is stored in this field. */ schemaDefinition?: string; } interface PipelineDestinationOutputPayloadFormatJson { } interface PipelineDestinationOutputPayloadFormatProtobuf { /** * The entire schema definition is stored in this field. */ schemaDefinition?: string; } interface PipelineInputPayloadFormat { /** * The format of an AVRO message payload. * Structure is documented below. */ avro?: outputs.eventarc.PipelineInputPayloadFormatAvro; /** * The format of a JSON message payload. */ json?: outputs.eventarc.PipelineInputPayloadFormatJson; /** * The format of a Protobuf message payload. * Structure is documented below. */ protobuf?: outputs.eventarc.PipelineInputPayloadFormatProtobuf; } interface PipelineInputPayloadFormatAvro { /** * The entire schema definition is stored in this field. */ schemaDefinition?: string; } interface PipelineInputPayloadFormatJson { } interface PipelineInputPayloadFormatProtobuf { /** * The entire schema definition is stored in this field. */ schemaDefinition?: string; } interface PipelineLoggingConfig { /** * The minimum severity of logs that will be sent to Stackdriver/Platform * Telemetry. Logs at severity ≥ this value will be sent, unless it is NONE. * Possible values are: `NONE`, `DEBUG`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `CRITICAL`, `ALERT`, `EMERGENCY`. */ logSeverity: string; } interface PipelineMediation { /** * Transformation defines the way to transform an incoming message. * Structure is documented below. */ transformation?: outputs.eventarc.PipelineMediationTransformation; } interface PipelineMediationTransformation { /** * The CEL expression template to apply to transform messages. 
* The following CEL extension functions are provided for * use in this CEL expression: * - merge: * map1.merge(map2) > map3 * - Merges the passed CEL map with the existing CEL map the * function is applied to. * - If the same key exists in both maps, if the key's value is type * map both maps are merged else the value from the passed map is * used. * - denormalize: * map.denormalize() > map * - Denormalizes a CEL map such that every value of type map or key * in the map is expanded to return a single level map. * - The resulting keys are "." separated indices of the map keys. * - For example: * { * "a": 1, * "b": { * "c": 2, * "d": 3 * } * "e": [4, 5] * } * .denormalize() * > { * "a": 1, * "b.c": 2, * "b.d": 3, * "e.0": 4, * "e.1": 5 * } * - setField: * map.setField(key, value) > message * - Sets the field of the message with the given key to the * given value. * - If the field is not present it will be added. * - If the field is present it will be overwritten. * - The key can be a dot separated path to set a field in a nested * message. * - Key must be of type string. * - Value may be any valid type. * - removeFields: * map.removeFields([key1, key2, ...]) > message * - Removes the fields of the map with the given keys. * - The keys can be a dot separated path to remove a field in a * nested message. * - If a key is not found it will be ignored. * - Keys must be of type string. * - toMap: * [map1, map2, ...].toMap() > map * - Converts a CEL list of CEL maps to a single CEL map * - toDestinationPayloadFormat(): * message.data.toDestinationPayloadFormat() > string or bytes * - Converts the message data to the destination payload format * specified in Pipeline.Destination.output_payload_format * - This function is meant to be applied to the message.data field. * - If the destination payload format is not set, the function will * return the message data unchanged. 
* - toCloudEventJsonWithPayloadFormat: * message.toCloudEventJsonWithPayloadFormat() > map * - Converts a message to the corresponding structure of JSON * format for CloudEvents * - This function applies toDestinationPayloadFormat() to the * message data. It also sets the corresponding datacontenttype of * the CloudEvent, as indicated by * Pipeline.Destination.output_payload_format. If no * outputPayloadFormat is set it will use the existing * datacontenttype on the CloudEvent if present, else leave * datacontenttype absent. * - This function expects that the content of the message will * adhere to the standard CloudEvent format. If it doesn't then this * function will fail. * - The result is a CEL map that corresponds to the JSON * representation of the CloudEvent. To convert that data to a JSON * string it can be chained with the toJsonString function. */ transformationTemplate?: string; } interface PipelineRetryPolicy { /** * The maximum number of delivery attempts for any message. The value must * be between 1 and 100. * The default value for this field is 5. */ maxAttempts?: number; /** * The maximum amount of seconds to wait between retry attempts. The value * must be between 1 and 600. * The default value for this field is 60. */ maxRetryDelay?: string; /** * The minimum amount of seconds to wait between retry attempts. The value * must be between 1 and 600. * The default value for this field is 5. */ minRetryDelay?: string; } interface TriggerDestination { /** * (Output) * The Cloud Function resource name. Only Cloud Functions V2 is supported. Format projects/{project}/locations/{location}/functions/{function} This is a read-only field. [WARNING] Creating Cloud Functions V2 triggers is only supported via the Cloud Functions product. An error will be returned if the user sets this value. */ cloudFunction: string; /** * Cloud Run fully-managed service that receives the events. The service should be running in the same project of the trigger. 
* Structure is documented below. */ cloudRunService?: outputs.eventarc.TriggerDestinationCloudRunService; /** * A GKE service capable of receiving events. The service should be running in the same project as the trigger. * Structure is documented below. */ gke?: outputs.eventarc.TriggerDestinationGke; /** * An HTTP endpoint destination described by a URI. * Structure is documented below. */ httpEndpoint?: outputs.eventarc.TriggerDestinationHttpEndpoint; /** * Optional. Network config is used to configure how Eventarc resolves and connects to a destination. This should only be used with HttpEndpoint destination type. * Structure is documented below. */ networkConfig?: outputs.eventarc.TriggerDestinationNetworkConfig; /** * The resource name of the Workflow whose Executions are triggered by the events. The Workflow resource should be deployed in the same project as the trigger. Format: `projects/{project}/locations/{location}/workflows/{workflow}` */ workflow?: string; } interface TriggerDestinationCloudRunService { /** * Optional. The relative path on the Cloud Run service the events should be sent to. The value must conform to the definition of URI path segment (section 3.3 of RFC2396). Examples: "/route", "route", "route/subroute". */ path?: string; /** * Required. The region the Cloud Run service is deployed in. */ region: string; /** * Required. The name of the Cloud Run service being addressed. See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services. Only services located in the same project of the trigger object can be addressed. */ service: string; } interface TriggerDestinationGke { /** * Required. The name of the cluster the GKE service is running in. The cluster must be running in the same project as the trigger being created. */ cluster: string; /** * Required. 
The name of the Google Compute Engine in which the cluster resides, which can either be compute zone (for example, us-central1-a) for the zonal clusters or region (for example, us-central1) for regional clusters. */ location: string; /** * Required. The namespace the GKE service is running in. */ namespace: string; /** * Optional. The relative path on the GKE service the events should be sent to. The value must conform to the definition of a URI path segment (section 3.3 of RFC2396). Examples: "/route", "route", "route/subroute". */ path?: string; /** * Required. Name of the GKE service. */ service: string; } interface TriggerDestinationHttpEndpoint { /** * Required. The URI of the HTTP endpoint. The value must be an RFC2396 URI string. Examples: `http://10.10.10.8:80/route`, `http://svc.us-central1.p.local:8080/`. Only HTTP and HTTPS protocols are supported. The host can be either a static IP addressable from the VPC specified by the network config, or an internal DNS hostname of the service resolvable via Cloud DNS. */ uri: string; } interface TriggerDestinationNetworkConfig { /** * Required. Name of the NetworkAttachment that allows access to the destination VPC. Format: `projects/{PROJECT_ID}/regions/{REGION}/networkAttachments/{NETWORK_ATTACHMENT_NAME}` */ networkAttachment: string; } interface TriggerMatchingCriteria { /** * Required. The name of a CloudEvents attribute. Currently, only a subset of attributes are supported for filtering. All triggers MUST provide a filter for the 'type' attribute. */ attribute: string; /** * Optional. The operator used for matching the events with the value of the filter. If not specified, only events that have an exact key-value pair specified in the filter are matched. The only allowed value is `match-path-pattern`. */ operator?: string; /** * Required. The value for the attribute. See https://cloud.google.com/eventarc/docs/creating-triggers#trigger-gcloud for available values. 
*/ value: string; } interface TriggerRetryPolicy { /** * The maximum number of delivery attempts for any message. The only valid * value is 1. */ maxAttempts?: number; } interface TriggerTransport { /** * The Pub/Sub topic and subscription used by Eventarc as delivery intermediary. * Structure is documented below. */ pubsub?: outputs.eventarc.TriggerTransportPubsub; } interface TriggerTransportPubsub { /** * (Output) * Output only. The name of the Pub/Sub subscription created and managed by Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}`. */ subscription: string; /** * Optional. The name of the Pub/Sub topic created and managed by Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME}. You may set an existing topic for triggers of the type google.cloud.pubsub.topic.v1.messagePublished` only. The topic you provide here will not be deleted by Eventarc at trigger deletion. */ topic?: string; } } export declare namespace filestore { interface GetInstanceDirectoryService { /** * Configuration for LDAP servers. */ ldaps: outputs.filestore.GetInstanceDirectoryServiceLdap[]; } interface GetInstanceDirectoryServiceLdap { /** * The LDAP domain name in the format of 'my-domain.com'. */ domain: string; /** * The groups Organizational Unit (OU) is optional. This parameter is a hint * to allow faster lookup in the LDAP namespace. In case that this parameter * is not provided, Filestore instance will query the whole LDAP namespace. */ groupsOu: string; /** * The servers names are used for specifying the LDAP servers names. * The LDAP servers names can come with two formats: * 1. DNS name, for example: 'ldap.example1.com', 'ldap.example2.com'. * 2. IP address, for example: '10.0.0.1', '10.0.0.2', '10.0.0.3'. * All servers names must be in the same format: either all DNS names or all * IP addresses. 
*/ servers: string[]; /** * The users Organizational Unit (OU) is optional. This parameter is a hint * to allow faster lookup in the LDAP namespace. In case that this parameter * is not provided, Filestore instance will query the whole LDAP namespace. */ usersOu: string; } interface GetInstanceEffectiveReplication { /** * The replication role. */ replicas: outputs.filestore.GetInstanceEffectiveReplicationReplica[]; /** * The replication role. */ role: string; } interface GetInstanceEffectiveReplicationReplica { /** * Output only. The timestamp of the latest replication snapshot taken on the active instance and is already replicated safely. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z" */ lastActiveSyncTime: string; /** * The peer instance. */ peerInstance: string; /** * Output only. The replica state */ state: string; /** * Output only. Additional information about the replication state, if available. */ stateReasons: string[]; } interface GetInstanceFileShare { /** * File share capacity in GiB. This must be at least 1024 GiB * for the standard tier, or 2560 GiB for the premium tier. */ capacityGb: number; /** * The name of a Filestore instance. * * - - - */ name: string; /** * Nfs Export Options. There is a limit of 10 export options per file share. */ nfsExportOptions: outputs.filestore.GetInstanceFileShareNfsExportOption[]; /** * The resource name of the backup, in the format * projects/{projectId}/locations/{locationId}/backups/{backupId}, * that this file share has been restored from. */ sourceBackup: string; } interface GetInstanceFileShareNfsExportOption { /** * Either READ_ONLY, for allowing only read requests on the exported directory, * or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"] */ accessMode: string; /** * An integer representing the anonymous group id with a default value of 65534. * Anon_gid may only be set with squashMode of ROOT_SQUASH. An error will be returned * if this field is specified for other squashMode settings. */ anonGid: number; /** * An integer representing the anonymous user id with a default value of 65534. * Anon_uid may only be set with squashMode of ROOT_SQUASH. An error will be returned * if this field is specified for other squashMode settings. */ anonUid: number; /** * List of either IPv4 addresses, or ranges in CIDR notation which may mount the file share. * Overlapping IP ranges are not allowed, both within and across NfsExportOptions. An error will be returned. * The limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExportOptions. */ ipRanges: string[]; /** * The source VPC network for 'ip_ranges'. * Required for instances using Private Service Connect, optional otherwise. */ network: string; /** * Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH, * for not allowing root access. The default is NO_ROOT_SQUASH. Default value: "NO_ROOT_SQUASH" Possible values: ["NO_ROOT_SQUASH", "ROOT_SQUASH"] */ squashMode: string; } interface GetInstanceInitialReplication { /** * The replication role. */ replicas: outputs.filestore.GetInstanceInitialReplicationReplica[]; /** * The replication role. Default value: "STANDBY" Possible values: ["ROLE_UNSPECIFIED", "ACTIVE", "STANDBY"] */ role: string; } interface GetInstanceInitialReplicationReplica { /** * The peer instance. */ peerInstance: string; } interface GetInstanceNetwork { /** * The network connect mode of the Filestore instance. * If not provided, the connect mode defaults to * DIRECT_PEERING. 
Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", "PRIVATE_SERVICE_CONNECT"] */ connectMode: string; /** * A list of IPv4 or IPv6 addresses. */ ipAddresses: string[]; /** * IP versions for which the instance has * IP addresses assigned. Possible values: ["ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"] */ modes: string[]; /** * The name of the GCE VPC network to which the * instance is connected. */ network: string; /** * Private Service Connect configuration. * Should only be set when connectMode is PRIVATE_SERVICE_CONNECT. */ pscConfigs: outputs.filestore.GetInstanceNetworkPscConfig[]; /** * A /29 CIDR block that identifies the range of IP * addresses reserved for this instance. */ reservedIpRange: string; } interface GetInstanceNetworkPscConfig { /** * Consumer service project in which the Private Service Connect endpoint * would be set up. This is optional, and only relevant in case the network * is a shared VPC. If this is not specified, the endpoint would be set up * in the VPC host project. */ endpointProject: string; } interface GetInstancePerformanceConfig { /** * The instance will have a fixed provisioned IOPS value, * which will remain constant regardless of instance * capacity. */ fixedIops: outputs.filestore.GetInstancePerformanceConfigFixedIop[]; /** * The instance provisioned IOPS will change dynamically * based on the capacity of the instance. */ iopsPerTbs: outputs.filestore.GetInstancePerformanceConfigIopsPerTb[]; } interface GetInstancePerformanceConfigFixedIop { /** * The number of IOPS to provision for the instance. * maxIops must be in multiple of 1000. */ maxIops: number; } interface GetInstancePerformanceConfigIopsPerTb { /** * The instance max IOPS will be calculated by multiplying * the capacity of the instance (TB) by max_iops_per_tb, * and rounding to the nearest 1000. The instance max IOPS * will be changed dynamically based on the instance * capacity. 
*/ maxIopsPerTb: number; } interface InstanceDirectoryServices { /** * Configuration for LDAP servers. * Structure is documented below. */ ldap?: outputs.filestore.InstanceDirectoryServicesLdap; } interface InstanceDirectoryServicesLdap { /** * The LDAP domain name in the format of `my-domain.com`. */ domain: string; /** * The groups Organizational Unit (OU) is optional. This parameter is a hint * to allow faster lookup in the LDAP namespace. In case that this parameter * is not provided, Filestore instance will query the whole LDAP namespace. */ groupsOu?: string; /** * The servers names are used for specifying the LDAP servers names. * The LDAP servers names can come with two formats: * 1. DNS name, for example: `ldap.example1.com`, `ldap.example2.com`. * 2. IP address, for example: `10.0.0.1`, `10.0.0.2`, `10.0.0.3`. * All servers names must be in the same format: either all DNS names or all * IP addresses. */ servers: string[]; /** * The users Organizational Unit (OU) is optional. This parameter is a hint * to allow faster lookup in the LDAP namespace. In case that this parameter * is not provided, Filestore instance will query the whole LDAP namespace. */ usersOu?: string; } interface InstanceEffectiveReplication { /** * The replication role. * Structure is documented below. */ replicas?: outputs.filestore.InstanceEffectiveReplicationReplica[]; /** * (Output) * The replication role. */ role: string; } interface InstanceEffectiveReplicationReplica { /** * (Output) * Output only. The timestamp of the latest replication snapshot taken on the active instance and is already replicated safely. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z" */ lastActiveSyncTime: string; /** * The peer instance. */ peerInstance: string; /** * (Output) * Output only. The replica state */ state: string; /** * (Output) * Output only. 
Additional information about the replication state, if available. */ stateReasons: string[]; } interface InstanceFileShares { /** * File share capacity in GiB. This must be at least 1024 GiB * for the standard tier, or 2560 GiB for the premium tier. */ capacityGb: number; /** * The name of the fileshare (16 characters or less) */ name: string; /** * Nfs Export Options. There is a limit of 10 export options per file share. * Structure is documented below. */ nfsExportOptions?: outputs.filestore.InstanceFileSharesNfsExportOption[]; /** * The resource name of the backup, in the format * projects/{projectId}/locations/{locationId}/backups/{backupId}, * that this file share has been restored from. */ sourceBackup?: string; } interface InstanceFileSharesNfsExportOption { /** * Either READ_ONLY, for allowing only read requests on the exported directory, * or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. * Default value is `READ_WRITE`. * Possible values are: `READ_ONLY`, `READ_WRITE`. */ accessMode?: string; /** * An integer representing the anonymous group id with a default value of 65534. * Anon_gid may only be set with squashMode of ROOT_SQUASH. An error will be returned * if this field is specified for other squashMode settings. */ anonGid?: number; /** * An integer representing the anonymous user id with a default value of 65534. * Anon_uid may only be set with squashMode of ROOT_SQUASH. An error will be returned * if this field is specified for other squashMode settings. */ anonUid?: number; /** * List of either IPv4 addresses, or ranges in CIDR notation which may mount the file share. * Overlapping IP ranges are not allowed, both within and across NfsExportOptions. An error will be returned. * The limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExportOptions. */ ipRanges?: string[]; /** * The source VPC network for `ipRanges`. * Required for instances using Private Service Connect, optional otherwise. 
*/ network?: string; /** * Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH, * for not allowing root access. The default is NO_ROOT_SQUASH. * Default value is `NO_ROOT_SQUASH`. * Possible values are: `NO_ROOT_SQUASH`, `ROOT_SQUASH`. */ squashMode?: string; } interface InstanceInitialReplication { /** * The replication role. * Structure is documented below. */ replicas?: outputs.filestore.InstanceInitialReplicationReplica[]; /** * The replication role. * Default value is `STANDBY`. * Possible values are: `ROLE_UNSPECIFIED`, `ACTIVE`, `STANDBY`. */ role?: string; } interface InstanceInitialReplicationReplica { /** * The peer instance. */ peerInstance: string; } interface InstanceNetwork { /** * The network connect mode of the Filestore instance. * If not provided, the connect mode defaults to * DIRECT_PEERING. * Default value is `DIRECT_PEERING`. * Possible values are: `DIRECT_PEERING`, `PRIVATE_SERVICE_ACCESS`, `PRIVATE_SERVICE_CONNECT`. */ connectMode?: string; /** * (Output) * A list of IPv4 or IPv6 addresses. */ ipAddresses: string[]; /** * IP versions for which the instance has * IP addresses assigned. * Each value may be one of: `ADDRESS_MODE_UNSPECIFIED`, `MODE_IPV4`, `MODE_IPV6`. */ modes: string[]; /** * The name of the GCE VPC network to which the * instance is connected. */ network: string; /** * Private Service Connect configuration. * Should only be set when connectMode is PRIVATE_SERVICE_CONNECT. * Structure is documented below. */ pscConfig?: outputs.filestore.InstanceNetworkPscConfig; /** * A /29 CIDR block that identifies the range of IP * addresses reserved for this instance. */ reservedIpRange: string; } interface InstanceNetworkPscConfig { /** * Consumer service project in which the Private Service Connect endpoint * would be set up. This is optional, and only relevant in case the network * is a shared VPC. If this is not specified, the endpoint would be set up * in the VPC host project. 
*/ endpointProject?: string; } interface InstancePerformanceConfig { /** * The instance will have a fixed provisioned IOPS value, * which will remain constant regardless of instance * capacity. * Structure is documented below. */ fixedIops?: outputs.filestore.InstancePerformanceConfigFixedIops; /** * The instance provisioned IOPS will change dynamically * based on the capacity of the instance. * Structure is documented below. */ iopsPerTb?: outputs.filestore.InstancePerformanceConfigIopsPerTb; } interface InstancePerformanceConfigFixedIops { /** * The number of IOPS to provision for the instance. * maxIops must be in multiple of 1000. */ maxIops?: number; } interface InstancePerformanceConfigIopsPerTb { /** * The instance max IOPS will be calculated by multiplying * the capacity of the instance (TB) by max_iops_per_tb, * and rounding to the nearest 1000. The instance max IOPS * will be changed dynamically based on the instance * capacity. */ maxIopsPerTb?: number; } } export declare namespace firebase { interface AiLogicConfigGenerativeLanguageConfig { /** * The value of the API key. The API key must have * `generativelanguage.googleapis.com` in its "API restrictions" allowlist. * Note that this API is sometimes called the *Generative Language API* in * the Google Cloud console. * Do **not** add this Gemini API key into your app's codebase * **Note**: This property is sensitive and will not be displayed in the plan. */ apiKey?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * The value of the API key. The API key must have * `generativelanguage.googleapis.com` in its "API restrictions" allowlist. * Note that this API is sometimes called the *Generative Language API* in * the Google Cloud console. * Do **not** add this Gemini API key into your app's codebase * **Note**: This property is write-only and will not be read from the API. 
* * > **Note:** One of `apiKey` or `apiKeyWo` can only be set. */ apiKeyWo?: string; /** * Triggers update of `apiKeyWo` write-only. Increment this value when an update to `apiKeyWo` is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ apiKeyWoVersion?: string; } interface AiLogicConfigTelemetryConfig { /** * The current monitoring mode used for this project. * Possible values: * NONE * ALL */ mode?: string; /** * The percentage of requests to be sampled, expressed as a fraction * in the range (0,1]. Note that the actual sampling rate may be lower than * the specified value if the system is overloaded. Default is 1.0. */ samplingRate?: number; } interface AppHostingBackendCodebase { /** * The resource name for the Developer Connect * [`gitRepositoryLink`](https://cloud.google.com/developer-connect/docs/api/reference/rest/v1/projects.locations.connections.gitRepositoryLinks) * connected to this backend, in the format: * projects/{project}/locations/{location}/connections/{connection}/gitRepositoryLinks/{repositoryLink} */ repository: string; /** * If `repository` is provided, the directory relative to the root of the * repository to use as the root for the deployed web app. */ rootDirectory?: string; } interface AppHostingBackendManagedResource { /** * (Output) * A managed Cloud Run * [`service`](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services#resource:-service). * Structure is documented below. 
*/ runServices: outputs.firebase.AppHostingBackendManagedResourceRunService[]; } interface AppHostingBackendManagedResourceRunService { /** * (Output) * The name of the Cloud Run * [`service`](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services#resource:-service), * in the format: * projects/{project}/locations/{location}/services/{serviceId} */ service: string; } interface AppHostingBuildError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. There is a common set of * message types for APIs to use. */ details: { [key: string]: string; }[]; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message: string; } interface AppHostingBuildSource { /** * A codebase source, representing the state of the codebase * that the build will be created at. * Structure is documented below. */ codebase?: outputs.firebase.AppHostingBuildSourceCodebase; /** * The URI of an Artifact Registry * [container * image](https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages) * to use as the build source. * Structure is documented below. */ container?: outputs.firebase.AppHostingBuildSourceContainer; } interface AppHostingBuildSourceCodebase { /** * (Output) * Version control metadata for a user associated with a resolved codebase. * Currently assumes a Git user. * Structure is documented below. */ authors: outputs.firebase.AppHostingBuildSourceCodebaseAuthor[]; /** * The branch in the codebase to build from, using the latest commit. */ branch?: string; /** * The commit in the codebase to build from. * * * The `author` block contains: */ commit?: string; /** * (Output) * The message of a codebase change. 
*/ commitMessage: string; /** * (Output) * The time the change was made. */ commitTime: string; /** * (Output) * The 'name' field in a Git user's git.config. Required by Git. */ displayName: string; /** * (Output) * The full SHA-1 hash of a Git commit, if available. */ hash: string; /** * (Output) * A URI linking to the codebase on a hosting provider's website. May * not be valid if the commit has been rebased or force-pushed out of * existence in the linked repository. */ uri: string; } interface AppHostingBuildSourceCodebaseAuthor { /** * Human-readable name. 63 character limit. */ displayName: string; /** * The 'email' field in a Git user's git.config, if available. */ email: string; /** * The URI of an image file associated with the user's account in an * external source control provider, if available. */ imageUri: string; } interface AppHostingBuildSourceContainer { /** * A URI representing a container for the backend to use. */ image: string; } interface AppHostingDomainCustomDomainStatus { /** * (Output) * Possible values: * CERT_PREPARING * CERT_VALIDATING * CERT_PROPAGATING * CERT_ACTIVE * CERT_EXPIRING_SOON * CERT_EXPIRED */ certState: string; /** * (Output) * Possible values: * HOST_UNHOSTED * HOST_UNREACHABLE * HOST_NON_FAH * HOST_CONFLICT * HOST_WRONG_SHARD * HOST_ACTIVE */ hostState: string; /** * (Output) * A list of issues with domain configuration. Allows users to self-correct * problems with DNS records. * Structure is documented below. */ issues: outputs.firebase.AppHostingDomainCustomDomainStatusIssue[]; /** * (Output) * Possible values: * OWNERSHIP_MISSING * OWNERSHIP_UNREACHABLE * OWNERSHIP_MISMATCH * OWNERSHIP_CONFLICT * OWNERSHIP_PENDING * OWNERSHIP_ACTIVE */ ownershipState: string; /** * (Output) * Lists the records that must be added or removed to a custom domain's DNS * in order to finish setup and start serving content. * Field is present during onboarding. 
Also present after onboarding if one * or more of the above states is not *_ACTIVE, indicating the domain's DNS * records are in a bad state. * Structure is documented below. */ requiredDnsUpdates: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdate[]; } interface AppHostingDomainCustomDomainStatusIssue { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: string; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message: string; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdate { /** * (Output) * The last time App Hosting checked your custom domain's DNS records. */ checkTime: string; /** * (Output) * The set of DNS records App Hosting needs in order to be able to serve * secure content on the domain. * Structure is documented below. */ desireds: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdateDesired[]; /** * (Output) * The set of DNS records App Hosting discovered when inspecting a domain. * Structure is documented below. */ discovereds: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdateDiscovered[]; /** * (Output) * The domain the record pertains to, e.g. `foo.bar.com.`. */ domainName: string; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdateDesired { /** * (Output) * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). 
* Structure is documented below. */ checkErrors: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdateDesiredCheckError[]; /** * (Output) * The domain the record pertains to, e.g. `foo.bar.com.`. */ domainName: string; /** * (Output) * Records on the domain. * Structure is documented below. */ records: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdateDesiredRecord[]; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdateDesiredCheckError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: string; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message: string; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdateDesiredRecord { /** * (Output) * The domain the record pertains to, e.g. `foo.bar.com.`. */ domainName: string; /** * (Output) * The data of the record. The meaning of the value depends on record type: * - A and AAAA: IP addresses for the domain. * - CNAME: Another domain to check for records. * - TXT: Arbitrary text strings associated with the domain. App Hosting * uses TXT records to determine which Firebase projects have * permission to act on the domain's behalf. * - CAA: The record's flags, tag, and value, e.g. `0 issue "pki.goog"`. */ rdata: string; /** * (Output) * An enum that indicates which state(s) this DNS record applies to. Populated * for all records with an `ADD` or `REMOVE` required action. */ relevantStates: string[]; /** * (Output) * An enum that indicates the required action for this record. Populated * when the record is part of a required change in a `DnsUpdates` * `discovered` or `desired` record set. 
* Possible values: * NONE * ADD * REMOVE */ requiredAction: string; /** * (Output) * The record's type, which determines what data the record contains. * Possible values: * A * CNAME * TXT * AAAA * CAA */ type: string; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdateDiscovered { /** * (Output) * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ checkErrors: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdateDiscoveredCheckError[]; /** * (Output) * The domain the record pertains to, e.g. `foo.bar.com.`. */ domainName: string; /** * (Output) * Records on the domain. * Structure is documented below. */ records: outputs.firebase.AppHostingDomainCustomDomainStatusRequiredDnsUpdateDiscoveredRecord[]; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdateDiscoveredCheckError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: string; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message: string; } interface AppHostingDomainCustomDomainStatusRequiredDnsUpdateDiscoveredRecord { /** * (Output) * The domain the record pertains to, e.g. `foo.bar.com.`. */ domainName: string; /** * (Output) * The data of the record. The meaning of the value depends on record type: * - A and AAAA: IP addresses for the domain. 
* - CNAME: Another domain to check for records. * - TXT: Arbitrary text strings associated with the domain. App Hosting * uses TXT records to determine which Firebase projects have * permission to act on the domain's behalf. * - CAA: The record's flags, tag, and value, e.g. `0 issue "pki.goog"`. */ rdata: string; /** * (Output) * An enum that indicates which state(s) this DNS record applies to. Populated * for all records with an `ADD` or `REMOVE` required action. */ relevantStates: string[]; /** * (Output) * An enum that indicates the required action for this record. Populated * when the record is part of a required change in a `DnsUpdates` * `discovered` or `desired` record set. * Possible values: * NONE * ADD * REMOVE */ requiredAction: string; /** * (Output) * The record's type, which determines what data the record contains. * Possible values: * A * CNAME * TXT * AAAA * CAA */ type: string; } interface AppHostingDomainServe { /** * Specifies redirect behavior for a domain. * Structure is documented below. */ redirect?: outputs.firebase.AppHostingDomainServeRedirect; } interface AppHostingDomainServeRedirect { /** * The status code to use in a redirect response. Must be a valid HTTP 3XX * status code. Defaults to 302 if not present. */ status?: string; /** * The URI of the redirect's intended destination. This URI will be * prepended to the original request path. URIs without a scheme are * assumed to be HTTPS. */ uri: string; } interface AppHostingTrafficCurrent { /** * (Output) * A list of traffic splits that together represent where traffic is being routed. * Structure is documented below. */ splits: outputs.firebase.AppHostingTrafficCurrentSplit[]; } interface AppHostingTrafficCurrentSplit { /** * The build that traffic is being routed to. */ build: string; /** * The percentage of traffic to send to the build. Currently must be 100 or 0. 
*/ percent: number; } interface AppHostingTrafficRolloutPolicy { /** * Specifies a branch that triggers a new build to be started with this * policy. If not set, no automatic rollouts will happen. */ codebaseBranch?: string; /** * A flag that, if true, prevents rollouts from being created via this RolloutPolicy. */ disabled?: boolean; /** * (Output) * If disabled is set, the time at which the rollouts were disabled. */ disabledTime: string; } interface AppHostingTrafficTarget { /** * A list of traffic splits that together represent where traffic is being routed. * Structure is documented below. */ splits: outputs.firebase.AppHostingTrafficTargetSplit[]; } interface AppHostingTrafficTargetSplit { /** * The build that traffic is being routed to. */ build: string; /** * The percentage of traffic to send to the build. Currently must be 100 or 0. */ percent: number; } interface ExtensionsInstanceConfig { /** * List of extension events selected by consumer that extension is allowed to * emit, identified by their types. */ allowedEventTypes?: string[]; /** * (Output) * The time at which the Extension Instance Config was created. */ createTime: string; /** * Fully qualified Eventarc resource name that consumers should use for event triggers. */ eventarcChannel: string; /** * The ref of the Extension from the Registry (e.g. publisher-id/awesome-extension) */ extensionRef: string; /** * The version of the Extension from the Registry (e.g. 1.0.3). If left blank, latest is assumed. */ extensionVersion: string; /** * (Output) * The unique identifier for this configuration. */ name: string; /** * Environment variables that may be configured for the Extension */ params: { [key: string]: string; }; /** * (Output) * Postinstall instructions to be shown for this Extension, with * template strings representing function and parameter values substituted * with actual values. 
These strings include: ${param:FOO}, * ${function:myFunc.url}, * ${function:myFunc.name}, and ${function:myFunc.location} */ populatedPostinstallContent: string; /** * Params whose values are only available at deployment time. * Unlike other params, these will not be set as environment variables on * functions. See a full list of system parameters at * https://firebase.google.com/docs/extensions/publishers/parameters#system_parameters */ systemParams: { [key: string]: string; }; } interface ExtensionsInstanceErrorStatus { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. */ details?: { [key: string]: string; }[]; /** * A developer-facing error message, which should be in English. */ message?: string; } interface ExtensionsInstanceRuntimeData { /** * The fatal error state for the extension instance * Structure is documented below. */ fatalError?: outputs.firebase.ExtensionsInstanceRuntimeDataFatalError; /** * The processing state for the extension instance * Structure is documented below. */ processingState?: outputs.firebase.ExtensionsInstanceRuntimeDataProcessingState; /** * The time of the last state update. */ stateUpdateTime?: string; } interface ExtensionsInstanceRuntimeDataFatalError { /** * The error message. This is set by the extension developer to give * more detail on why the extension is unusable and must be re-installed * or reconfigured. */ errorMessage?: string; } interface ExtensionsInstanceRuntimeDataProcessingState { /** * Details about the processing. e.g. This could include the type of * processing in progress or it could list errors or failures. * This information will be shown in the console on the detail page * for the extension instance. */ detailMessage?: string; /** * The processing state of the extension instance. */ state?: string; } interface HostingCustomDomainCert { /** * The state of the certificate. 
Only the `CERT_ACTIVE` and * `CERT_EXPIRING_SOON` states provide SSL coverage for a domain name. If the * state is `PROPAGATING` and Hosting had an active cert for the domain name * before, that formerly-active cert provides SSL coverage for the domain name * until the current cert propagates. */ state?: string; /** * The record's type, which determines what data the record contains. */ type?: string; /** * A set of ACME challenges you can add to your DNS records or existing, * non-Hosting hosting provider to allow Hosting to create an SSL certificate * for your domain name before you point traffic toward hosting. You can use * these challenges as part of a zero downtime transition from your old * provider to Hosting. * Structure is documented below. */ verification?: outputs.firebase.HostingCustomDomainCertVerification; } interface HostingCustomDomainCertVerification { /** * A `TXT` record to add to your DNS records that confirms your intent to * let Hosting create an SSL cert for your domain name. * Structure is documented below. */ dns?: outputs.firebase.HostingCustomDomainCertVerificationDns; /** * A file to add to your existing, non-Hosting hosting service that confirms * your intent to let Hosting create an SSL cert for your domain name. * Structure is documented below. */ http?: outputs.firebase.HostingCustomDomainCertVerificationHttp; } interface HostingCustomDomainCertVerificationDns { /** * (Output) * The last time Hosting checked your CustomDomain's DNS records. */ checkTime: string; /** * A text string to serve at the path. */ desireds?: outputs.firebase.HostingCustomDomainCertVerificationDnsDesired[]; /** * Whether Hosting was able to find the required file contents on the * specified path during its last check. */ discovereds?: outputs.firebase.HostingCustomDomainCertVerificationDnsDiscovered[]; } interface HostingCustomDomainCertVerificationDnsDesired { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. 
*/ domainName?: string; /** * Records on the domain * Structure is documented below. */ records?: outputs.firebase.HostingCustomDomainCertVerificationDnsDesiredRecord[]; } interface HostingCustomDomainCertVerificationDnsDesiredRecord { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * The data of the record. The meaning of the value depends on record type: * - A and AAAA: IP addresses for the domain name. * - CNAME: Another domain to check for records. * - TXT: Arbitrary text strings associated with the domain name. Hosting * uses TXT records to determine which Firebase Projects have * permission to act on the domain name's behalf. * - CAA: The record's flags, tag, and value, e.g. `0 issue "pki.goog"`. */ rdata?: string; /** * Indicates the required action for this record. */ requiredAction?: string; /** * The record's type, which determines what data the record contains. */ type?: string; } interface HostingCustomDomainCertVerificationDnsDiscovered { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * Records on the domain * Structure is documented below. */ records?: outputs.firebase.HostingCustomDomainCertVerificationDnsDiscoveredRecord[]; } interface HostingCustomDomainCertVerificationDnsDiscoveredRecord { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * The data of the record. The meaning of the value depends on record type: * - A and AAAA: IP addresses for the domain name. * - CNAME: Another domain to check for records. * - TXT: Arbitrary text strings associated with the domain name. Hosting * uses TXT records to determine which Firebase Projects have * permission to act on the domain name's behalf. * - CAA: The record's flags, tag, and value, e.g. `0 issue "pki.goog"`. */ rdata?: string; /** * Indicates the required action. 
*/ requiredAction?: string; /** * The record's type, which determines what data the record contains. */ type?: string; } interface HostingCustomDomainCertVerificationHttp { /** * A text string to serve at the path. */ desired?: string; /** * Whether Hosting was able to find the required file contents on the * specified path during its last check. */ discovered?: string; /** * (Output) * The last time Hosting systems checked for the file contents. */ lastCheckTime: string; /** * The path to the file. */ path?: string; } interface HostingCustomDomainIssue { /** * The status code, which should be an enum value of `google.rpc.Code` */ code?: number; /** * A list of messages that carry the error details. */ details?: string; /** * Error message */ message?: string; } interface HostingCustomDomainRequiredDnsUpdate { /** * (Output) * The last time Hosting checked your CustomDomain's DNS records. */ checkTime: string; /** * A text string to serve at the path. */ desireds?: outputs.firebase.HostingCustomDomainRequiredDnsUpdateDesired[]; /** * Whether Hosting was able to find the required file contents on the * specified path during its last check. */ discovereds?: outputs.firebase.HostingCustomDomainRequiredDnsUpdateDiscovered[]; } interface HostingCustomDomainRequiredDnsUpdateDesired { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * Records on the domain * Structure is documented below. */ records?: outputs.firebase.HostingCustomDomainRequiredDnsUpdateDesiredRecord[]; } interface HostingCustomDomainRequiredDnsUpdateDesiredRecord { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * The data of the record. The meaning of the value depends on record type: * - A and AAAA: IP addresses for the domain name. * - CNAME: Another domain to check for records. * - TXT: Arbitrary text strings associated with the domain name. 
Hosting * uses TXT records to determine which Firebase Projects have * permission to act on the domain name's behalf. * - CAA: The record's flags, tag, and value, e.g. `0 issue "pki.goog"`. */ rdata?: string; /** * Indicates the required action for this record. */ requiredAction?: string; /** * The record's type, which determines what data the record contains. */ type?: string; } interface HostingCustomDomainRequiredDnsUpdateDiscovered { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * Records on the domain * Structure is documented below. */ records?: outputs.firebase.HostingCustomDomainRequiredDnsUpdateDiscoveredRecord[]; } interface HostingCustomDomainRequiredDnsUpdateDiscoveredRecord { /** * The domain name the record pertains to, e.g. `foo.bar.com.`. */ domainName?: string; /** * The data of the record. The meaning of the value depends on record type: * - A and AAAA: IP addresses for the domain name. * - CNAME: Another domain to check for records. * - TXT: Arbitrary text strings associated with the domain name. Hosting * uses TXT records to determine which Firebase Projects have * permission to act on the domain name's behalf. * - CAA: The record's flags, tag, and value, e.g. `0 issue "pki.goog"`. */ rdata?: string; /** * Indicates the required action for this record. */ requiredAction?: string; /** * The record's type, which determines what data the record contains. */ type?: string; } interface HostingVersionConfig { /** * An array of objects, where each object specifies a URL pattern that, if matched to the request URL path, * triggers Hosting to apply the specified custom response headers. * Structure is documented below. */ headers?: outputs.firebase.HostingVersionConfigHeader[]; /** * An array of objects (called redirect rules), where each rule specifies a URL pattern that, if matched to the request URL path, * triggers Hosting to respond with a redirect to the specified destination path. 
* Structure is documented below. */ redirects?: outputs.firebase.HostingVersionConfigRedirect[]; /** * An array of objects (called rewrite rules), where each rule specifies a URL pattern that, if matched to the * request URL path, triggers Hosting to respond as if the service were given the specified destination URL. * Structure is documented below. */ rewrites?: outputs.firebase.HostingVersionConfigRewrite[]; } interface HostingVersionConfigHeader { /** * The user-supplied glob to match against the request URL path. */ glob?: string; /** * The additional headers to add to the response. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ headers: { [key: string]: string; }; /** * The user-supplied RE2 regular expression to match against the request URL path. */ regex?: string; } interface HostingVersionConfigRedirect { /** * The user-supplied glob to match against the request URL path. */ glob?: string; /** * The value to put in the HTTP location header of the response. * The location can contain capture group values from the pattern using a : prefix to identify * the segment and an optional * to capture the rest of the URL. For example: */ location: string; /** * The user-supplied RE2 regular expression to match against the request URL path. */ regex?: string; /** * The status HTTP code to return in the response. It must be a valid 3xx status code. */ statusCode: number; } interface HostingVersionConfigRewrite { /** * The function to proxy requests to. Must match the exported function name exactly. */ function?: string; /** * The user-supplied glob to match against the request URL path. */ glob?: string; /** * The URL path to rewrite the request to. */ path?: string; /** * The user-supplied RE2 regular expression to match against the request URL path. */ regex?: string; /** * The request will be forwarded to Cloud Run. * Structure is documented below. 
*/ run?: outputs.firebase.HostingVersionConfigRewriteRun; } interface HostingVersionConfigRewriteRun { /** * Optional. User-provided region where the Cloud Run service is hosted. Defaults to `us-central1` if not supplied. */ region?: string; /** * User-defined ID of the Cloud Run service. */ serviceId: string; } } export declare namespace firebaserules { interface RulesetMetadata { /** * Services that this ruleset has declarations for (e.g., "cloud.firestore"). There may be 0+ of these. */ services: string[]; } interface RulesetSource { /** * `File` set constituting the `Source` bundle. */ files: outputs.firebaserules.RulesetSourceFile[]; /** * `Language` of the `Source` bundle. If unspecified, the language will default to `FIREBASE_RULES`. Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS * * - - - */ language?: string; } interface RulesetSourceFile { /** * Textual Content. */ content: string; /** * Fingerprint (e.g. github sha) associated with the `File`. */ fingerprint?: string; /** * File name. */ name: string; } } export declare namespace firestore { interface BackupScheduleDailyRecurrence { } interface BackupScheduleWeeklyRecurrence { /** * The day of week to run. * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ day?: string; } interface DatabaseCmekConfig { /** * (Output) * Currently in-use KMS key versions (https://cloud.google.com/kms/docs/resource-hierarchy#key_versions). * During key rotation (https://cloud.google.com/kms/docs/key-rotation), there can be * multiple in-use key versions. * The expected format is * `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{key_version}`. */ activeKeyVersions: string[]; /** * The resource ID of a Cloud KMS key. If set, the database created will * be a Customer-managed Encryption Key (CMEK) database encrypted with * this key. 
This feature is allowlist only in initial launch. * Only keys in the same location as this database are allowed to be used * for encryption. For Firestore's nam5 multi-region, this corresponds to Cloud KMS * multi-region us. For Firestore's eur3 multi-region, this corresponds to * Cloud KMS multi-region europe. See https://cloud.google.com/kms/docs/locations. * This value should be the KMS key resource ID in the format of * `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`. * How to retrieve this resource ID is listed at * https://cloud.google.com/kms/docs/getting-resource-ids#getting_the_id_for_a_key_and_version. */ kmsKeyName: string; } interface FieldIndexConfig { /** * The indexes to configure on the field. Order or array contains must be specified. * Structure is documented below. */ indexes?: outputs.firestore.FieldIndexConfigIndex[]; } interface FieldIndexConfigIndex { /** * Indicates that this field supports operations on arrayValues. Only one of `order` and `arrayConfig` can * be specified. * Possible values are: `CONTAINS`. */ arrayConfig?: string; /** * Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=, !=. * Only one of `order` and `arrayConfig` can be specified. * Possible values are: `ASCENDING`, `DESCENDING`. */ order?: string; /** * The scope at which a query is run. Collection scoped queries require you specify * the collection at query time. Collection group scope allows queries across all * collections with the same id. * Default value is `COLLECTION`. * Possible values are: `COLLECTION`, `COLLECTION_GROUP`. */ queryScope?: string; } interface FieldTtlConfig { /** * (Output) * The state of TTL (time-to-live) configuration for documents that have this Field set. */ state: string; } interface IndexField { /** * Indicates that this field supports operations on arrayValues. Only one of `order`, `arrayConfig`, and * `vectorConfig` can be specified. 
* Possible values are: `CONTAINS`. */ arrayConfig?: string; /** * Name of the field. */ fieldPath?: string; /** * Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=. * Only one of `order`, `arrayConfig`, and `vectorConfig` can be specified. * Possible values are: `ASCENDING`, `DESCENDING`. */ order?: string; /** * Indicates that this field supports vector search operations. Only one of `order`, `arrayConfig`, and * `vectorConfig` can be specified. Vector Fields should come after the field path `__name__`. * Structure is documented below. */ vectorConfig?: outputs.firestore.IndexFieldVectorConfig; } interface IndexFieldVectorConfig { /** * The resulting index will only include vectors of this dimension, and can be used for vector search * with the same dimension. */ dimension?: number; /** * Indicates the vector index is a flat index. */ flat?: outputs.firestore.IndexFieldVectorConfigFlat; } interface IndexFieldVectorConfigFlat { } interface UserCredsResourceIdentity { /** * (Output) * The principal identifier string. * See https://cloud.google.com/iam/docs/principal-identifiers. */ principal: string; } } export declare namespace folder { interface AccessApprovalSettingsEnrolledService { /** * The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): * * all * * App Engine * * BigQuery * * Cloud Bigtable * * Cloud Key Management Service * * Compute Engine * * Cloud Dataflow * * Cloud Identity and Access Management * * Cloud Pub/Sub * * Cloud Storage * * Persistent Disk * Note: These values are supported as input, but considered a legacy format: * * all * * appengine.googleapis.com * * bigquery.googleapis.com * * bigtable.googleapis.com * * cloudkms.googleapis.com * * compute.googleapis.com * * dataflow.googleapis.com * * iam.googleapis.com * * pubsub.googleapis.com * * storage.googleapis.com */ cloudProduct: string; /** * The enrollment level of the service. 
* Default value is `BLOCK_ALL`. * Possible values are: `BLOCK_ALL`. */ enrollmentLevel?: string; } interface GetOrganizationPolicyBooleanPolicy { /** * If true, then the Policy is enforced. If false, then any configuration is acceptable. */ enforced: boolean; } interface GetOrganizationPolicyListPolicy { /** * One or the other must be set. */ allows: outputs.folder.GetOrganizationPolicyListPolicyAllow[]; /** * One or the other must be set. */ denies: outputs.folder.GetOrganizationPolicyListPolicyDeny[]; /** * If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. */ inheritFromParent: boolean; /** * The Google Cloud Console will try to default to a configuration that matches the value specified in this field. */ suggestedValue: string; } interface GetOrganizationPolicyListPolicyAllow { /** * The policy allows or denies all values. */ all: boolean; /** * The policy can define specific values that are allowed or denied. */ values: string[]; } interface GetOrganizationPolicyListPolicyDeny { /** * The policy allows or denies all values. */ all: boolean; /** * The policy can define specific values that are allowed or denied. */ values: string[]; } interface GetOrganizationPolicyRestorePolicy { /** * May only be set to true. If set, then the default Policy is restored. */ default: boolean; } interface IAMBindingCondition { description?: string; expression: string; title: string; } interface IAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface IamAuditConfigAuditLogConfig { /** * Identities that do not cause logging for this type of permission. The format is the same as that for `members`. */ exemptedMembers?: string[]; /** * Permission type for which logging is to be configured. Must be one of `DATA_READ`, `DATA_WRITE`, or `ADMIN_READ`. */ logType: string; } interface OrganizationPolicyBooleanPolicy { /** * If true, then the Policy is enforced. If false, then any configuration is acceptable. */ enforced: boolean; } interface OrganizationPolicyListPolicy { /** * or `deny` - (Optional) One or the other must be set. */ allow?: outputs.folder.OrganizationPolicyListPolicyAllow; /** * One or the other must be set. */ deny?: outputs.folder.OrganizationPolicyListPolicyDeny; /** * If set to true, the values from the effective Policy of the parent resource * are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. * * The `allow` or `deny` blocks support: */ inheritFromParent?: boolean; /** * The Google Cloud Console will try to default to a configuration that matches the value specified in this field. */ suggestedValue: string; } interface OrganizationPolicyListPolicyAllow { /** * The policy allows or denies all values. */ all?: boolean; /** * The policy can define specific values that are allowed or denied. */ values?: string[]; } interface OrganizationPolicyListPolicyDeny { /** * The policy allows or denies all values. */ all?: boolean; /** * The policy can define specific values that are allowed or denied. 
*/ values?: string[]; } interface OrganizationPolicyRestorePolicy { /** * May only be set to true. If set, then the default Policy is restored. */ default: boolean; } } export declare namespace gemini { interface CodeToolsSettingEnabledTool { /** * Link to the Dev Connect Account Connector that holds the user credentials. * projects/{project}/locations/{location}/accountConnectors/{account_connector_id} */ accountConnector?: string; /** * Configuration parameters for the tool. * Structure is documented below. */ configs?: outputs.gemini.CodeToolsSettingEnabledToolConfig[]; /** * Handle used to invoke the tool. */ handle: string; /** * Link to the Tool */ tool: string; /** * Overridden URI, if allowed by Tool. */ uriOverride?: string; } interface CodeToolsSettingEnabledToolConfig { /** * Key of the configuration item. */ key: string; /** * Value of the configuration item. */ value: string; } interface RepositoryGroupIamBindingCondition { description?: string; expression: string; title: string; } interface RepositoryGroupIamMemberCondition { description?: string; expression: string; title: string; } interface RepositoryGroupRepository { /** * Required. The Git branch pattern used for indexing in RE2 syntax. * See https://github.com/google/re2/wiki/syntax for syntax. */ branchPattern: string; /** * Required. The DeveloperConnect repository full resource name, relative resource name * or resource URL to be indexed. */ resource: string; } } export declare namespace gkebackup { interface BackupPlanBackupConfig { /** * If True, include all namespaced resources. */ allNamespaces?: boolean; /** * This defines a customer managed encryption key that will be used to encrypt the "config" * portion (the Kubernetes resources) of Backups created via this plan. * Structure is documented below. 
*/ encryptionKey?: outputs.gkebackup.BackupPlanBackupConfigEncryptionKey; /** * This flag specifies whether Kubernetes Secret resources should be included * when they fall into the scope of Backups. */ includeSecrets: boolean; /** * This flag specifies whether volume data should be backed up when PVCs are * included in the scope of a Backup. */ includeVolumeData: boolean; /** * This flag specifies whether Backups will not fail when * Backup for GKE detects Kubernetes configuration that is * non-standard or requires additional setup to restore. */ permissiveMode?: boolean; /** * A list of namespaced Kubernetes Resources. * Structure is documented below. */ selectedApplications?: outputs.gkebackup.BackupPlanBackupConfigSelectedApplications; /** * If set, include just the resources in the listed namespace Labels. * Structure is documented below. */ selectedNamespaceLabels?: outputs.gkebackup.BackupPlanBackupConfigSelectedNamespaceLabels; /** * If set, include just the resources in the listed namespaces. * Structure is documented below. */ selectedNamespaces?: outputs.gkebackup.BackupPlanBackupConfigSelectedNamespaces; } interface BackupPlanBackupConfigEncryptionKey { /** * Google Cloud KMS encryption key. Format: projects/*/locations/*/keyRings/*/cryptoKeys/* */ gcpKmsEncryptionKey: string; } interface BackupPlanBackupConfigSelectedApplications { /** * A list of namespaced Kubernetes resources. * Structure is documented below. */ namespacedNames: outputs.gkebackup.BackupPlanBackupConfigSelectedApplicationsNamespacedName[]; } interface BackupPlanBackupConfigSelectedApplicationsNamespacedName { /** * The name of a Kubernetes Resource. */ name: string; /** * The namespace of a Kubernetes Resource. */ namespace: string; } interface BackupPlanBackupConfigSelectedNamespaceLabels { /** * A list of Kubernetes Namespace labels. * Structure is documented below. 
*/ resourceLabels: outputs.gkebackup.BackupPlanBackupConfigSelectedNamespaceLabelsResourceLabel[]; } interface BackupPlanBackupConfigSelectedNamespaceLabelsResourceLabel { /** * The key of the kubernetes label. */ key: string; /** * The value of the Label. */ value: string; } interface BackupPlanBackupConfigSelectedNamespaces { /** * A list of Kubernetes Namespaces. */ namespaces: string[]; } interface BackupPlanBackupSchedule { /** * A standard cron string that defines a repeating schedule for * creating Backups via this BackupPlan. * This is mutually exclusive with the rpoConfig field since at most one * schedule can be defined for a BackupPlan. * If this is defined, then backupRetainDays must also be defined. */ cronSchedule?: string; /** * This flag denotes whether automatic Backup creation is paused for this BackupPlan. */ paused: boolean; /** * Defines the RPO schedule configuration for this BackupPlan. This is mutually * exclusive with the cronSchedule field since at most one schedule can be defined * for a BackupPlan. If this is defined, then backupRetainDays must also be defined. * Structure is documented below. */ rpoConfig?: outputs.gkebackup.BackupPlanBackupScheduleRpoConfig; } interface BackupPlanBackupScheduleRpoConfig { /** * User specified time windows during which backup can NOT happen for this BackupPlan. * Backups should start and finish outside of any given exclusion window. Note: backup * jobs will be scheduled to start and finish outside the duration of the window as * much as possible, but running jobs will not get canceled when it runs into the window. * All the time and date values in exclusionWindows entry in the API are in UTC. We * only allow <=1 recurrence (daily or weekly) exclusion window for a BackupPlan while no * restriction on number of single occurrence windows. * Structure is documented below. 
*/ exclusionWindows?: outputs.gkebackup.BackupPlanBackupScheduleRpoConfigExclusionWindow[]; /** * Defines the target RPO for the BackupPlan in minutes, which means the target * maximum data loss in time that is acceptable for this BackupPlan. This must be * at least 60, i.e., 1 hour, and at most 86400, i.e., 60 days. */ targetRpoMinutes: number; } interface BackupPlanBackupScheduleRpoConfigExclusionWindow { /** * The exclusion window occurs every day if set to "True". * Specifying this field to "False" is an error. * Only one of singleOccurrenceDate, daily and daysOfWeek may be set. */ daily?: boolean; /** * The exclusion window occurs on these days of each week in UTC. * Only one of singleOccurrenceDate, daily and daysOfWeek may be set. * Structure is documented below. */ daysOfWeek?: outputs.gkebackup.BackupPlanBackupScheduleRpoConfigExclusionWindowDaysOfWeek; /** * Specifies duration of the window in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". Restrictions for duration based on the * recurrence type to allow some time for backup to happen: * - single_occurrence_date: no restriction * - daily window: duration < 24 hours * - weekly window: * - days of week includes all seven days of a week: duration < 24 hours * - all other weekly window: duration < 168 hours (i.e., 24 * 7 hours) */ duration: string; /** * No recurrence. The exclusion window occurs only once and on this date in UTC. * Only one of singleOccurrenceDate, daily and daysOfWeek may be set. * Structure is documented below. */ singleOccurrenceDate?: outputs.gkebackup.BackupPlanBackupScheduleRpoConfigExclusionWindowSingleOccurrenceDate; /** * Specifies the start time of the window using time of the day in UTC. * Structure is documented below. */ startTime: outputs.gkebackup.BackupPlanBackupScheduleRpoConfigExclusionWindowStartTime; } interface BackupPlanBackupScheduleRpoConfigExclusionWindowDaysOfWeek { /** * A list of days of week. 
* Each value may be one of: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ daysOfWeeks?: string[]; } interface BackupPlanBackupScheduleRpoConfigExclusionWindowSingleOccurrenceDate { /** * Day of a month. */ day?: number; /** * Month of a year. */ month?: number; /** * Year of the date. */ year?: number; } interface BackupPlanBackupScheduleRpoConfigExclusionWindowStartTime { /** * Hours of day in 24 hour format. */ hours?: number; /** * Minutes of hour of day. */ minutes?: number; /** * Fractions of seconds in nanoseconds. */ nanos?: number; /** * Seconds of minutes of the time. */ seconds?: number; } interface BackupPlanIamBindingCondition { description?: string; expression: string; title: string; } interface BackupPlanIamMemberCondition { description?: string; expression: string; title: string; } interface BackupPlanRetentionPolicy { /** * Minimum age for a Backup created via this BackupPlan (in days). * Must be an integer value between 0-90 (inclusive). * A Backup created under this BackupPlan will not be deletable * until it reaches Backup's (create time + backup_delete_lock_days). * Updating this field of a BackupPlan does not affect existing Backups. * Backups created after a successful update will inherit this new value. */ backupDeleteLockDays: number; /** * The default maximum age of a Backup created via this BackupPlan. * This field MUST be an integer value >= 0 and <= 365. If specified, * a Backup created under this BackupPlan will be automatically deleted * after its age reaches (createTime + backupRetainDays). * If not specified, Backups created under this BackupPlan will NOT be * subject to automatic deletion. Updating this field does NOT affect * existing Backups under it. Backups created AFTER a successful update * will automatically pick up the new value. * NOTE: backupRetainDays must be >= backupDeleteLockDays. * If cronSchedule is defined, then this must be <= 360 * the creation interval. 
         * If rpoConfig is defined, then this must be
         * <= 360 * targetRpoMinutes/(1440 minutes/day)
         */
        backupRetainDays: number;
        /**
         * This flag denotes whether the retention policy of this BackupPlan is locked.
         * If set to True, no further update is allowed on this policy, including
         * the locked field itself.
         */
        locked: boolean;
    }
    interface RestorePlanIamBindingCondition {
        description?: string;
        expression: string;
        title: string;
    }
    interface RestorePlanIamMemberCondition {
        description?: string;
        expression: string;
        title: string;
    }
    interface RestorePlanRestoreConfig {
        /**
         * If True, restore all namespaced resources in the Backup.
         * Setting this field to False will result in an error.
         */
        allNamespaces?: boolean;
        /**
         * Defines the behavior for handling the situation where cluster-scoped resources
         * being restored already exist in the target cluster.
         * This MUST be set to a value other than `CLUSTER_RESOURCE_CONFLICT_POLICY_UNSPECIFIED`
         * if `clusterResourceRestoreScope` is anything other than `noGroupKinds`.
         * See https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/RestoreConfig#clusterresourceconflictpolicy
         * for more information on each policy option.
         * Possible values are: `USE_EXISTING_VERSION`, `USE_BACKUP_VERSION`.
         */
        clusterResourceConflictPolicy?: string;
        /**
         * Identifies the cluster-scoped resources to restore from the Backup.
         * Structure is documented below.
         */
        clusterResourceRestoreScope?: outputs.gkebackup.RestorePlanRestoreConfigClusterResourceRestoreScope;
        /**
         * A list of selected namespaces excluded from restoration.
         * All namespaces except those in this list will be restored.
         * Structure is documented below.
         */
        excludedNamespaces?: outputs.gkebackup.RestorePlanRestoreConfigExcludedNamespaces;
        /**
         * Defines the behavior for handling the situation where sets of namespaced resources
         * being restored already exist in the target cluster.
* This MUST be set to a value other than `NAMESPACED_RESOURCE_RESTORE_MODE_UNSPECIFIED` * if the `namespacedResourceRestoreScope` is anything other than `noNamespaces`. * See https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/RestoreConfig#namespacedresourcerestoremode * for more information on each mode. * Possible values are: `DELETE_AND_RESTORE`, `FAIL_ON_CONFLICT`, `MERGE_SKIP_ON_CONFLICT`, `MERGE_REPLACE_VOLUME_ON_CONFLICT`, `MERGE_REPLACE_ON_CONFLICT`. */ namespacedResourceRestoreMode?: string; /** * Do not restore any namespaced resources if set to "True". * Specifying this field to "False" is not allowed. */ noNamespaces?: boolean; /** * It contains custom ordering to use on a Restore. * Structure is documented below. */ restoreOrder?: outputs.gkebackup.RestorePlanRestoreConfigRestoreOrder; /** * A list of selected ProtectedApplications to restore. * The listed ProtectedApplications and all the resources * to which they refer will be restored. * Structure is documented below. */ selectedApplications?: outputs.gkebackup.RestorePlanRestoreConfigSelectedApplications; /** * A list of selected namespaces to restore from the Backup. * The listed Namespaces and all resources contained in them will be restored. * Structure is documented below. */ selectedNamespaces?: outputs.gkebackup.RestorePlanRestoreConfigSelectedNamespaces; /** * A list of transformation rules to be applied against Kubernetes * resources as they are selected for restoration from a Backup. * Rules are executed in order defined - this order matters, * as changes made by a rule may impact the filtering logic of subsequent * rules. An empty list means no transformation will occur. * Structure is documented below. */ transformationRules?: outputs.gkebackup.RestorePlanRestoreConfigTransformationRule[]; /** * Specifies the mechanism to be used to restore volume data. 
         * This should be set to a value other than `VOLUME_DATA_RESTORE_POLICY_UNSPECIFIED`.
         * If not specified, it will be treated as `NO_VOLUME_DATA_RESTORATION`.
         * See https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/RestoreConfig#VolumeDataRestorePolicy
         * for more information on each policy option.
         * Possible values are: `RESTORE_VOLUME_DATA_FROM_BACKUP`, `REUSE_VOLUME_HANDLE_FROM_BACKUP`, `NO_VOLUME_DATA_RESTORATION`.
         */
        volumeDataRestorePolicy?: string;
        /**
         * A table that binds volumes by their scope to a restore policy. Bindings
         * must have a unique scope. Any volumes not scoped in the bindings are
         * subject to the policy defined in volume_data_restore_policy.
         * Structure is documented below.
         */
        volumeDataRestorePolicyBindings?: outputs.gkebackup.RestorePlanRestoreConfigVolumeDataRestorePolicyBinding[];
    }
    interface RestorePlanRestoreConfigClusterResourceRestoreScope {
        /**
         * If True, all valid cluster-scoped resources will be restored.
         * Mutually exclusive to any other field in `clusterResourceRestoreScope`.
         */
        allGroupKinds?: boolean;
        /**
         * A list of cluster-scoped resource group kinds to NOT restore from the backup.
         * If specified, all valid cluster-scoped resources will be restored except
         * for those specified in the list.
         * Mutually exclusive to any other field in `clusterResourceRestoreScope`.
         * Structure is documented below.
         */
        excludedGroupKinds?: outputs.gkebackup.RestorePlanRestoreConfigClusterResourceRestoreScopeExcludedGroupKind[];
        /**
         * If True, no cluster-scoped resources will be restored.
         * Mutually exclusive to any other field in `clusterResourceRestoreScope`.
         */
        noGroupKinds?: boolean;
        /**
         * A list of cluster-scoped resource group kinds to restore from the backup.
         * If specified, only the selected resources will be restored.
         * Mutually exclusive to any other field in the `clusterResourceRestoreScope`.
* Structure is documented below. */ selectedGroupKinds?: outputs.gkebackup.RestorePlanRestoreConfigClusterResourceRestoreScopeSelectedGroupKind[]; } interface RestorePlanRestoreConfigClusterResourceRestoreScopeExcludedGroupKind { /** * API Group string of a Kubernetes resource, e.g. * "apiextensions.k8s.io", "storage.k8s.io", etc. * Use empty string for core group. */ resourceGroup?: string; /** * Kind of a Kubernetes resource, e.g. * "CustomResourceDefinition", "StorageClass", etc. */ resourceKind?: string; } interface RestorePlanRestoreConfigClusterResourceRestoreScopeSelectedGroupKind { /** * API Group string of a Kubernetes resource, e.g. * "apiextensions.k8s.io", "storage.k8s.io", etc. * Use empty string for core group. */ resourceGroup?: string; /** * Kind of a Kubernetes resource, e.g. * "CustomResourceDefinition", "StorageClass", etc. */ resourceKind?: string; } interface RestorePlanRestoreConfigExcludedNamespaces { /** * A list of Kubernetes Namespaces. */ namespaces: string[]; } interface RestorePlanRestoreConfigRestoreOrder { /** * A list of group kind dependency pairs * that is used by Backup for GKE to * generate a group kind restore order. * Structure is documented below. */ groupKindDependencies: outputs.gkebackup.RestorePlanRestoreConfigRestoreOrderGroupKindDependency[]; } interface RestorePlanRestoreConfigRestoreOrderGroupKindDependency { /** * The requiring group kind requires that the satisfying * group kind be restored first. * Structure is documented below. */ requiring: outputs.gkebackup.RestorePlanRestoreConfigRestoreOrderGroupKindDependencyRequiring; /** * The satisfying group kind must be restored first * in order to satisfy the dependency. * Structure is documented below. */ satisfying: outputs.gkebackup.RestorePlanRestoreConfigRestoreOrderGroupKindDependencySatisfying; } interface RestorePlanRestoreConfigRestoreOrderGroupKindDependencyRequiring { /** * API Group of a Kubernetes resource, e.g. 
* "apiextensions.k8s.io", "storage.k8s.io", etc. * Use empty string for core group. */ resourceGroup?: string; /** * Kind of a Kubernetes resource, e.g. * "CustomResourceDefinition", "StorageClass", etc. */ resourceKind?: string; } interface RestorePlanRestoreConfigRestoreOrderGroupKindDependencySatisfying { /** * API Group of a Kubernetes resource, e.g. * "apiextensions.k8s.io", "storage.k8s.io", etc. * Use empty string for core group. */ resourceGroup?: string; /** * Kind of a Kubernetes resource, e.g. * "CustomResourceDefinition", "StorageClass", etc. */ resourceKind?: string; } interface RestorePlanRestoreConfigSelectedApplications { /** * A list of namespaced Kubernetes resources. * Structure is documented below. */ namespacedNames: outputs.gkebackup.RestorePlanRestoreConfigSelectedApplicationsNamespacedName[]; } interface RestorePlanRestoreConfigSelectedApplicationsNamespacedName { /** * The name of a Kubernetes Resource. */ name: string; /** * The namespace of a Kubernetes Resource. */ namespace: string; } interface RestorePlanRestoreConfigSelectedNamespaces { /** * A list of Kubernetes Namespaces. */ namespaces: string[]; } interface RestorePlanRestoreConfigTransformationRule { /** * The description is a user specified string description * of the transformation rule. */ description?: string; /** * A list of transformation rule actions to take against candidate * resources. Actions are executed in order defined - this order * matters, as they could potentially interfere with each other and * the first operation could affect the outcome of the second operation. * Structure is documented below. */ fieldActions: outputs.gkebackup.RestorePlanRestoreConfigTransformationRuleFieldAction[]; /** * This field is used to specify a set of fields that should be used to * determine which resources in backup should be acted upon by the * supplied transformation rule actions, and this will ensure that only * specific resources are affected by transformation rule actions. 
* Structure is documented below. */ resourceFilter?: outputs.gkebackup.RestorePlanRestoreConfigTransformationRuleResourceFilter; } interface RestorePlanRestoreConfigTransformationRuleFieldAction { /** * A string containing a JSON Pointer value that references the * location in the target document to move the value from. */ fromPath?: string; /** * Specifies the operation to perform. * Possible values are: `REMOVE`, `MOVE`, `COPY`, `ADD`, `TEST`, `REPLACE`. */ op: string; /** * A string containing a JSON-Pointer value that references a * location within the target document where the operation is performed. */ path?: string; /** * A string that specifies the desired value in string format * to use for transformation. */ value?: string; } interface RestorePlanRestoreConfigTransformationRuleResourceFilter { /** * (Filtering parameter) Any resource subject to transformation must * belong to one of the listed "types". If this field is not provided, * no type filtering will be performed * (all resources of all types matching previous filtering parameters * will be candidates for transformation). * Structure is documented below. */ groupKinds?: outputs.gkebackup.RestorePlanRestoreConfigTransformationRuleResourceFilterGroupKind[]; /** * This is a JSONPath expression that matches specific fields of * candidate resources and it operates as a filtering parameter * (resources that are not matched with this expression will not * be candidates for transformation). */ jsonPath?: string; /** * (Filtering parameter) Any resource subject to transformation must * be contained within one of the listed Kubernetes Namespace in the * Backup. If this field is not provided, no namespace filtering will * be performed (all resources in all Namespaces, including all * cluster-scoped resources, will be candidates for transformation). * To mix cluster-scoped and namespaced resources in the same rule, * use an empty string ("") as one of the target namespaces. 
*/ namespaces?: string[]; } interface RestorePlanRestoreConfigTransformationRuleResourceFilterGroupKind { /** * API Group string of a Kubernetes resource, e.g. * "apiextensions.k8s.io", "storage.k8s.io", etc. * Use empty string for core group. */ resourceGroup?: string; /** * Kind of a Kubernetes resource, e.g. * "CustomResourceDefinition", "StorageClass", etc. */ resourceKind?: string; } interface RestorePlanRestoreConfigVolumeDataRestorePolicyBinding { /** * Specifies the mechanism to be used to restore this volume data. * See https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/RestoreConfig#VolumeDataRestorePolicy * for more information on each policy option. * Possible values are: `RESTORE_VOLUME_DATA_FROM_BACKUP`, `REUSE_VOLUME_HANDLE_FROM_BACKUP`, `NO_VOLUME_DATA_RESTORATION`. */ policy: string; /** * The volume type, as determined by the PVC's * bound PV, to apply the policy to. * Possible values are: `GCE_PERSISTENT_DISK`. */ volumeType: string; } } export declare namespace gkehub { interface FeatureFleetDefaultMemberConfig { /** * Config Management spec * Structure is documented below. */ configmanagement?: outputs.gkehub.FeatureFleetDefaultMemberConfigConfigmanagement; /** * Service Mesh spec * Structure is documented below. */ mesh?: outputs.gkehub.FeatureFleetDefaultMemberConfigMesh; /** * Policy Controller spec * Structure is documented below. */ policycontroller?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontroller; } interface FeatureFleetDefaultMemberConfigConfigmanagement { /** * ConfigSync configuration for the cluster * Structure is documented below. */ configSync?: outputs.gkehub.FeatureFleetDefaultMemberConfigConfigmanagementConfigSync; /** * Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. 
         * Possible values are: `MANAGEMENT_UNSPECIFIED`, `MANAGEMENT_AUTOMATIC`, `MANAGEMENT_MANUAL`.
         */
        management?: string;
        /**
         * Version of Config Sync installed
         */
        version?: string;
    }
    interface FeatureFleetDefaultMemberConfigConfigmanagementConfigSync {
        /**
         * Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if they exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depending on the presence of the git or oci field.
         */
        enabled?: boolean;
        /**
         * Git repo configuration for the cluster
         * Structure is documented below.
         */
        git?: outputs.gkehub.FeatureFleetDefaultMemberConfigConfigmanagementConfigSyncGit;
        /**
         * The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring. The GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA.
         */
        metricsGcpServiceAccountEmail?: string;
        /**
         * OCI repo configuration for the cluster
         * Structure is documented below.
         */
        oci?: outputs.gkehub.FeatureFleetDefaultMemberConfigConfigmanagementConfigSyncOci;
        /**
         * Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.
*/ preventDrift?: boolean; /** * Specifies whether the Config Sync Repo is in hierarchical or unstructured mode */ sourceFormat?: string; } interface FeatureFleetDefaultMemberConfigConfigmanagementConfigSyncGit { /** * The Google Cloud Service Account Email used for auth when secretType is gcpServiceAccount */ gcpServiceAccountEmail?: string; /** * URL for the HTTPS Proxy to be used when communicating with the Git repo */ httpsProxy?: string; /** * The path within the Git repository that represents the top level of the repo to sync */ policyDir?: string; /** * Type of secret configured for access to the Git repo */ secretType: string; /** * The branch of the repository to sync from. Default: master */ syncBranch?: string; /** * The URL of the Git repository to use as the source of truth */ syncRepo?: string; /** * Git revision (tag or hash) to check out. Default HEAD */ syncRev?: string; /** * Period in seconds between consecutive syncs. Default: 15 */ syncWaitSecs?: string; } interface FeatureFleetDefaultMemberConfigConfigmanagementConfigSyncOci { /** * The Google Cloud Service Account Email used for auth when secretType is gcpServiceAccount */ gcpServiceAccountEmail?: string; /** * The absolute path of the directory that contains the local resources. Default: the root directory of the image */ policyDir?: string; /** * Type of secret configured for access to the Git repo */ secretType: string; /** * The OCI image repository URL for the package to sync from */ syncRepo?: string; /** * Period in seconds between consecutive syncs. Default: 15 */ syncWaitSecs?: string; /** * (Optional, Deprecated) * Version of Config Sync installed * * > **Warning:** The `configmanagement.config_sync.oci.version` field is deprecated and will be removed in a future major release. Please use `configmanagement.version` field to specify the version of Config Sync installed instead. 
* * @deprecated The `configmanagement.config_sync.oci.version` field is deprecated and will be removed in a future major release. Please use `configmanagement.version` field to specify the version of Config Sync installed instead. */ version?: string; } interface FeatureFleetDefaultMemberConfigMesh { /** * Whether to automatically manage Service Mesh * Possible values are: `MANAGEMENT_UNSPECIFIED`, `MANAGEMENT_AUTOMATIC`, `MANAGEMENT_MANUAL`. */ management: string; } interface FeatureFleetDefaultMemberConfigPolicycontroller { /** * Configuration of Policy Controller * Structure is documented below. */ policyControllerHubConfig: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfig; /** * Configures the version of Policy Controller */ version: string; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfig { /** * Interval for Policy Controller Audit scans (in seconds). When set to 0, this disables audit functionality altogether. */ auditIntervalSeconds: number; /** * The maximum number of audit violations to be stored in a constraint. If not set, the internal default of 20 will be used. */ constraintViolationLimit?: number; /** * Map of deployment configs to deployments ("admission", "audit", "mutation"). * Structure is documented below. */ deploymentConfigs: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfig[]; /** * The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster. */ exemptableNamespaces?: string[]; /** * Configures the mode of the Policy Controller installation * Possible values are: `INSTALL_SPEC_UNSPECIFIED`, `INSTALL_SPEC_NOT_INSTALLED`, `INSTALL_SPEC_ENABLED`, `INSTALL_SPEC_SUSPENDED`, `INSTALL_SPEC_DETACHED`. */ installSpec: string; /** * Logs all denies and dry run failures. 
*/ logDeniesEnabled?: boolean; /** * Monitoring specifies the configuration of monitoring Policy Controller. * Structure is documented below. */ monitoring: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoring; /** * Enables the ability to mutate resources using Policy Controller. */ mutationEnabled?: boolean; /** * Specifies the desired policy content on the cluster. * Structure is documented below. */ policyContent?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContent; /** * Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated. */ referentialRulesEnabled?: boolean; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfig { /** * The identifier for this object. Format specified above. */ component: string; /** * Container resource requirements. * Structure is documented below. */ containerResources?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResources; /** * Pod affinity configuration. * Possible values are: `AFFINITY_UNSPECIFIED`, `NO_AFFINITY`, `ANTI_AFFINITY`. */ podAffinity: string; /** * Pod tolerations of node taints. * Structure is documented below. */ podTolerations?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodToleration[]; /** * Pod replica count. */ replicaCount: number; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResources { /** * Limits describes the maximum amount of compute resources allowed for use by the running container. * Structure is documented below. 
*/ limits?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimits; /** * Requests describes the amount of compute resources reserved for the container by the kube-scheduler. * Structure is documented below. */ requests?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequests; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimits { /** * CPU requirement expressed in Kubernetes resource units. */ cpu?: string; /** * Memory requirement expressed in Kubernetes resource units. */ memory?: string; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequests { /** * CPU requirement expressed in Kubernetes resource units. */ cpu?: string; /** * Memory requirement expressed in Kubernetes resource units. */ memory?: string; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodToleration { /** * Matches a taint effect. */ effect?: string; /** * Matches a taint key (not necessarily unique). */ key?: string; /** * Matches a taint operator. */ operator?: string; /** * Matches a taint value. */ value?: string; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoring { /** * Specifies the list of backends Policy Controller will export to. An empty list would effectively disable metrics export. * Each value may be one of: `MONITORING_BACKEND_UNSPECIFIED`, `PROMETHEUS`, `CLOUD_MONITORING`. */ backends: string[]; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContent { /** * Configures which bundles to install and their corresponding install specs. * Structure is documented below. 
*/ bundles?: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundle[]; /** * Configures the installation of the Template Library. * Structure is documented below. */ templateLibrary: outputs.gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundle { /** * The identifier for this object. Format specified above. */ bundle: string; /** * The set of namespaces to be exempted from the bundle. */ exemptedNamespaces?: string[]; } interface FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { /** * Configures the manner in which the template library is installed on the cluster. * Possible values are: `INSTALLATION_UNSPECIFIED`, `NOT_INSTALLED`, `ALL`. */ installation?: string; } interface FeatureIamBindingCondition { description?: string; expression: string; title: string; } interface FeatureIamMemberCondition { description?: string; expression: string; title: string; } interface FeatureMembershipConfigmanagement { /** * Config Sync configuration for the cluster. Structure is documented below. */ configSync?: outputs.gkehub.FeatureMembershipConfigmanagementConfigSync; /** * Hierarchy Controller configuration for the cluster. Structure is documented below. * Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. * Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. * Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) * to migrate from Hierarchy Controller to HNC. */ hierarchyController?: outputs.gkehub.FeatureMembershipConfigmanagementHierarchyController; /** * Enables automatic Feature management. 
Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, * and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. * This field was introduced in Terraform version 5.41.0. */ management: string; /** * Policy Controller configuration for the cluster. Structure is documented below. * Configuring Policy Controller through the configmanagement feature is no longer recommended. * Use the policycontroller feature instead. */ policyController?: outputs.gkehub.FeatureMembershipConfigmanagementPolicyController; /** * Version of Config Sync installed. */ version: string; } interface FeatureMembershipConfigmanagementConfigSync { /** * The override configurations for the Config Sync Deployments. Structure is documented below. The field is only available on Config Sync version 1.20.1 or later. */ deploymentOverrides?: outputs.gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverride[]; /** * Whether Config Sync is enabled in the cluster. This field was introduced in Terraform version * 5.41.0, and * needs to be set to `true` explicitly to install Config Sync. */ enabled?: boolean; /** * (Optional) Structure is documented below. */ git?: outputs.gkehub.FeatureMembershipConfigmanagementConfigSyncGit; /** * Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. */ metricsGcpServiceAccountEmail?: string; /** * (Optional) Supported from Config Sync versions 1.12.0 onwards. Structure is documented below. * * Use either `git` or `oci` config option. */ oci?: outputs.gkehub.FeatureMembershipConfigmanagementConfigSyncOci; /** * Supported from Config Sync versions 1.10.0 onwards. Set to `true` to enable the Config Sync admission webhook to prevent drifts. 
If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. */ preventDrift: boolean; /** * Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. */ sourceFormat?: string; /** * Set to true to stop syncing configs for a single cluster. Default to false. */ stopSyncing?: boolean; } interface FeatureMembershipConfigmanagementConfigSyncDeploymentOverride { /** * The override configurations for the containers in the Deployment. Structure is documented below. */ containers?: outputs.gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrideContainer[]; /** * The name of the Deployment. */ deploymentName?: string; /** * The namespace of the Deployment. */ deploymentNamespace?: string; } interface FeatureMembershipConfigmanagementConfigSyncDeploymentOverrideContainer { /** * The name of the container. */ containerName?: string; /** * The CPU limit of the container. */ cpuLimit?: string; /** * The CPU request of the container. */ cpuRequest?: string; /** * The memory limit of the container. */ memoryLimit?: string; /** * The memory request of the container. */ memoryRequest?: string; } interface FeatureMembershipConfigmanagementConfigSyncGit { /** * The GCP Service Account Email used for auth when secretType is gcpServiceAccount. */ gcpServiceAccountEmail?: string; /** * URL for the HTTPS proxy to be used when communicating with the Git repo. */ httpsProxy?: string; /** * The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. */ policyDir?: string; /** * Type of secret configured for access to the Git repo. */ secretType?: string; /** * The branch of the repository to sync from. Default: master. */ syncBranch?: string; /** * The URL of the Git repository to use as the source of truth. */ syncRepo?: string; /** * Git revision (tag or hash) to check out. Default HEAD. 
*/ syncRev?: string; /** * Period in seconds between consecutive syncs. Default: 15. */ syncWaitSecs?: string; } interface FeatureMembershipConfigmanagementConfigSyncOci { /** * The GCP Service Account Email used for auth when secretType is gcpserviceaccount. */ gcpServiceAccountEmail?: string; /** * The absolute path of the directory that contains the local resources. Default: the root directory of the image. */ policyDir?: string; /** * Type of secret configured for access to the OCI Image. Must be one of gcenode, gcpserviceaccount or none. */ secretType?: string; /** * The OCI image repository URL for the package to sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME. */ syncRepo?: string; /** * Period in seconds(int64 format) between consecutive syncs. Default: 15. */ syncWaitSecs?: string; } interface FeatureMembershipConfigmanagementHierarchyController { /** * Whether hierarchical resource quota is enabled in this cluster. */ enableHierarchicalResourceQuota?: boolean; /** * Whether pod tree labels are enabled in this cluster. */ enablePodTreeLabels?: boolean; /** * Whether Hierarchy Controller is enabled in this cluster. */ enabled?: boolean; } interface FeatureMembershipConfigmanagementPolicyController { /** * Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether. */ auditIntervalSeconds?: string; /** * Enables the installation of Policy Controller. If false, the rest of PolicyController fields take no effect. */ enabled?: boolean; /** * The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster. */ exemptableNamespaces?: string[]; /** * Logs all denies and dry run failures. */ logDeniesEnabled?: boolean; /** * Specifies the backends Policy Controller should export metrics to. 
For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: ["cloudmonitoring", "prometheus"]. Default: ["cloudmonitoring", "prometheus"] */ monitoring: outputs.gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring; /** * Enables mutation in policy controller. If true, mutation CRDs, webhook, and controller deployment will be deployed to the cluster. */ mutationEnabled?: boolean; /** * Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated. */ referentialRulesEnabled?: boolean; /** * Installs the default template library along with Policy Controller. */ templateLibraryInstalled?: boolean; } interface FeatureMembershipConfigmanagementPolicyControllerMonitoring { /** * Specifies the list of backends Policy Controller will export to. Must be one of `CLOUD_MONITORING` or `PROMETHEUS`. Defaults to [`CLOUD_MONITORING`, `PROMETHEUS`]. Specifying an empty value `[]` disables metrics export. */ backends: string[]; } interface FeatureMembershipMesh { /** * **DEPRECATED** Whether to automatically manage Service Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED, AUTOMATIC, MANUAL * * @deprecated Deprecated in favor of the `management` field */ controlPlane?: string; /** * Whether to automatically manage Service Mesh. Can either be `MANAGEMENT_AUTOMATIC` or `MANAGEMENT_MANUAL`. */ management?: string; } interface FeatureMembershipPolicycontroller { /** * Policy Controller configuration for the cluster. Structure is documented below. */ policyControllerHubConfig: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig; /** * Version of Policy Controller to install. Defaults to the latest version. */ version: string; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfig { /** * Sets the interval for Policy Controller Audit Scans (in seconds). 
When set to 0, this disables audit functionality altogether. */ auditIntervalSeconds?: number; /** * The maximum number of audit violations to be stored in a constraint. If not set, the default of 20 will be used. */ constraintViolationLimit?: number; /** * Map of deployment configs to deployments ("admission", "audit", "mutation"). */ deploymentConfigs: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfig[]; /** * The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster. */ exemptableNamespaces?: string[]; /** * Configures the mode of the Policy Controller installation. Must be one of `INSTALL_SPEC_NOT_INSTALLED`, `INSTALL_SPEC_ENABLED`, `INSTALL_SPEC_SUSPENDED` or `INSTALL_SPEC_DETACHED`. */ installSpec?: string; /** * Logs all denies and dry run failures. */ logDeniesEnabled?: boolean; /** * Specifies the backends Policy Controller should export metrics to. Structure is documented below. */ monitoring: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring; /** * Enables mutation in policy controller. If true, mutation CRDs, webhook, and controller deployment will be deployed to the cluster. */ mutationEnabled?: boolean; /** * Specifies the desired policy content on the cluster. Structure is documented below. */ policyContent: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent; /** * Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated. */ referentialRulesEnabled?: boolean; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfig { /** * The name of the component. One of `admission` `audit` or `mutation` */ componentName: string; /** * Container resource requirements. 
*/ containerResources?: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResources; /** * Pod affinity configuration. Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY */ podAffinity?: string; /** * Pod tolerations of node taints. */ podTolerations?: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodToleration[]; /** * Pod replica count. */ replicaCount?: number; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResources { /** * Limits describes the maximum amount of compute resources allowed for use by the running container. */ limits?: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimits; /** * Requests describes the amount of compute resources reserved for the container by the kube-scheduler. */ requests?: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequests; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimits { /** * CPU requirement expressed in Kubernetes resource units. */ cpu?: string; /** * Memory requirement expressed in Kubernetes resource units. */ memory?: string; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequests { /** * CPU requirement expressed in Kubernetes resource units. */ cpu?: string; /** * Memory requirement expressed in Kubernetes resource units. */ memory?: string; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodToleration { /** * Matches a taint effect. */ effect?: string; /** * Matches a taint key (not necessarily unique). */ key?: string; /** * Matches a taint operator. */ operator?: string; /** * Matches a taint value. 
*/ value?: string; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { /** * Specifies the list of backends Policy Controller will export to. Must be one of `CLOUD_MONITORING` or `PROMETHEUS`. Defaults to [`CLOUD_MONITORING`, `PROMETHEUS`]. Specifying an empty value `[]` disables metrics export. */ backends: string[]; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { /** * map of bundle name to BundleInstallSpec. The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint. */ bundles?: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundle[]; /** * Configures the installation of the Template Library. Structure is documented below. */ templateLibrary: outputs.gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundle { /** * The name of the bundle. */ bundleName: string; /** * The set of namespaces to be exempted from the bundle. */ exemptedNamespaces?: string[]; } interface FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { /** * Configures the manner in which the template library is installed on the cluster. Must be one of `ALL`, `NOT_INSTALLED` or `INSTALLATION_UNSPECIFIED`. Defaults to `ALL`. */ installation?: string; } interface FeatureResourceState { /** * (Output) * Whether this Feature has outstanding resources that need to be cleaned up before it can be disabled. */ hasResources: boolean; /** * (Output) * Output only. The "running state" of the Feature in this Hub. * Structure is documented below. */ state: string; } interface FeatureSpec { /** * Clusterupgrade feature spec. * Structure is documented below. */ clusterupgrade?: outputs.gkehub.FeatureSpecClusterupgrade; /** * Fleet Observability feature spec. 
* Structure is documented below. */ fleetobservability?: outputs.gkehub.FeatureSpecFleetobservability; /** * Multicluster Ingress-specific spec. * Structure is documented below. */ multiclusteringress?: outputs.gkehub.FeatureSpecMulticlusteringress; /** * RBACRolebinding Actuation feature spec. * Structure is documented below. */ rbacrolebindingactuation?: outputs.gkehub.FeatureSpecRbacrolebindingactuation; } interface FeatureSpecClusterupgrade { /** * Configuration overrides for individual upgrades. * Structure is documented below. */ gkeUpgradeOverrides?: outputs.gkehub.FeatureSpecClusterupgradeGkeUpgradeOverride[]; /** * Post conditions to override for the specified upgrade. * Structure is documented below. */ postConditions: outputs.gkehub.FeatureSpecClusterupgradePostConditions; /** * Specified if other fleet should be considered as a source of upgrades. Currently, at most one upstream fleet is allowed. The fleet name should be either fleet project number or id. */ upstreamFleets: string[]; } interface FeatureSpecClusterupgradeGkeUpgradeOverride { /** * Post conditions to override for the specified upgrade. * Structure is documented below. */ postConditions: outputs.gkehub.FeatureSpecClusterupgradeGkeUpgradeOverridePostConditions; /** * Which upgrade to override. * Structure is documented below. */ upgrade: outputs.gkehub.FeatureSpecClusterupgradeGkeUpgradeOverrideUpgrade; } interface FeatureSpecClusterupgradeGkeUpgradeOverridePostConditions { /** * Amount of time to "soak" after a rollout has been finished before marking it COMPLETE. Cannot exceed 30 days. */ soaking: string; } interface FeatureSpecClusterupgradeGkeUpgradeOverrideUpgrade { /** * Name of the upgrade, e.g., "k8sControlPlane". It should be a valid upgrade name. It must not exceet 99 characters. */ name: string; /** * Version of the upgrade, e.g., "1.22.1-gke.100". It should be a valid version. It must not exceet 99 characters. 
*/ version: string; } interface FeatureSpecClusterupgradePostConditions { /** * Amount of time to "soak" after a rollout has been finished before marking it COMPLETE. Cannot exceed 30 days. */ soaking: string; } interface FeatureSpecFleetobservability { /** * Specified if fleet logging feature is enabled for the entire fleet. If UNSPECIFIED, fleet logging feature is disabled for the entire fleet. * Structure is documented below. */ loggingConfig?: outputs.gkehub.FeatureSpecFleetobservabilityLoggingConfig; } interface FeatureSpecFleetobservabilityLoggingConfig { /** * Specified if applying the default routing config to logs not specified in other configs. * Structure is documented below. */ defaultConfig?: outputs.gkehub.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig; /** * Specified if applying the routing config to all logs for all fleet scopes. * Structure is documented below. */ fleetScopeLogsConfig?: outputs.gkehub.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig; } interface FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { /** * Specified if fleet logging feature is enabled. * Possible values are: `MODE_UNSPECIFIED`, `COPY`, `MOVE`. */ mode?: string; } interface FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { /** * Specified if fleet logging feature is enabled. * Possible values are: `MODE_UNSPECIFIED`, `COPY`, `MOVE`. */ mode?: string; } interface FeatureSpecMulticlusteringress { /** * Fully-qualified Membership name which hosts the MultiClusterIngress CRD. Example: `projects/foo-proj/locations/global/memberships/bar` */ configMembership: string; } interface FeatureSpecRbacrolebindingactuation { /** * The list of allowed custom roles (ClusterRoles). If a custom role is not part of this list, it cannot be used in a fleet scope RBACRoleBinding. If a custom role in this list is in use, it cannot be removed from the list until the scope RBACRolebindings using it are deleted. 
*/ allowedCustomRoles?: string[]; } interface FeatureState { /** * (Output) * Output only. The "running state" of the Feature in this Hub. * Structure is documented below. */ states: outputs.gkehub.FeatureStateState[]; } interface FeatureStateState { /** * (Output) * The high-level, machine-readable status of this Feature. */ code: string; /** * (Output) * A human-readable description of the current status. */ description: string; /** * (Output) * The time this status and any related Feature-specific details were updated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z" */ updateTime: string; } interface FleetDefaultClusterConfig { /** * Enable/Disable binary authorization features for the cluster. * Structure is documented below. */ binaryAuthorizationConfig?: outputs.gkehub.FleetDefaultClusterConfigBinaryAuthorizationConfig; /** * Enable/Disable Security Posture features for the cluster. * Structure is documented below. */ securityPostureConfig?: outputs.gkehub.FleetDefaultClusterConfigSecurityPostureConfig; } interface FleetDefaultClusterConfigBinaryAuthorizationConfig { /** * Mode of operation for binauthz policy evaluation. * Possible values are: `DISABLED`, `POLICY_BINDINGS`. */ evaluationMode?: string; /** * Binauthz policies that apply to this cluster. * Structure is documented below. */ policyBindings?: outputs.gkehub.FleetDefaultClusterConfigBinaryAuthorizationConfigPolicyBinding[]; } interface FleetDefaultClusterConfigBinaryAuthorizationConfigPolicyBinding { /** * The relative resource name of the binauthz platform policy to audit. GKE * platform policies have the following format: * `projects/{project_number}/platforms/gke/policies/{policy_id}`. */ name?: string; } interface FleetDefaultClusterConfigSecurityPostureConfig { /** * Sets which mode to use for Security Posture features. 
* Possible values are: `DISABLED`, `BASIC`, `ENTERPRISE`. */ mode?: string; /** * Sets which mode to use for vulnerability scanning. * Possible values are: `VULNERABILITY_DISABLED`, `VULNERABILITY_BASIC`, `VULNERABILITY_ENTERPRISE`. */ vulnerabilityMode?: string; } interface FleetState { /** * (Output) * Describes the state of a Fleet resource. */ code: string; } interface GetFeatureFleetDefaultMemberConfig { /** * Config Management spec */ configmanagements: outputs.gkehub.GetFeatureFleetDefaultMemberConfigConfigmanagement[]; /** * Service Mesh spec */ meshes: outputs.gkehub.GetFeatureFleetDefaultMemberConfigMesh[]; /** * Policy Controller spec */ policycontrollers: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontroller[]; } interface GetFeatureFleetDefaultMemberConfigConfigmanagement { /** * ConfigSync configuration for the cluster */ configSyncs: outputs.gkehub.GetFeatureFleetDefaultMemberConfigConfigmanagementConfigSync[]; /** * Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. Possible values: ["MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"] */ management: string; /** * Version of Config Sync installed */ version: string; } interface GetFeatureFleetDefaultMemberConfigConfigmanagementConfigSync { /** * Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. 
*/ enabled: boolean; /** * Git repo configuration for the cluster */ gits: outputs.gkehub.GetFeatureFleetDefaultMemberConfigConfigmanagementConfigSyncGit[]; /** * The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring. The GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount 'default' in the namespace 'config-management-monitoring' should be bound to the GSA. */ metricsGcpServiceAccountEmail: string; /** * OCI repo configuration for the cluster */ ocis: outputs.gkehub.GetFeatureFleetDefaultMemberConfigConfigmanagementConfigSyncOci[]; /** * Set to true to enable the Config Sync admission webhook to prevent drifts. If set to 'false', disables the Config Sync admission webhook and does not prevent drifts. */ preventDrift: boolean; /** * Specifies whether the Config Sync Repo is in hierarchical or unstructured mode */ sourceFormat: string; } interface GetFeatureFleetDefaultMemberConfigConfigmanagementConfigSyncGit { /** * The Google Cloud Service Account Email used for auth when secretType is gcpServiceAccount */ gcpServiceAccountEmail: string; /** * URL for the HTTPS Proxy to be used when communicating with the Git repo */ httpsProxy: string; /** * The path within the Git repository that represents the top level of the repo to sync */ policyDir: string; /** * Type of secret configured for access to the Git repo */ secretType: string; /** * The branch of the repository to sync from. Default: master */ syncBranch: string; /** * The URL of the Git repository to use as the source of truth */ syncRepo: string; /** * Git revision (tag or hash) to check out. Default HEAD */ syncRev: string; /** * Period in seconds between consecutive syncs. 
Default: 15 */ syncWaitSecs: string; } interface GetFeatureFleetDefaultMemberConfigConfigmanagementConfigSyncOci { /** * The Google Cloud Service Account Email used for auth when secretType is gcpServiceAccount */ gcpServiceAccountEmail: string; /** * The absolute path of the directory that contains the local resources. Default: the root directory of the image */ policyDir: string; /** * Type of secret configured for access to the Git repo */ secretType: string; /** * The OCI image repository URL for the package to sync from */ syncRepo: string; /** * Period in seconds between consecutive syncs. Default: 15 */ syncWaitSecs: string; /** * Version of Config Sync installed */ version: string; } interface GetFeatureFleetDefaultMemberConfigMesh { /** * Whether to automatically manage Service Mesh Possible values: ["MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"] */ management: string; } interface GetFeatureFleetDefaultMemberConfigPolicycontroller { /** * Configuration of Policy Controller */ policyControllerHubConfigs: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfig[]; /** * Configures the version of Policy Controller */ version: string; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfig { /** * Interval for Policy Controller Audit scans (in seconds). When set to 0, this disables audit functionality altogether. */ auditIntervalSeconds: number; /** * The maximum number of audit violations to be stored in a constraint. If not set, the internal default of 20 will be used. */ constraintViolationLimit: number; /** * Map of deployment configs to deployments ("admission", "audit", "mutation"). */ deploymentConfigs: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfig[]; /** * The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster. 
*/ exemptableNamespaces: string[]; /** * Configures the mode of the Policy Controller installation Possible values: ["INSTALL_SPEC_UNSPECIFIED", "INSTALL_SPEC_NOT_INSTALLED", "INSTALL_SPEC_ENABLED", "INSTALL_SPEC_SUSPENDED", "INSTALL_SPEC_DETACHED"] */ installSpec: string; /** * Logs all denies and dry run failures. */ logDeniesEnabled: boolean; /** * Monitoring specifies the configuration of monitoring Policy Controller. */ monitorings: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoring[]; /** * Enables the ability to mutate resources using Policy Controller. */ mutationEnabled: boolean; /** * Specifies the desired policy content on the cluster. */ policyContents: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContent[]; /** * Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated. */ referentialRulesEnabled: boolean; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfig { component: string; /** * Container resource requirements. */ containerResources: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResource[]; /** * Pod affinity configuration. Possible values: ["AFFINITY_UNSPECIFIED", "NO_AFFINITY", "ANTI_AFFINITY"] */ podAffinity: string; /** * Pod tolerations of node taints. */ podTolerations: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodToleration[]; /** * Pod replica count. */ replicaCount: number; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResource { /** * Limits describes the maximum amount of compute resources allowed for use by the running container. 
*/ limits: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourceLimit[]; /** * Requests describes the amount of compute resources reserved for the container by the kube-scheduler. */ requests: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourceRequest[]; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourceLimit { /** * CPU requirement expressed in Kubernetes resource units. */ cpu: string; /** * Memory requirement expressed in Kubernetes resource units. */ memory: string; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourceRequest { /** * CPU requirement expressed in Kubernetes resource units. */ cpu: string; /** * Memory requirement expressed in Kubernetes resource units. */ memory: string; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodToleration { /** * Matches a taint effect. */ effect: string; /** * Matches a taint key (not necessarily unique). */ key: string; /** * Matches a taint operator. */ operator: string; /** * Matches a taint value. */ value: string; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoring { /** * Specifies the list of backends Policy Controller will export to. An empty list would effectively disable metrics export. Possible values: ["MONITORING_BACKEND_UNSPECIFIED", "PROMETHEUS", "CLOUD_MONITORING"] */ backends: string[]; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContent { /** * Configures which bundles to install and their corresponding install specs. 
*/ bundles: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundle[]; /** * Configures the installation of the Template Library. */ templateLibraries: outputs.gkehub.GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary[]; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundle { bundle: string; /** * The set of namespaces to be exempted from the bundle. */ exemptedNamespaces: string[]; } interface GetFeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { /** * Configures the manner in which the template library is installed on the cluster. Possible values: ["INSTALLATION_UNSPECIFIED", "NOT_INSTALLED", "ALL"] */ installation: string; } interface GetFeatureResourceState { /** * Whether this Feature has outstanding resources that need to be cleaned up before it can be disabled. */ hasResources: boolean; /** * The current state of the Feature resource in the Hub API. */ state: string; } interface GetFeatureSpec { /** * Clusterupgrade feature spec. */ clusterupgrades: outputs.gkehub.GetFeatureSpecClusterupgrade[]; /** * Fleet Observability feature spec. */ fleetobservabilities: outputs.gkehub.GetFeatureSpecFleetobservability[]; /** * Multicluster Ingress-specific spec. */ multiclusteringresses: outputs.gkehub.GetFeatureSpecMulticlusteringress[]; /** * RBACRolebinding Actuation feature spec. */ rbacrolebindingactuations: outputs.gkehub.GetFeatureSpecRbacrolebindingactuation[]; } interface GetFeatureSpecClusterupgrade { /** * Configuration overrides for individual upgrades. */ gkeUpgradeOverrides: outputs.gkehub.GetFeatureSpecClusterupgradeGkeUpgradeOverride[]; /** * Post conditions to override for the specified upgrade. 
*/ postConditions: outputs.gkehub.GetFeatureSpecClusterupgradePostCondition[]; /** * Specified if other fleet should be considered as a source of upgrades. Currently, at most one upstream fleet is allowed. The fleet name should be either fleet project number or id. */ upstreamFleets: string[]; } interface GetFeatureSpecClusterupgradeGkeUpgradeOverride { /** * Post conditions to override for the specified upgrade. */ postConditions: outputs.gkehub.GetFeatureSpecClusterupgradeGkeUpgradeOverridePostCondition[]; /** * Which upgrade to override. */ upgrades: outputs.gkehub.GetFeatureSpecClusterupgradeGkeUpgradeOverrideUpgrade[]; } interface GetFeatureSpecClusterupgradeGkeUpgradeOverridePostCondition { /** * Amount of time to "soak" after a rollout has been finished before marking it COMPLETE. Cannot exceed 30 days. */ soaking: string; } interface GetFeatureSpecClusterupgradeGkeUpgradeOverrideUpgrade { /** * The name of the feature you want to know the status of. */ name: string; /** * Version of the upgrade, e.g., "1.22.1-gke.100". It should be a valid version. It must not exceet 99 characters. */ version: string; } interface GetFeatureSpecClusterupgradePostCondition { /** * Amount of time to "soak" after a rollout has been finished before marking it COMPLETE. Cannot exceed 30 days. */ soaking: string; } interface GetFeatureSpecFleetobservability { /** * Specified if fleet logging feature is enabled for the entire fleet. If UNSPECIFIED, fleet logging feature is disabled for the entire fleet. */ loggingConfigs: outputs.gkehub.GetFeatureSpecFleetobservabilityLoggingConfig[]; } interface GetFeatureSpecFleetobservabilityLoggingConfig { /** * Specified if applying the default routing config to logs not specified in other configs. */ defaultConfigs: outputs.gkehub.GetFeatureSpecFleetobservabilityLoggingConfigDefaultConfig[]; /** * Specified if applying the routing config to all logs for all fleet scopes. 
*/ fleetScopeLogsConfigs: outputs.gkehub.GetFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig[]; } interface GetFeatureSpecFleetobservabilityLoggingConfigDefaultConfig { /** * Specified if fleet logging feature is enabled. Possible values: ["MODE_UNSPECIFIED", "COPY", "MOVE"] */ mode: string; } interface GetFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { /** * Specified if fleet logging feature is enabled. Possible values: ["MODE_UNSPECIFIED", "COPY", "MOVE"] */ mode: string; } interface GetFeatureSpecMulticlusteringress { /** * Fully-qualified Membership name which hosts the MultiClusterIngress CRD. Example: 'projects/foo-proj/locations/global/memberships/bar' */ configMembership: string; } interface GetFeatureSpecRbacrolebindingactuation { /** * The list of allowed custom roles (ClusterRoles). If a custom role is not part of this list, it cannot be used in a fleet scope RBACRoleBinding. If a custom role in this list is in use, it cannot be removed from the list until the scope RBACRolebindings using it are deleted. */ allowedCustomRoles: string[]; } interface GetFeatureState { /** * Output only. The "running state" of the Feature in this Hub. */ states: outputs.gkehub.GetFeatureStateState[]; } interface GetFeatureStateState { /** * The high-level, machine-readable status of this Feature. */ code: string; /** * A human-readable description of the current status. */ description: string; /** * The time this status and any related Feature-specific details were updated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z" */ updateTime: string; } interface GetMembershipAuthority { /** * A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid * with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster'. 
If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'. */ issuer: string; } interface GetMembershipBindingState { /** * Code describes the state of a MembershipBinding resource. */ code: string; } interface GetMembershipEndpoint { /** * If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource. */ gkeClusters: outputs.gkehub.GetMembershipEndpointGkeCluster[]; } interface GetMembershipEndpointGkeCluster { /** * Self-link of the GCP resource for the GKE cluster. * For example: '//container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster'. * It can be at the most 1000 characters in length. If the cluster is provisioned with Terraform, * this can be '"//container.googleapis.com/${google_container_cluster.my-cluster.id}"' or * 'google_container_cluster.my-cluster.id'. */ resourceLink: string; } interface MembershipAuthority { /** * A JSON Web Token (JWT) issuer URI. `issuer` must start with `https://` and // be a valid * with length <2000 characters. For example: `https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster`. If the cluster is provisioned with Terraform, this is `"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"`. */ issuer: string; } interface MembershipBindingState { /** * (Output) * Code describes the state of a MembershipBinding resource. */ code: string; } interface MembershipEndpoint { /** * If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource. * Structure is documented below. */ gkeCluster?: outputs.gkehub.MembershipEndpointGkeCluster; } interface MembershipEndpointGkeCluster { /** * Self-link of the GCP resource for the GKE cluster. * For example: `//container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster`. 
* It can be at the most 1000 characters in length. If the cluster is provisioned with Terraform, * this can be `"//container.googleapis.com/${google_container_cluster.my-cluster.id}"` or * `google_container_cluster.my-cluster.id`. */ resourceLink: string; } interface MembershipIamBindingCondition { description?: string; expression: string; title: string; } interface MembershipIamMemberCondition { description?: string; expression: string; title: string; } interface MembershipRbacRoleBindingRole { /** * PredefinedRole is an ENUM representation of the default Kubernetes Roles * Possible values are: `UNKNOWN`, `ADMIN`, `EDIT`, `VIEW`, `ANTHOS_SUPPORT`. */ predefinedRole: string; } interface MembershipRbacRoleBindingState { /** * (Output) * Code describes the state of a RBAC Role Binding resource. */ code: string; } interface NamespaceState { /** * (Output) * Code describes the state of a Namespace resource. */ code: string; } interface RolloutSequenceStage { /** * Filter to select a subset of clusters from the specified Fleet projects. * If not specified, all clusters in the fleet projects are selected. * Structure is documented below. */ clusterSelector?: outputs.gkehub.RolloutSequenceStageClusterSelector; /** * List of Fleet projects to select the clusters from. * Expected format: projects/{project} */ fleetProjects: string[]; /** * Soak time after upgrading all the clusters in the stage, specified in seconds. */ soakDuration?: string; } interface RolloutSequenceStageClusterSelector { /** * The label selector must be a valid CEL (Common Expression Language) expression which * evaluates resource.labels. */ labelSelector: string; } interface ScopeIamBindingCondition { description?: string; expression: string; title: string; } interface ScopeIamMemberCondition { description?: string; expression: string; title: string; } interface ScopeRbacRoleBindingRole { /** * CustomRole is the custom Kubernetes ClusterRole to be used. 
The custom role format must be allowlisted in the rbacrolebindingactuation feature and RFC 1123 compliant. */ customRole?: string; /** * PredefinedRole is an ENUM representation of the default Kubernetes Roles * Possible values are: `UNKNOWN`, `ADMIN`, `EDIT`, `VIEW`. */ predefinedRole?: string; } interface ScopeRbacRoleBindingState { /** * (Output) * Code describes the state of a RBAC Role Binding resource. */ code: string; } interface ScopeState { /** * (Output) * Code describes the state of a Scope resource. */ code: string; } } export declare namespace gkeonprem { interface BareMetalAdminClusterClusterOperations { /** * Whether collection of application logs/metrics should be enabled (in addition to system logs/metrics). */ enableApplicationLogs?: boolean; } interface BareMetalAdminClusterControlPlane { /** * Customizes the default API server args. Only a subset of * customized flags are supported. Please refer to the API server * documentation below to know the exact format: * https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ * Structure is documented below. */ apiServerArgs?: outputs.gkeonprem.BareMetalAdminClusterControlPlaneApiServerArg[]; /** * Configures the node pool running the control plane. If specified the corresponding NodePool will be created for the cluster's control plane. The NodePool will have the same name and namespace as the cluster. * Structure is documented below. */ controlPlaneNodePoolConfig: outputs.gkeonprem.BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfig; } interface BareMetalAdminClusterControlPlaneApiServerArg { /** * The argument name as it appears on the API Server command line please make sure to remove the leading dashes. */ argument: string; /** * The value of the arg as it will be passed to the API Server command line. */ value: string; } interface BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfig { /** * The generic configuration for a node pool running the control plane. 
* Structure is documented below. */ nodePoolConfig: outputs.gkeonprem.BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfig; } interface BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfig { /** * The labels assigned to nodes of this node pool. * An object containing a list of key/value pairs. * Example: * { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * Structure is documented below. */ nodeConfigs?: outputs.gkeonprem.BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigNodeConfig[]; /** * The available Operating Systems to be run in a Node. */ operatingSystem?: string; /** * Structure is documented below. */ taints?: outputs.gkeonprem.BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigTaint[]; } interface BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigNodeConfig { /** * The labels assigned to nodes of this node pool. * An object containing a list of key/value pairs. * Example: * { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * (Optional) */ nodeIp?: string; } interface BareMetalAdminClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigTaint { /** * Available taint effects. */ effect?: string; /** * (Optional) */ key?: string; /** * (Optional) */ value?: string; } interface BareMetalAdminClusterFleet { /** * (Output) * The name of the managed Hub Membership resource associated to this cluster. * Membership names are formatted as * `projects//locations//memberships/`. */ membership: string; } interface BareMetalAdminClusterLoadBalancer { /** * A nested object resource. * Structure is documented below. */ bgpLbConfig?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfig; /** * A nested object resource. * Structure is documented below. 
*/ manualLbConfig?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerManualLbConfig; /** * Specifies the load balancer ports. * Structure is documented below. */ portConfig: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerPortConfig; /** * Specified the Bare Metal Load Balancer Config * Structure is documented below. */ vipConfig: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerVipConfig; } interface BareMetalAdminClusterLoadBalancerBgpLbConfig { /** * a list of non-overlapping IP pools used * by load balancer typed services. * Structure is documented below. */ addressPools?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigAddressPool[]; /** * BGP autonomous system number (ASN) of the cluster. */ asn?: number; /** * BGP autonomous system number (ASN) of the cluster. * Structure is documented below. */ bgpPeerConfigs?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigBgpPeerConfig[]; /** * A nested object resource. * Structure is documented below. */ loadBalancerNodePoolConfig?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfig; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigAddressPool { /** * The addresses that are part of this pool. */ addresses?: string[]; /** * This avoids buggy consumer devices mistakenly * dropping IPv4 traffic for those special IP addresses. */ avoidBuggyIps?: boolean; /** * If true, prevent IP addresses from being automatically assigned. */ manualAssign?: boolean; /** * (Optional) */ pool?: string; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigBgpPeerConfig { /** * (Optional) */ asn?: number; /** * The IP address of the control plane node that * connects to the external peer. */ controlPlaneNodes?: string[]; /** * (Optional) */ ipAddress?: string; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfig { /** * A nested object resource. * Structure is documented below. 
*/ nodePoolConfig?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfig; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfig { /** * A nested object resource. * Structure is documented below. */ kubeletConfig?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigKubeletConfig; /** * The labels assigned to nodes of this node pool. * An object containing a list of key/value pairs. * Example: * { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * Structure is documented below. */ nodeConfigs?: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigNodeConfig[]; /** * The available Operating Systems to be run in a Node. */ operatingSystem?: string; /** * Structure is documented below. */ taints: outputs.gkeonprem.BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigTaint[]; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigKubeletConfig { /** * (Optional) */ registryBurst?: number; /** * (Optional) */ registryPullQps?: number; /** * (Optional) */ serializeImagePullsDisabled?: boolean; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigNodeConfig { /** * The labels assigned to nodes of this node pool. * An object containing a list of key/value pairs. * Example: * { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * (Optional) */ nodeIp?: string; } interface BareMetalAdminClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigTaint { /** * Available taint effects. 
*/ effect?: string; /** * (Optional) */ key?: string; /** * (Optional) */ value?: string; } interface BareMetalAdminClusterLoadBalancerManualLbConfig { /** * Whether manual load balancing is enabled. */ enabled: boolean; } interface BareMetalAdminClusterLoadBalancerPortConfig { /** * The port that control plane hosted load balancers will listen on. */ controlPlaneLoadBalancerPort: number; } interface BareMetalAdminClusterLoadBalancerVipConfig { /** * The VIP which you previously set aside for the Kubernetes API of this Bare Metal Admin Cluster. */ controlPlaneVip: string; } interface BareMetalAdminClusterMaintenanceConfig { /** * All IPv4 address from these ranges will be placed into maintenance mode. * Nodes in maintenance mode will be cordoned and drained. When both of these * are true, the "baremetal.cluster.gke.io/maintenance" annotation will be set * on the node resource. */ maintenanceAddressCidrBlocks: string[]; } interface BareMetalAdminClusterNetworkConfig { /** * Enables the use of advanced Anthos networking features. */ advancedNetworking?: boolean; /** * A nested object resource. * Structure is documented below. */ islandModeCidr?: outputs.gkeonprem.BareMetalAdminClusterNetworkConfigIslandModeCidr; /** * Configuration for multiple network interfaces. * Structure is documented below. */ multipleNetworkInterfacesConfig?: outputs.gkeonprem.BareMetalAdminClusterNetworkConfigMultipleNetworkInterfacesConfig; } interface BareMetalAdminClusterNetworkConfigIslandModeCidr { /** * All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. This field cannot be changed after creation. */ podAddressCidrBlocks: string[]; /** * All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. This field cannot be changed after creation. 
*/ serviceAddressCidrBlocks: string[]; } interface BareMetalAdminClusterNetworkConfigMultipleNetworkInterfacesConfig { /** * When set network_config.advanced_networking is automatically * set to true. */ enabled?: boolean; } interface BareMetalAdminClusterNodeAccessConfig { /** * LoginUser is the user name used to access node machines. * It defaults to "root" if not set. */ loginUser?: string; } interface BareMetalAdminClusterNodeConfig { /** * The maximum number of pods a node can run. The size of the CIDR range * assigned to the node will be derived from this parameter. */ maxPodsPerNode?: number; } interface BareMetalAdminClusterProxy { /** * A list of IPs, hostnames, and domains that should skip the proxy. * For example: ["127.0.0.1", "example.com", ".corp", "localhost"]. */ noProxies?: string[]; /** * Specifies the address of your proxy server. * For Example: http://domain * WARNING: Do not provide credentials in the format * of http://(username:password@)domain these will be rejected by the server. */ uri: string; } interface BareMetalAdminClusterSecurityConfig { /** * Configures user access to the Bare Metal User cluster. * Structure is documented below. */ authorization?: outputs.gkeonprem.BareMetalAdminClusterSecurityConfigAuthorization; } interface BareMetalAdminClusterSecurityConfigAuthorization { /** * Users that will be granted the cluster-admin role on the cluster, providing full access to the cluster. * Structure is documented below. */ adminUsers: outputs.gkeonprem.BareMetalAdminClusterSecurityConfigAuthorizationAdminUser[]; } interface BareMetalAdminClusterSecurityConfigAuthorizationAdminUser { /** * The name of the user, e.g. `my-gcp-id@gmail.com`. */ username: string; } interface BareMetalAdminClusterStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller. * Structure is documented below. 
*/ conditions: outputs.gkeonprem.BareMetalAdminClusterStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the admin cluster * controller. The error message can be temporary as the admin cluster * controller creates a cluster or node pool. If the error message persists * for a longer period of time, it can be used to surface error message to * indicate real problems requiring user intervention. */ errorMessage: string; } interface BareMetalAdminClusterStatusCondition { /** * (Output) * Last time the condition transit from one status to another. */ lastTransitionTime: string; /** * Human-readable message indicating details about last transition. */ message?: string; /** * (Output) * A human-readable message of the check failure. */ reason?: string; /** * (Output) * The lifecycle state of the condition. */ state: string; /** * Type of the condition. * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type?: string; } interface BareMetalAdminClusterStorage { /** * Specifies the config for local PersistentVolumes backed * by mounted node disks. These disks need to be formatted and mounted by the * user, which can be done before or after cluster creation. * Structure is documented below. */ lvpNodeMountsConfig: outputs.gkeonprem.BareMetalAdminClusterStorageLvpNodeMountsConfig; /** * Specifies the config for local PersistentVolumes backed by * subdirectories in a shared filesystem. These subdirectores are * automatically created during cluster creation. * Structure is documented below. */ lvpShareConfig: outputs.gkeonprem.BareMetalAdminClusterStorageLvpShareConfig; } interface BareMetalAdminClusterStorageLvpNodeMountsConfig { /** * The host machine path. */ path: string; /** * The StorageClass name that PVs will be created with. */ storageClass: string; } interface BareMetalAdminClusterStorageLvpShareConfig { /** * Defines the machine path and storage class for the LVP Share. * Structure is documented below. 
*/ lvpConfig: outputs.gkeonprem.BareMetalAdminClusterStorageLvpShareConfigLvpConfig; /** * The number of subdirectories to create under path. */ sharedPathPvCount?: number; } interface BareMetalAdminClusterStorageLvpShareConfigLvpConfig { /** * The host machine path. */ path: string; /** * The StorageClass name that PVs will be created with. */ storageClass: string; } interface BareMetalAdminClusterValidationCheck { /** * (Output) * Options used for the validation check. */ options: string; /** * (Output) * The scenario when the preflight checks were run.. */ scenario: string; /** * (Output) * Specifies the detailed validation check status * Structure is documented below. */ statuses: outputs.gkeonprem.BareMetalAdminClusterValidationCheckStatus[]; } interface BareMetalAdminClusterValidationCheckStatus { /** * (Output) * Individual checks which failed as part of the Preflight check execution. * Structure is documented below. */ results: outputs.gkeonprem.BareMetalAdminClusterValidationCheckStatusResult[]; } interface BareMetalAdminClusterValidationCheckStatusResult { /** * (Output) * The category of the validation. */ category: string; /** * A human readable description of this Bare Metal Admin Cluster. */ description: string; /** * (Output) * Detailed failure information, which might be unformatted. */ details: string; /** * (Output) * Options used for the validation check. */ options: string; /** * (Output) * A human-readable message of the check failure. */ reason: string; } interface BareMetalClusterBinaryAuthorization { /** * Mode of operation for binauthz policy evaluation. If unspecified, * defaults to DISABLED. * Possible values are: `DISABLED`, `PROJECT_SINGLETON_POLICY_ENFORCE`. */ evaluationMode?: string; } interface BareMetalClusterClusterOperations { /** * Whether collection of application logs/metrics should be enabled (in addition to system logs/metrics). 
*/ enableApplicationLogs?: boolean; } interface BareMetalClusterControlPlane { /** * Customizes the default API server args. Only a subset of * customized flags are supported. Please refer to the API server * documentation below to know the exact format: * https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ * Structure is documented below. */ apiServerArgs?: outputs.gkeonprem.BareMetalClusterControlPlaneApiServerArg[]; /** * Configures the node pool running the control plane. If specified the corresponding NodePool will be created for the cluster's control plane. The NodePool will have the same name and namespace as the cluster. * Structure is documented below. */ controlPlaneNodePoolConfig: outputs.gkeonprem.BareMetalClusterControlPlaneControlPlaneNodePoolConfig; } interface BareMetalClusterControlPlaneApiServerArg { /** * The argument name as it appears on the API Server command line please make sure to remove the leading dashes. */ argument: string; /** * The value of the arg as it will be passed to the API Server command line. */ value: string; } interface BareMetalClusterControlPlaneControlPlaneNodePoolConfig { /** * The generic configuration for a node pool running the control plane. * Structure is documented below. */ nodePoolConfig: outputs.gkeonprem.BareMetalClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfig; } interface BareMetalClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. 
For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels: { [key: string]: string; }; /** * The list of machine addresses in the Bare Metal Node Pool. * Structure is documented below. */ nodeConfigs?: outputs.gkeonprem.BareMetalClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigNodeConfig[]; /** * Specifies the nodes operating system (default: LINUX). */ operatingSystem?: string; /** * The initial taints assigned to nodes of this node pool. * Structure is documented below. */ taints: outputs.gkeonprem.BareMetalClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigTaint[]; } interface BareMetalClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigNodeConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * The default IPv4 address for SSH access and Kubernetes node. * Example: 192.168.0.1 */ nodeIp?: string; } interface BareMetalClusterControlPlaneControlPlaneNodePoolConfigNodePoolConfigTaint { /** * Specifies the nodes operating system (default: LINUX). * Possible values are: `EFFECT_UNSPECIFIED`, `PREFER_NO_SCHEDULE`, `NO_EXECUTE`. */ effect?: string; /** * Key associated with the effect. */ key?: string; /** * Value associated with the effect. 
*/ value?: string; } interface BareMetalClusterFleet { /** * (Output) * The name of the managed Hub Membership resource associated to this cluster. * Membership names are formatted as * `projects//locations//memberships/`. */ membership: string; } interface BareMetalClusterLoadBalancer { /** * Configuration for BGP typed load balancers. * Structure is documented below. */ bgpLbConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfig; /** * A nested object resource. * Structure is documented below. */ manualLbConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerManualLbConfig; /** * A nested object resource. * Structure is documented below. */ metalLbConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerMetalLbConfig; /** * Specifies the load balancer ports. * Structure is documented below. */ portConfig: outputs.gkeonprem.BareMetalClusterLoadBalancerPortConfig; /** * Specified the Bare Metal Load Balancer Config * Structure is documented below. */ vipConfig: outputs.gkeonprem.BareMetalClusterLoadBalancerVipConfig; } interface BareMetalClusterLoadBalancerBgpLbConfig { /** * AddressPools is a list of non-overlapping IP pools used by load balancer * typed services. All addresses must be routable to load balancer nodes. * IngressVIP must be included in the pools. * Structure is documented below. */ addressPools: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigAddressPool[]; /** * BGP autonomous system number (ASN) of the cluster. * This field can be updated after cluster creation. */ asn: number; /** * The list of BGP peers that the cluster will connect to. * At least one peer must be configured for each control plane node. * Control plane nodes will connect to these peers to advertise the control * plane VIP. The Services load balancer also uses these peers by default. * This field can be updated after cluster creation. * Structure is documented below. 
*/ bgpPeerConfigs: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigBgpPeerConfig[]; /** * Specifies the node pool running data plane load balancing. L2 connectivity * is required among nodes in this pool. If missing, the control plane node * pool is used for data plane load balancing. * Structure is documented below. */ loadBalancerNodePoolConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfig; } interface BareMetalClusterLoadBalancerBgpLbConfigAddressPool { /** * The addresses that are part of this pool. Each address must be either in the CIDR form (1.2.3.0/24) or range form (1.2.3.1-1.2.3.5). */ addresses: string[]; /** * If true, avoid using IPs ending in .0 or .255. * This avoids buggy consumer devices mistakenly dropping IPv4 traffic for those special IP addresses. */ avoidBuggyIps?: boolean; /** * If true, prevent IP addresses from being automatically assigned. */ manualAssign?: boolean; /** * The name of the address pool. */ pool: string; } interface BareMetalClusterLoadBalancerBgpLbConfigBgpPeerConfig { /** * BGP autonomous system number (ASN) for the network that contains the * external peer device. */ asn: number; /** * The IP address of the control plane node that connects to the external * peer. * If you don't specify any control plane nodes, all control plane nodes * can connect to the external peer. If you specify one or more IP addresses, * only the nodes specified participate in peering sessions. */ controlPlaneNodes?: string[]; /** * The IP address of the external peer device. */ ipAddress: string; } interface BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfig { /** * The generic configuration for a node pool running a load balancer. * Structure is documented below. 
*/ nodePoolConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfig; } interface BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfig { /** * The modifiable kubelet configurations for the baremetal machines. * Structure is documented below. */ kubeletConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigKubeletConfig; /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * The list of machine addresses in the Bare Metal Node Pool. * Structure is documented below. */ nodeConfigs?: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigNodeConfig[]; /** * Specifies the nodes operating system (default: LINUX). */ operatingSystem?: string; /** * The initial taints assigned to nodes of this node pool. * Structure is documented below. */ taints?: outputs.gkeonprem.BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigTaint[]; } interface BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigKubeletConfig { /** * The maximum size of bursty pulls, temporarily allows pulls to burst to this * number, while still not exceeding registry_pull_qps. * The value must not be a negative number. 
* Updating this field may impact scalability by changing the amount of * traffic produced by image pulls. * Defaults to 10. */ registryBurst?: number; /** * The limit of registry pulls per second. * Setting this value to 0 means no limit. * Updating this field may impact scalability by changing the amount of * traffic produced by image pulls. * Defaults to 5. */ registryPullQps?: number; /** * Prevents the Kubelet from pulling multiple images at a time. * We recommend *not* changing the default value on nodes that run docker * daemon with version < 1.9 or an Another Union File System (Aufs) storage * backend. Issue https://github.com/kubernetes/kubernetes/issues/10959 has * more details. */ serializeImagePullsDisabled?: boolean; } interface BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigNodeConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * The default IPv4 address for SSH access and Kubernetes node. * Example: 192.168.0.1 */ nodeIp?: string; } interface BareMetalClusterLoadBalancerBgpLbConfigLoadBalancerNodePoolConfigNodePoolConfigTaint { /** * Specifies the nodes operating system (default: LINUX). * Possible values are: `EFFECT_UNSPECIFIED`, `PREFER_NO_SCHEDULE`, `NO_EXECUTE`. */ effect?: string; /** * Key associated with the effect. */ key?: string; /** * Value associated with the effect. 
*/ value?: string; } interface BareMetalClusterLoadBalancerManualLbConfig { /** * Whether manual load balancing is enabled. */ enabled: boolean; } interface BareMetalClusterLoadBalancerMetalLbConfig { /** * AddressPools is a list of non-overlapping IP pools used by load balancer * typed services. All addresses must be routable to load balancer nodes. * IngressVIP must be included in the pools. * Structure is documented below. */ addressPools: outputs.gkeonprem.BareMetalClusterLoadBalancerMetalLbConfigAddressPool[]; /** * Specifies the load balancer's node pool configuration. * Structure is documented below. */ loadBalancerNodePoolConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfig; } interface BareMetalClusterLoadBalancerMetalLbConfigAddressPool { /** * The addresses that are part of this pool. Each address must be either in the CIDR form (1.2.3.0/24) or range form (1.2.3.1-1.2.3.5). */ addresses: string[]; /** * If true, avoid using IPs ending in .0 or .255. * This avoids buggy consumer devices mistakenly dropping IPv4 traffic for those special IP addresses. */ avoidBuggyIps?: boolean; /** * If true, prevent IP addresses from being automatically assigned. */ manualAssign?: boolean; /** * The name of the address pool. */ pool: string; } interface BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfig { /** * The generic configuration for a node pool running a load balancer. * Structure is documented below. */ nodePoolConfig?: outputs.gkeonprem.BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfigNodePoolConfig; } interface BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfigNodePoolConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. 
In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels: { [key: string]: string; }; /** * The list of machine addresses in the Bare Metal Node Pool. * Structure is documented below. */ nodeConfigs?: outputs.gkeonprem.BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfigNodePoolConfigNodeConfig[]; /** * Specifies the nodes operating system (default: LINUX). */ operatingSystem: string; /** * The initial taints assigned to nodes of this node pool. * Structure is documented below. */ taints: outputs.gkeonprem.BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfigNodePoolConfigTaint[]; } interface BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfigNodePoolConfigNodeConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * The default IPv4 address for SSH access and Kubernetes node. 
* Example: 192.168.0.1 */ nodeIp?: string; } interface BareMetalClusterLoadBalancerMetalLbConfigLoadBalancerNodePoolConfigNodePoolConfigTaint { /** * Specifies the nodes operating system (default: LINUX). * Possible values are: `EFFECT_UNSPECIFIED`, `PREFER_NO_SCHEDULE`, `NO_EXECUTE`. */ effect?: string; /** * Key associated with the effect. */ key?: string; /** * Value associated with the effect. */ value?: string; } interface BareMetalClusterLoadBalancerPortConfig { /** * The port that control plane hosted load balancers will listen on. */ controlPlaneLoadBalancerPort: number; } interface BareMetalClusterLoadBalancerVipConfig { /** * The VIP which you previously set aside for the Kubernetes API of this Bare Metal User Cluster. */ controlPlaneVip: string; /** * The VIP which you previously set aside for ingress traffic into this Bare Metal User Cluster. */ ingressVip: string; } interface BareMetalClusterMaintenanceConfig { /** * All IPv4 address from these ranges will be placed into maintenance mode. * Nodes in maintenance mode will be cordoned and drained. When both of these * are true, the "baremetal.cluster.gke.io/maintenance" annotation will be set * on the node resource. */ maintenanceAddressCidrBlocks: string[]; } interface BareMetalClusterNetworkConfig { /** * Enables the use of advanced Anthos networking features, such as Bundled * Load Balancing with BGP or the egress NAT gateway. * Setting configuration for advanced networking features will automatically * set this flag. */ advancedNetworking?: boolean; /** * A nested object resource. * Structure is documented below. */ islandModeCidr?: outputs.gkeonprem.BareMetalClusterNetworkConfigIslandModeCidr; /** * Configuration for multiple network interfaces. * Structure is documented below. */ multipleNetworkInterfacesConfig?: outputs.gkeonprem.BareMetalClusterNetworkConfigMultipleNetworkInterfacesConfig; /** * Configuration for SR-IOV. * Structure is documented below. 
*/ srIovConfig?: outputs.gkeonprem.BareMetalClusterNetworkConfigSrIovConfig; } interface BareMetalClusterNetworkConfigIslandModeCidr { /** * All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. This field cannot be changed after creation. */ podAddressCidrBlocks: string[]; /** * All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. This field cannot be changed after creation. */ serviceAddressCidrBlocks: string[]; } interface BareMetalClusterNetworkConfigMultipleNetworkInterfacesConfig { /** * Whether to enable multiple network interfaces for your pods. * When set network_config.advanced_networking is automatically * set to true. */ enabled?: boolean; } interface BareMetalClusterNetworkConfigSrIovConfig { /** * Whether to install the SR-IOV operator. */ enabled?: boolean; } interface BareMetalClusterNodeAccessConfig { /** * LoginUser is the user name used to access node machines. * It defaults to "root" if not set. */ loginUser: string; } interface BareMetalClusterNodeConfig { /** * The available runtimes that can be used to run containers in a Bare Metal User Cluster. * Possible values are: `CONTAINER_RUNTIME_UNSPECIFIED`, `DOCKER`, `CONTAINERD`. */ containerRuntime: string; /** * The maximum number of pods a node can run. The size of the CIDR range * assigned to the node will be derived from this parameter. */ maxPodsPerNode: number; } interface BareMetalClusterOsEnvironmentConfig { /** * Whether the package repo should not be included when initializing * bare metal machines. */ packageRepoExcluded: boolean; } interface BareMetalClusterProxy { /** * A list of IPs, hostnames, and domains that should skip the proxy. * For example ["127.0.0.1", "example.com", ".corp", "localhost"]. */ noProxies?: string[]; /** * Specifies the address of your proxy server. * For example: http://domain * WARNING: Do not provide credentials in the format * of http://(username:password@)domain these will be rejected by the server. 
*/ uri: string; } interface BareMetalClusterSecurityConfig { /** * Configures user access to the Bare Metal User cluster. * Structure is documented below. */ authorization?: outputs.gkeonprem.BareMetalClusterSecurityConfigAuthorization; } interface BareMetalClusterSecurityConfigAuthorization { /** * Users that will be granted the cluster-admin role on the cluster, providing full access to the cluster. * Structure is documented below. */ adminUsers: outputs.gkeonprem.BareMetalClusterSecurityConfigAuthorizationAdminUser[]; } interface BareMetalClusterSecurityConfigAuthorizationAdminUser { /** * The name of the user, e.g. `my-gcp-id@gmail.com`. */ username: string; } interface BareMetalClusterStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. * Structure is documented below. */ conditions: outputs.gkeonprem.BareMetalClusterStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the user cluster * controller. The error message can be temporary as the user cluster * controller creates a cluster or node pool. If the error message persists * for a longer period of time, it can be used to surface error message to * indicate real problems requiring user intervention. */ errorMessage: string; } interface BareMetalClusterStatusCondition { /** * (Output) * Last time the condition transit from one status to another. */ lastTransitionTime: string; /** * Human-readable message indicating details about last transition. */ message?: string; /** * (Output) * A human-readable message of the check failure. */ reason?: string; /** * (Output) * The lifecycle state of the condition. */ state: string; /** * Type of the condition. * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type?: string; } interface BareMetalClusterStorage { /** * Specifies the config for local PersistentVolumes backed * by mounted node disks. 
These disks need to be formatted and mounted by the
 * user, which can be done before or after cluster creation.
 * Structure is documented below.
 */
lvpNodeMountsConfig: outputs.gkeonprem.BareMetalClusterStorageLvpNodeMountsConfig;
/**
 * Specifies the config for local PersistentVolumes backed by
 * subdirectories in a shared filesystem. These subdirectories are
 * automatically created during cluster creation.
 * Structure is documented below.
 */
lvpShareConfig: outputs.gkeonprem.BareMetalClusterStorageLvpShareConfig;
}
interface BareMetalClusterStorageLvpNodeMountsConfig {
    /**
     * The host machine path.
     */
    path: string;
    /**
     * The StorageClass name that PVs will be created with.
     */
    storageClass: string;
}
interface BareMetalClusterStorageLvpShareConfig {
    /**
     * Defines the machine path and storage class for the LVP Share.
     * Structure is documented below.
     */
    lvpConfig: outputs.gkeonprem.BareMetalClusterStorageLvpShareConfigLvpConfig;
    /**
     * The number of subdirectories to create under path.
     */
    sharedPathPvCount?: number;
}
interface BareMetalClusterStorageLvpShareConfigLvpConfig {
    /**
     * The host machine path.
     */
    path: string;
    /**
     * The StorageClass name that PVs will be created with.
     */
    storageClass: string;
}
interface BareMetalClusterUpgradePolicy {
    /**
     * Specifies which upgrade policy to use.
     * Possible values are: `SERIAL`, `CONCURRENT`.
     */
    policy?: string;
}
interface BareMetalClusterValidationCheck {
    /**
     * (Output)
     * Options used for the validation check.
     */
    options: string;
    /**
     * (Output)
     * The scenario when the preflight checks were run.
     */
    scenario: string;
    /**
     * (Output)
     * Specifies the detailed validation check status
     * Structure is documented below.
     */
    statuses: outputs.gkeonprem.BareMetalClusterValidationCheckStatus[];
}
interface BareMetalClusterValidationCheckStatus {
    /**
     * (Output)
     * Individual checks which failed as part of the Preflight check execution.
     * Structure is documented below.
*/ results: outputs.gkeonprem.BareMetalClusterValidationCheckStatusResult[]; } interface BareMetalClusterValidationCheckStatusResult { /** * (Output) * The category of the validation. */ category: string; /** * A human readable description of this Bare Metal User Cluster. */ description: string; /** * (Output) * Detailed failure information, which might be unformatted. */ details: string; /** * (Output) * Options used for the validation check. */ options: string; /** * (Output) * A human-readable message of the check failure. */ reason: string; } interface BareMetalNodePoolNodePoolConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. In case of conflict in * label keys, the applied set may differ depending on the Kubernetes * version -- it's best to assume the behavior is undefined and * conflicts should be avoided. For more information, including usage * and the valid values, see: * - http://kubernetes.io/v1.1/docs/user-guide/labels.html * An object containing a list of "key": value pairs. * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels: { [key: string]: string; }; /** * The list of machine addresses in the Bare Metal Node Pool. * Structure is documented below. */ nodeConfigs: outputs.gkeonprem.BareMetalNodePoolNodePoolConfigNodeConfig[]; /** * Specifies the nodes operating system (default: LINUX). */ operatingSystem: string; /** * The initial taints assigned to nodes of this node pool. * Structure is documented below. */ taints: outputs.gkeonprem.BareMetalNodePoolNodePoolConfigTaint[]; } interface BareMetalNodePoolNodePoolConfigNodeConfig { /** * The map of Kubernetes labels (key/value pairs) to be applied to * each node. These will added in addition to any default label(s) * that Kubernetes may apply to the node. 
In case of conflict in
 * label keys, the applied set may differ depending on the Kubernetes
 * version -- it's best to assume the behavior is undefined and
 * conflicts should be avoided. For more information, including usage
 * and the valid values, see:
 * - http://kubernetes.io/v1.1/docs/user-guide/labels.html
 * An object containing a list of "key": value pairs.
 * For example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
 */
labels?: { [key: string]: string; };
/**
 * The default IPv4 address for SSH access and Kubernetes node.
 * Example: 192.168.0.1
 */
nodeIp?: string;
}
interface BareMetalNodePoolNodePoolConfigTaint {
    /**
     * Available taint effects.
     * Possible values are: `EFFECT_UNSPECIFIED`, `PREFER_NO_SCHEDULE`, `NO_EXECUTE`.
     */
    effect?: string;
    /**
     * Key associated with the effect.
     */
    key?: string;
    /**
     * Value associated with the effect.
     */
    value?: string;
}
interface BareMetalNodePoolStatus {
    /**
     * (Output)
     * ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller.
     * Structure is documented below.
     */
    conditions: outputs.gkeonprem.BareMetalNodePoolStatusCondition[];
    /**
     * (Output)
     * Human-friendly representation of the error message from the user cluster
     * controller. The error message can be temporary as the user cluster
     * controller creates a cluster or node pool. If the error message persists
     * for a longer period of time, it can be used to surface error message to
     * indicate real problems requiring user intervention.
     */
    errorMessage: string;
}
interface BareMetalNodePoolStatusCondition {
    /**
     * (Output)
     * Last time the condition transit from one status to another.
     */
    lastTransitionTime: string;
    /**
     * Human-readable message indicating details about last transition.
     */
    message?: string;
    /**
     * Machine-readable message indicating details about last transition.
     */
    reason?: string;
    /**
     * (Output)
     * The lifecycle state of the condition.
*/ state: string; /** * Type of the condition. * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type?: string; } interface VMwareClusterAntiAffinityGroups { /** * Spread nodes across at least three physical hosts (requires at least three * hosts). * Enabled by default. */ aagConfigDisabled: boolean; } interface VMwareClusterAuthorization { /** * Users that will be granted the cluster-admin role on the cluster, providing * full access to the cluster. * Structure is documented below. */ adminUsers?: outputs.gkeonprem.VMwareClusterAuthorizationAdminUser[]; } interface VMwareClusterAuthorizationAdminUser { /** * The name of the user, e.g. `my-gcp-id@gmail.com`. */ username: string; } interface VMwareClusterAutoRepairConfig { /** * Whether auto repair is enabled. */ enabled: boolean; } interface VMwareClusterControlPlaneNode { /** * AutoResizeConfig provides auto resizing configurations. * Structure is documented below. */ autoResizeConfig: outputs.gkeonprem.VMwareClusterControlPlaneNodeAutoResizeConfig; /** * The number of CPUs for each admin cluster node that serve as control planes * for this VMware User Cluster. (default: 4 CPUs) */ cpus?: number; /** * The megabytes of memory for each admin cluster node that serves as a * control plane for this VMware User Cluster (default: 8192 MB memory). */ memory?: number; /** * The number of control plane nodes for this VMware User Cluster. * (default: 1 replica). */ replicas?: number; /** * (Output) * Vsphere-specific config. * Structure is documented below. */ vsphereConfigs: outputs.gkeonprem.VMwareClusterControlPlaneNodeVsphereConfig[]; } interface VMwareClusterControlPlaneNodeAutoResizeConfig { /** * Whether to enable control plane node auto resizing. * * The `vsphereConfig` block contains: */ enabled: boolean; } interface VMwareClusterControlPlaneNodeVsphereConfig { /** * The Vsphere datastore used by the Control Plane Node. 
*/
datastore: string;
/**
 * The Vsphere storage policy used by the control plane Node.
 */
storagePolicyName: string;
}
interface VMwareClusterDataplaneV2 {
    /**
     * Enable advanced networking which requires dataplaneV2Enabled to be set true.
     */
    advancedNetworking?: boolean;
    /**
     * Enables Dataplane V2.
     */
    dataplaneV2Enabled?: boolean;
    /**
     * Enable Dataplane V2 for clusters with Windows nodes.
     */
    windowsDataplaneV2Enabled?: boolean;
}
interface VMwareClusterFleet {
    /**
     * (Output)
     * The name of the managed Hub Membership resource associated to this cluster.
     * Membership names are formatted as
     * `projects//locations//memberships/`.
     */
    membership: string;
}
interface VMwareClusterLoadBalancer {
    /**
     * Configuration for F5 Big IP typed load balancers.
     * Structure is documented below.
     */
    f5Config?: outputs.gkeonprem.VMwareClusterLoadBalancerF5Config;
    /**
     * Manually configured load balancers.
     * Structure is documented below.
     */
    manualLbConfig?: outputs.gkeonprem.VMwareClusterLoadBalancerManualLbConfig;
    /**
     * Configuration for MetalLB typed load balancers.
     * Structure is documented below.
     */
    metalLbConfig?: outputs.gkeonprem.VMwareClusterLoadBalancerMetalLbConfig;
    /**
     * The VIPs used by the load balancer.
     * Structure is documented below.
     */
    vipConfig?: outputs.gkeonprem.VMwareClusterLoadBalancerVipConfig;
}
interface VMwareClusterLoadBalancerF5Config {
    /**
     * The load balancer's IP address.
     */
    address?: string;
    /**
     * The preexisting partition to be used by the load balancer.
     * This partition is usually created for the admin cluster for example:
     * 'my-f5-admin-partition'.
     */
    partition?: string;
    /**
     * The pool name. Only necessary, if using SNAT.
     */
    snatPool: string;
}
interface VMwareClusterLoadBalancerManualLbConfig {
    /**
     * NodePort for control plane service. The Kubernetes API server in the admin
     * cluster is implemented as a Service of type NodePort (ex. 30968).
     */
    controlPlaneNodePort: number;
    /**
     * NodePort for ingress service's http.
The ingress service in the admin * cluster is implemented as a Service of type NodePort (ex. 32527). */ ingressHttpNodePort: number; /** * NodePort for ingress service's https. The ingress service in the admin * cluster is implemented as a Service of type NodePort (ex. 30139). */ ingressHttpsNodePort: number; /** * NodePort for konnectivity server service running as a sidecar in each * kube-apiserver pod (ex. 30564). */ konnectivityServerNodePort: number; } interface VMwareClusterLoadBalancerMetalLbConfig { /** * AddressPools is a list of non-overlapping IP pools used by load balancer * typed services. All addresses must be routable to load balancer nodes. * IngressVIP must be included in the pools. * Structure is documented below. */ addressPools: outputs.gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPool[]; } interface VMwareClusterLoadBalancerMetalLbConfigAddressPool { /** * The addresses that are part of this pool. Each address * must be either in the CIDR form (1.2.3.0/24) or range * form (1.2.3.1-1.2.3.5). */ addresses: string[]; /** * If true, avoid using IPs ending in .0 or .255. * This avoids buggy consumer devices mistakenly dropping IPv4 traffic for * those special IP addresses. */ avoidBuggyIps: boolean; /** * If true, prevent IP addresses from being automatically assigned. * * The `dataplaneV2` block supports: */ manualAssign: boolean; /** * The name of the address pool. */ pool: string; } interface VMwareClusterLoadBalancerVipConfig { /** * The VIP which you previously set aside for the Kubernetes API of this cluster. */ controlPlaneVip?: string; /** * The VIP which you previously set aside for ingress traffic into this cluster. * * The `f5Config` block supports: */ ingressVip?: string; } interface VMwareClusterNetworkConfig { /** * Configuration for control plane V2 mode. * Structure is documented below. 
*/ controlPlaneV2Config?: outputs.gkeonprem.VMwareClusterNetworkConfigControlPlaneV2Config; /** * Configuration settings for a DHCP IP configuration. * Structure is documented below. */ dhcpIpConfig: outputs.gkeonprem.VMwareClusterNetworkConfigDhcpIpConfig; /** * Represents common network settings irrespective of the host's IP address. * Structure is documented below. */ hostConfig: outputs.gkeonprem.VMwareClusterNetworkConfigHostConfig; /** * All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. * Only a single range is supported. This field cannot be changed after creation. */ podAddressCidrBlocks: string[]; /** * All services in the cluster are assigned an RFC1918 IPv4 address * from these ranges. Only a single range is supported.. This field * cannot be changed after creation. */ serviceAddressCidrBlocks: string[]; /** * Configuration settings for a static IP configuration. * Structure is documented below. */ staticIpConfig?: outputs.gkeonprem.VMwareClusterNetworkConfigStaticIpConfig; /** * vcenter_network specifies vCenter network name. Inherited from the admin cluster. */ vcenterNetwork: string; } interface VMwareClusterNetworkConfigControlPlaneV2Config { /** * Static IP addresses for the control plane nodes. */ controlPlaneIpBlock?: outputs.gkeonprem.VMwareClusterNetworkConfigControlPlaneV2ConfigControlPlaneIpBlock; } interface VMwareClusterNetworkConfigControlPlaneV2ConfigControlPlaneIpBlock { /** * The network gateway used by the VMware User Cluster. */ gateway?: string; /** * The node's network configurations used by the VMware User Cluster. * Structure is documented below. */ ips?: outputs.gkeonprem.VMwareClusterNetworkConfigControlPlaneV2ConfigControlPlaneIpBlockIp[]; /** * The netmask used by the VMware User Cluster. */ netmask?: string; } interface VMwareClusterNetworkConfigControlPlaneV2ConfigControlPlaneIpBlockIp { /** * Hostname of the machine. VM's name will be used if this field is empty. 
*/ hostname: string; /** * IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24). */ ip?: string; } interface VMwareClusterNetworkConfigDhcpIpConfig { /** * enabled is a flag to mark if DHCP IP allocation is * used for VMware user clusters. */ enabled: boolean; } interface VMwareClusterNetworkConfigHostConfig { /** * DNS search domains. * * The `controlPlaneV2Config` block supports: */ dnsSearchDomains?: string[]; /** * DNS servers. */ dnsServers?: string[]; /** * NTP servers. */ ntpServers?: string[]; } interface VMwareClusterNetworkConfigStaticIpConfig { /** * Represents the configuration values for static IP allocation to nodes. * Structure is documented below. */ ipBlocks: outputs.gkeonprem.VMwareClusterNetworkConfigStaticIpConfigIpBlock[]; } interface VMwareClusterNetworkConfigStaticIpConfigIpBlock { /** * The network gateway used by the VMware User Cluster. */ gateway: string; /** * The node's network configurations used by the VMware User Cluster. * Structure is documented below. */ ips: outputs.gkeonprem.VMwareClusterNetworkConfigStaticIpConfigIpBlockIp[]; /** * The netmask used by the VMware User Cluster. */ netmask: string; } interface VMwareClusterNetworkConfigStaticIpConfigIpBlockIp { /** * Hostname of the machine. VM's name will be used if this field is empty. */ hostname: string; /** * IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24). */ ip: string; } interface VMwareClusterStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. * Structure is documented below. */ conditions: outputs.gkeonprem.VMwareClusterStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the user cluster * controller. The error message can be temporary as the user cluster * controller creates a cluster or node pool. 
If the error message persists * for a longer period of time, it can be used to surface error message to * indicate real problems requiring user intervention. */ errorMessage: string; } interface VMwareClusterStatusCondition { /** * (Output) * Last time the condition transit from one status to another. */ lastTransitionTime: string; /** * (Output) * Human-readable message indicating details about last transition. */ message: string; /** * (Output) * Machine-readable message indicating details about last transition. */ reason: string; /** * (Output) * The lifecycle state of the condition. */ state: string; /** * (Output) * Type of the condition. * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type: string; } interface VMwareClusterStorage { /** * Whether or not to deploy vSphere CSI components in the VMware User Cluster. * Enabled by default. */ vsphereCsiDisabled: boolean; } interface VMwareClusterUpgradePolicy { /** * Controls whether the upgrade applies to the control plane only. */ controlPlaneOnly?: boolean; } interface VMwareClusterValidationCheck { /** * (Output) * Options used for the validation check. */ options: string; /** * (Output) * The scenario when the preflight checks were run.. */ scenario: string; /** * (Output) * Specifies the detailed validation check status * Structure is documented below. */ statuses: outputs.gkeonprem.VMwareClusterValidationCheckStatus[]; } interface VMwareClusterValidationCheckStatus { /** * (Output) * Individual checks which failed as part of the Preflight check execution. * Structure is documented below. */ results: outputs.gkeonprem.VMwareClusterValidationCheckStatusResult[]; } interface VMwareClusterValidationCheckStatusResult { /** * (Output) * The category of the validation. */ category: string; /** * A human readable description of this VMware User Cluster. */ description: string; /** * (Output) * Detailed failure information, which might be unformatted. 
*/ details: string; /** * (Output) * Options used for the validation check. */ options: string; /** * (Output) * Machine-readable message indicating details about last transition. */ reason: string; } interface VMwareClusterVcenter { /** * (Output) * The vCenter IP address. */ address: string; /** * Contains the vCenter CA certificate public key for SSL verification. */ caCertData: string; /** * The name of the vCenter cluster for the user cluster. */ cluster: string; /** * The name of the vCenter datacenter for the user cluster. */ datacenter: string; /** * The name of the vCenter datastore for the user cluster. */ datastore: string; /** * The name of the vCenter folder for the user cluster. */ folder: string; /** * The name of the vCenter resource pool for the user cluster. */ resourcePool: string; /** * The name of the vCenter storage policy for the user cluster. */ storagePolicyName: string; } interface VMwareNodePoolConfig { /** * VMware disk size to be used during creation. */ bootDiskSizeGb?: number; /** * The number of CPUs for each node in the node pool. */ cpus?: number; /** * Allow node pool traffic to be load balanced. Only works for clusters with * MetalLB load balancers. */ enableLoadBalancer?: boolean; /** * The OS image name in vCenter, only valid when using Windows. */ image: string; /** * The OS image to be used for each node in a node pool. * Currently `cos`, `cosCgv2`, `ubuntu`, `ubuntuCgv2`, `ubuntuContainerd` and `windows` are supported. */ imageType: string; /** * The map of Kubernetes labels (key/value pairs) to be applied to each node. * These will added in addition to any default label(s) that * Kubernetes may apply to the node. * In case of conflict in label keys, the applied set may differ depending on * the Kubernetes version -- it's best to assume the behavior is undefined * and conflicts should be avoided. */ labels: { [key: string]: string; }; /** * The megabytes of memory for each node in the node pool. 
*/ memoryMb?: number; /** * The number of nodes in the node pool. */ replicas?: number; /** * The initial taints assigned to nodes of this node pool. * Structure is documented below. */ taints?: outputs.gkeonprem.VMwareNodePoolConfigTaint[]; /** * Specifies the vSphere config for node pool. * Structure is documented below. */ vsphereConfig?: outputs.gkeonprem.VMwareNodePoolConfigVsphereConfig; } interface VMwareNodePoolConfigTaint { /** * Available taint effects. * Possible values are: `EFFECT_UNSPECIFIED`, `NO_SCHEDULE`, `PREFER_NO_SCHEDULE`, `NO_EXECUTE`. */ effect?: string; /** * Key associated with the effect. */ key: string; /** * Value associated with the effect. */ value: string; } interface VMwareNodePoolConfigVsphereConfig { /** * The name of the vCenter datastore. Inherited from the user cluster. */ datastore?: string; /** * Vsphere host groups to apply to all VMs in the node pool */ hostGroups?: string[]; /** * Tags to apply to VMs. * Structure is documented below. */ tags?: outputs.gkeonprem.VMwareNodePoolConfigVsphereConfigTag[]; } interface VMwareNodePoolConfigVsphereConfigTag { /** * The Vsphere tag category. */ category?: string; /** * The Vsphere tag name. */ tag?: string; } interface VMwareNodePoolNodePoolAutoscaling { /** * Maximum number of replicas in the NodePool. */ maxReplicas: number; /** * Minimum number of replicas in the NodePool. */ minReplicas: number; } interface VMwareNodePoolStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. * Structure is documented below. */ conditions: outputs.gkeonprem.VMwareNodePoolStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the user cluster * controller. The error message can be temporary as the user cluster * controller creates a cluster or node pool. 
If the error message persists
 * for a longer period of time, it can be used to surface error message to
 * indicate real problems requiring user intervention.
 */
errorMessage: string;
}
interface VMwareNodePoolStatusCondition {
    /**
     * (Output)
     * Last time the condition transit from one status to another.
     */
    lastTransitionTime: string;
    /**
     * (Output)
     * Human-readable message indicating details about last transition.
     */
    message: string;
    /**
     * (Output)
     * Machine-readable message indicating details about last transition.
     */
    reason: string;
    /**
     * (Output)
     * The lifecycle state of the condition.
     */
    state: string;
    /**
     * (Output)
     * Type of the condition.
     * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
     */
    type: string;
}
interface VmwareAdminClusterAddonNode {
    /**
     * Specifies auto resize config.
     * Structure is documented below.
     */
    autoResizeConfig: outputs.gkeonprem.VmwareAdminClusterAddonNodeAutoResizeConfig;
}
interface VmwareAdminClusterAddonNodeAutoResizeConfig {
    /**
     * Whether to enable control plane node auto resizing.
     */
    enabled: boolean;
}
interface VmwareAdminClusterAntiAffinityGroups {
    /**
     * Spread nodes across at least three physical hosts (requires at least three
     * hosts).
     * Enabled by default.
     */
    aagConfigDisabled: boolean;
}
interface VmwareAdminClusterAuthorization {
    /**
     * Users that will be granted the cluster-admin role on the cluster, providing
     * full access to the cluster.
     * Structure is documented below.
     */
    viewerUsers?: outputs.gkeonprem.VmwareAdminClusterAuthorizationViewerUser[];
}
interface VmwareAdminClusterAuthorizationViewerUser {
    /**
     * The name of the user, e.g. `my-gcp-id@gmail.com`.
     */
    username: string;
}
interface VmwareAdminClusterAutoRepairConfig {
    /**
     * Whether auto repair is enabled.
     */
    enabled: boolean;
}
interface VmwareAdminClusterControlPlaneNode {
    /**
     * The number of vCPUs for the control-plane node of the admin cluster.
*/
cpus?: number;
/**
 * The number of mebibytes of memory for the control-plane node of the admin cluster.
 */
memory?: number;
/**
 * The number of control plane nodes for this VMware admin cluster.
 */
replicas?: number;
}
interface VmwareAdminClusterFleet {
    /**
     * (Output)
     * The name of the managed Fleet Membership resource associated to this cluster.
     * Membership names are formatted as
     * `projects//locations//memberships/`.
     */
    membership: string;
}
interface VmwareAdminClusterLoadBalancer {
    /**
     * Configuration for F5 Big IP typed load balancers.
     * Structure is documented below.
     */
    f5Config?: outputs.gkeonprem.VmwareAdminClusterLoadBalancerF5Config;
    /**
     * Manually configured load balancers.
     * Structure is documented below.
     */
    manualLbConfig?: outputs.gkeonprem.VmwareAdminClusterLoadBalancerManualLbConfig;
    /**
     * Metal LB load balancers.
     * Structure is documented below.
     */
    metalLbConfig?: outputs.gkeonprem.VmwareAdminClusterLoadBalancerMetalLbConfig;
    /**
     * Specifies the VMware Load Balancer Config
     * Structure is documented below.
     */
    vipConfig: outputs.gkeonprem.VmwareAdminClusterLoadBalancerVipConfig;
}
interface VmwareAdminClusterLoadBalancerF5Config {
    /**
     * The load balancer's IP address.
     */
    address?: string;
    /**
     * The preexisting partition to be used by the load balancer.
     * This partition is usually created for the admin cluster for example:
     * 'my-f5-admin-partition'.
     */
    partition?: string;
    /**
     * The pool name. Only necessary, if using SNAT.
     */
    snatPool?: string;
}
interface VmwareAdminClusterLoadBalancerManualLbConfig {
    /**
     * NodePort for add-ons server in the admin cluster.
     */
    addonsNodePort: number;
    /**
     * NodePort for control plane service. The Kubernetes API server in the admin
     * cluster is implemented as a Service of type NodePort (ex. 30968).
     */
    controlPlaneNodePort: number;
    /**
     * NodePort for ingress service's http. The ingress service in the admin
     * cluster is implemented as a Service of type NodePort (ex. 32527).
*/ ingressHttpNodePort: number; /** * NodePort for ingress service's https. The ingress service in the admin * cluster is implemented as a Service of type NodePort (ex. 30139). */ ingressHttpsNodePort: number; /** * NodePort for konnectivity server service running as a sidecar in each * kube-apiserver pod (ex. 30564). */ konnectivityServerNodePort: number; } interface VmwareAdminClusterLoadBalancerMetalLbConfig { /** * Metal LB is enabled. */ enabled?: boolean; } interface VmwareAdminClusterLoadBalancerVipConfig { /** * The VIP to configure the load balancer for add-ons. * * The `f5Config` block supports: */ addonsVip?: string; /** * The VIP which you previously set aside for the Kubernetes * API of this VMware Admin Cluster. */ controlPlaneVip: string; } interface VmwareAdminClusterNetworkConfig { /** * Configuration settings for a DHCP IP configuration. * Structure is documented below. */ dhcpIpConfig: outputs.gkeonprem.VmwareAdminClusterNetworkConfigDhcpIpConfig; /** * Configuration for HA admin cluster control plane. * Structure is documented below. */ haControlPlaneConfig?: outputs.gkeonprem.VmwareAdminClusterNetworkConfigHaControlPlaneConfig; /** * Represents common network settings irrespective of the host's IP address. * Structure is documented below. */ hostConfig: outputs.gkeonprem.VmwareAdminClusterNetworkConfigHostConfig; /** * All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. * Only a single range is supported. This field cannot be changed after creation. */ podAddressCidrBlocks: string[]; /** * All services in the cluster are assigned an RFC1918 IPv4 address * from these ranges. Only a single range is supported.. This field * cannot be changed after creation. */ serviceAddressCidrBlocks: string[]; /** * Configuration settings for a static IP configuration. * Structure is documented below. 
*/ staticIpConfig?: outputs.gkeonprem.VmwareAdminClusterNetworkConfigStaticIpConfig; /** * vcenter_network specifies vCenter network name. */ vcenterNetwork?: string; } interface VmwareAdminClusterNetworkConfigDhcpIpConfig { /** * enabled is a flag to mark if DHCP IP allocation is * used for VMware admin clusters. */ enabled: boolean; } interface VmwareAdminClusterNetworkConfigHaControlPlaneConfig { /** * Static IP addresses for the control plane nodes. * Structure is documented below. */ controlPlaneIpBlock?: outputs.gkeonprem.VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlock; } interface VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlock { /** * The network gateway used by the VMware Admin Cluster. */ gateway: string; /** * The node's network configurations used by the VMware Admin Cluster. * Structure is documented below. */ ips: outputs.gkeonprem.VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIp[]; /** * The netmask used by the VMware Admin Cluster. */ netmask: string; } interface VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIp { /** * Hostname of the machine. VM's name will be used if this field is empty. */ hostname: string; /** * IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24). */ ip: string; } interface VmwareAdminClusterNetworkConfigHostConfig { /** * DNS search domains. */ dnsSearchDomains?: string[]; /** * DNS servers. */ dnsServers?: string[]; /** * NTP servers. */ ntpServers?: string[]; } interface VmwareAdminClusterNetworkConfigStaticIpConfig { /** * Represents the configuration values for static IP allocation to nodes. * Structure is documented below. */ ipBlocks?: outputs.gkeonprem.VmwareAdminClusterNetworkConfigStaticIpConfigIpBlock[]; } interface VmwareAdminClusterNetworkConfigStaticIpConfigIpBlock { /** * The network gateway used by the VMware Admin Cluster. 
*/ gateway: string; /** * The node's network configurations used by the VMware Admin Cluster. * Structure is documented below. */ ips: outputs.gkeonprem.VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIp[]; /** * The netmask used by the VMware Admin Cluster. */ netmask: string; } interface VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIp { /** * Hostname of the machine. VM's name will be used if this field is empty. */ hostname: string; /** * IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24). */ ip: string; } interface VmwareAdminClusterPlatformConfig { /** * (Output) * The list of bundles installed in the admin cluster. * Structure is documented below. */ bundles: outputs.gkeonprem.VmwareAdminClusterPlatformConfigBundle[]; /** * (Output) * The platform version e.g. 1.13.2. */ platformVersion: string; /** * The required platform version e.g. 1.13.1. * If the current platform version is lower than the target version, * the platform version will be updated to the target version. * If the target version is not installed in the platform * (bundle versions), download the target version bundle. */ requiredPlatformVersion?: string; /** * (Output) * ResourceStatus representing detailed cluster state. * Structure is documented below. * * * The `status` block contains: */ statuses: outputs.gkeonprem.VmwareAdminClusterPlatformConfigStatus[]; } interface VmwareAdminClusterPlatformConfigBundle { /** * ResourceStatus representing detailed cluster state. * Structure is documented below. */ statuses: outputs.gkeonprem.VmwareAdminClusterPlatformConfigBundleStatus[]; /** * The version of the bundle. */ version: string; } interface VmwareAdminClusterPlatformConfigBundleStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller. * Structure is documented below. 
*/ conditions: outputs.gkeonprem.VmwareAdminClusterPlatformConfigBundleStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the admin cluster * controller. The error message can be temporary as the admin cluster * controller creates a cluster or node pool. If the error message persists * for a longer period of time, it can be used to surface error message to * indicate real problems requiring user intervention. */ errorMessage: string; } interface VmwareAdminClusterPlatformConfigBundleStatusCondition { /** * (Output) * Last time the condition transit from one status to another. */ lastTransitionTime: string; /** * (Output) * Human-readable message indicating details about last transition. */ message: string; /** * (Output) * Machine-readable message indicating details about last transition. */ reason: string; /** * (Output) * The lifecycle state of the condition. */ state: string; /** * (Output) * Type of the condition. * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type: string; } interface VmwareAdminClusterPlatformConfigStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller. * Structure is documented below. */ conditions: outputs.gkeonprem.VmwareAdminClusterPlatformConfigStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the admin cluster * controller. The error message can be temporary as the admin cluster * controller creates a cluster or node pool. If the error message persists * for a longer period of time, it can be used to surface error message to * indicate real problems requiring user intervention. */ errorMessage: string; } interface VmwareAdminClusterPlatformConfigStatusCondition { /** * (Output) * Last time the condition transit from one status to another. 
*/ lastTransitionTime: string; /** * (Output) * Human-readable message indicating details about last transition. */ message: string; /** * (Output) * Machine-readable message indicating details about last transition. */ reason: string; /** * (Output) * The lifecycle state of the condition. */ state: string; /** * (Output) * Type of the condition. * (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type: string; } interface VmwareAdminClusterPrivateRegistryConfig { /** * The registry address. */ address?: string; /** * The CA certificate public key for private registry. */ caCert?: string; } interface VmwareAdminClusterProxy { /** * A comma-separated list of IP addresses, IP address ranges, * host names, and domain names that should not go through the proxy server. */ noProxy?: string; /** * The proxy url. */ url: string; } interface VmwareAdminClusterStatus { /** * (Output) * ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller. * Structure is documented below. */ conditions: outputs.gkeonprem.VmwareAdminClusterStatusCondition[]; /** * (Output) * Human-friendly representation of the error message from the admin cluster * controller. The error message can be temporary as the admin cluster * controller creates a cluster or node pool. If the error message persists * for a longer period of time, it can be used to surface error message to * indicate real problems requiring user intervention. */ errorMessage: string; } interface VmwareAdminClusterStatusCondition { /** * (Output) * Last time the condition transit from one status to another. */ lastTransitionTime: string; /** * (Output) * Human-readable message indicating details about last transition. */ message: string; /** * (Output) * Machine-readable message indicating details about last transition. */ reason: string; /** * (Output) * The lifecycle state of the condition. */ state: string; /** * (Output) * Type of the condition. 
* (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady) */ type: string; } interface VmwareAdminClusterVcenter { /** * The vCenter IP address. */ address?: string; /** * Contains the vCenter CA certificate public key for SSL verification. */ caCertData?: string; /** * The name of the vCenter cluster for the admin cluster. */ cluster?: string; /** * The name of the virtual machine disk (VMDK) for the admin cluster. */ dataDisk?: string; /** * The name of the vCenter datacenter for the admin cluster. */ datacenter?: string; /** * The name of the vCenter datastore for the admin cluster. */ datastore?: string; /** * The name of the vCenter folder for the admin cluster. */ folder?: string; /** * The name of the vCenter resource pool for the admin cluster. */ resourcePool?: string; /** * The name of the vCenter storage policy for the user cluster. */ storagePolicyName?: string; } } export declare namespace healthcare { interface ConsentStoreIamBindingCondition { description?: string; expression: string; title: string; } interface ConsentStoreIamMemberCondition { description?: string; expression: string; title: string; } interface DatasetEncryptionSpec { /** * KMS encryption key that is used to secure this dataset and its sub-resources. The key used for * encryption and the dataset must be in the same location. If empty, the default Google encryption * key will be used to secure this dataset. The format is * projects/{projectId}/locations/{locationId}/keyRings/{keyRingId}/cryptoKeys/{keyId}. 
*/ kmsKeyName?: string; } interface DatasetIamBindingCondition { description?: string; expression: string; title: string; } interface DatasetIamMemberCondition { description?: string; expression: string; title: string; } interface DicomStoreIamBindingCondition { description?: string; expression: string; title: string; } interface DicomStoreIamMemberCondition { description?: string; expression: string; title: string; } interface DicomStoreNotificationConfig { /** * The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. * PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. * It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message * was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a * project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given * Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. */ pubsubTopic: string; /** * Indicates whether or not to send Pub/Sub notifications on bulk import. Only supported for DICOM imports. */ sendForBulkImport?: boolean; } interface DicomStoreStreamConfig { /** * BigQueryDestination to include a fully qualified BigQuery table URI where DICOM instance metadata will be streamed. * Structure is documented below. */ bigqueryDestination: outputs.healthcare.DicomStoreStreamConfigBigqueryDestination; } interface DicomStoreStreamConfigBigqueryDestination { /** * a fully qualified BigQuery table URI where DICOM instance metadata will be streamed. */ tableUri: string; } interface FhirStoreConsentConfig { /** * Specifies how the server logs the consent-aware requests. If not specified, the AccessDeterminationLogConfig.LogLevel.MINIMUM option is used. * Structure is documented below. 
*/ accessDeterminationLogConfig?: outputs.healthcare.FhirStoreConsentConfigAccessDeterminationLogConfig; /** * The default value is false. If set to true, when accessing FHIR resources, the consent headers will be verified against consents given by patients. See the ConsentEnforcementVersion for the supported consent headers. */ accessEnforced?: boolean; /** * Different options to configure the behaviour of the server when handling the X-Consent-Scope header. * Structure is documented below. */ consentHeaderHandling?: outputs.healthcare.FhirStoreConsentConfigConsentHeaderHandling; /** * (Output) * The versioned names of the enforced admin Consent resource(s), in the format projects/{projectId}/locations/{location}/datasets/{datasetId}/fhirStores/{fhirStoreId}/fhir/Consent/{resourceId}/_history/{version_id}. For FHIR stores with disableResourceVersioning=true, the format is projects/{projectId}/locations/{location}/datasets/{datasetId}/fhirStores/{fhirStoreId}/fhir/Consent/{resourceId}. This field can only be updated using [fhirStores.applyAdminConsents][]. */ enforcedAdminConsents: string[]; /** * Specifies which consent enforcement version is being used for this FHIR store. This field can only be set once by either [fhirStores.create][] or [fhirStores.patch][]. After that, you must call [fhirStores.applyConsents][] to change the version. * Possible values are: `CONSENT_ENFORCEMENT_VERSION_UNSPECIFIED`, `V1`. */ version: string; } interface FhirStoreConsentConfigAccessDeterminationLogConfig { /** * Controls the amount of detail to include as part of the audit logs. * Default value is `MINIMUM`. * Possible values are: `LOG_LEVEL_UNSPECIFIED`, `DISABLED`, `MINIMUM`, `VERBOSE`. */ logLevel?: string; } interface FhirStoreConsentConfigConsentHeaderHandling { /** * Specifies the default server behavior when the header is empty. If not specified, the ScopeProfile.PERMIT_EMPTY_SCOPE option is used. * Default value is `PERMIT_EMPTY_SCOPE`. 
* Possible values are: `SCOPE_PROFILE_UNSPECIFIED`, `PERMIT_EMPTY_SCOPE`, `REQUIRED_ON_READ`. */ profile?: string; } interface FhirStoreIamBindingCondition { description?: string; expression: string; title: string; } interface FhirStoreIamMemberCondition { description?: string; expression: string; title: string; } interface FhirStoreNotificationConfig { /** * The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. * PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. * It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message * was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a * project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given * Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. */ pubsubTopic: string; /** * Whether to send full FHIR resource to this Pub/Sub topic for Create and Update operation. * Note that setting this to true does not guarantee that all resources will be sent in the format of * full FHIR resource. When a resource change is too large or during heavy traffic, only the resource name will be * sent. Clients should always check the "payloadType" label from a Pub/Sub message to determine whether * it needs to fetch the full resource as a separate operation. */ sendFullResource?: boolean; /** * Whether to send full FHIR resource to this Pub/Sub topic for deleting FHIR resource. Note that setting this to * true does not guarantee that all previous resources will be sent in the format of full FHIR resource. When a * resource change is too large or during heavy traffic, only the resource name will be sent. 
Clients should always * check the "payloadType" label from a Pub/Sub message to determine whether it needs to fetch the full previous * resource as a separate operation. */ sendPreviousResourceOnDelete?: boolean; } interface FhirStoreStreamConfig { /** * The destination BigQuery structure that contains both the dataset location and corresponding schema config. * The output is organized in one table per resource type. The server reuses the existing tables (if any) that * are named after the resource types, e.g. "Patient", "Observation". When there is no existing table for a given * resource type, the server attempts to create one. * See the [streaming config reference](https://cloud.google.com/healthcare/docs/reference/rest/v1beta1/projects.locations.datasets.fhirStores#streamconfig) for more details. * Structure is documented below. */ bigqueryDestination: outputs.healthcare.FhirStoreStreamConfigBigqueryDestination; /** * Supply a FHIR resource type (such as "Patient" or "Observation"). See * https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. The server treats * an empty list as an intent to stream all the supported resource types in this FHIR store. */ resourceTypes?: string[]; } interface FhirStoreStreamConfigBigqueryDestination { /** * BigQuery URI to a dataset, up to 2000 characters long, in the format bq://projectId.bqDatasetId */ datasetUri: string; /** * The configuration for the exported BigQuery schema. * Structure is documented below. */ schemaConfig: outputs.healthcare.FhirStoreStreamConfigBigqueryDestinationSchemaConfig; } interface FhirStoreStreamConfigBigqueryDestinationSchemaConfig { /** * The configuration for exported BigQuery tables to be partitioned by FHIR resource's last updated time column. * Structure is documented below. 
*/ lastUpdatedPartitionConfig?: outputs.healthcare.FhirStoreStreamConfigBigqueryDestinationSchemaConfigLastUpdatedPartitionConfig; /** * The depth for all recursive structures in the output analytics schema. For example, concept in the CodeSystem * resource is a recursive structure; when the depth is 2, the CodeSystem table will have a column called * concept.concept but not concept.concept.concept. If not specified or set to 0, the server will use the default * value 2. The maximum depth allowed is 5. */ recursiveStructureDepth: number; /** * Specifies the output schema type. * * ANALYTICS: Analytics schema defined by the FHIR community. * See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. * * ANALYTICS_V2: Analytics V2, similar to schema defined by the FHIR community, with added support for extensions with one or more occurrences and contained resources in stringified JSON. * * LOSSLESS: A data-driven schema generated from the fields present in the FHIR data being exported, with no additional simplification. * Default value is `ANALYTICS`. * Possible values are: `ANALYTICS`, `ANALYTICS_V2`, `LOSSLESS`. */ schemaType?: string; } interface FhirStoreStreamConfigBigqueryDestinationSchemaConfigLastUpdatedPartitionConfig { /** * Number of milliseconds for which to keep the storage for a partition. */ expirationMs?: string; /** * Type of partitioning. * Possible values are: `PARTITION_TYPE_UNSPECIFIED`, `HOUR`, `DAY`, `MONTH`, `YEAR`. */ type: string; } interface FhirStoreValidationConfig { /** * Whether to disable FHIRPath validation for incoming resources. The default value is false. Set this to true to disable checking incoming resources for conformance against FHIRPath requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced. 
*/ disableFhirpathValidation?: boolean; /** * Whether to disable profile validation for this FHIR store. The default value is false. Set this to true to disable checking incoming resources for conformance against structure definitions in this FHIR store. */ disableProfileValidation?: boolean; /** * Whether to disable reference type validation for incoming resources. The default value is false. Set this to true to disable checking incoming resources for conformance against reference type requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced. */ disableReferenceTypeValidation?: boolean; /** * Whether to disable required fields validation for incoming resources. The default value is false. Set this to true to disable checking incoming resources for conformance against required fields requirement defined in the FHIR specification. This property only affects resource types that do not have profiles configured for them, any rules in enabled implementation guides will still be enforced. */ disableRequiredFieldValidation?: boolean; /** * A list of implementation guide URLs in this FHIR store that are used to configure the profiles to use for validation. * When a URL cannot be resolved (for example, in a type assertion), the server does not return an error. * For example, to use the US Core profiles for validation, set enabledImplementationGuides to ["http://hl7.org/fhir/us/core/ImplementationGuide/ig"]. If enabledImplementationGuides is empty or omitted, then incoming resources are only required to conform to the base FHIR profiles. Otherwise, a resource must conform to at least one profile listed in the global property of one of the enabled ImplementationGuides. * The Cloud Healthcare API does not currently enforce all of the rules in a StructureDefinition. 
The following rules are supported: * - min/max * - minValue/maxValue * - maxLength * - type * - fixed[x] * - pattern[x] on simple types * - slicing, when using "value" as the discriminator type */ enabledImplementationGuides?: string[]; } interface Hl7StoreIamBindingCondition { description?: string; expression: string; title: string; } interface Hl7StoreIamMemberCondition { description?: string; expression: string; title: string; } interface Hl7StoreNotificationConfig { /** * The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. * PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. * It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message * was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a * project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given * Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. */ pubsubTopic: string; } interface Hl7StoreNotificationConfigs { /** * Restricts notifications sent for messages matching a filter. If this is empty, all messages * are matched. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings * Fields/functions available for filtering are: * * messageType, from the MSH-9.1 field. For example, NOT messageType = "ADT". * * sendDate or sendDate, the YYYY-MM-DD date the message was sent in the dataset's timeZone, from the MSH-7 segment. For example, sendDate < "2017-01-02". * * sendTime, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, sendTime < "2017-01-02T00:00:00-05:00". * * sendFacility, the care center that the message came from, from the MSH-4 segment. For example, sendFacility = "ABC". 
* * PatientId(value, type), which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, PatientId("123456", "MRN"). * * labels.x, a string value of the label with key x as set using the Message.labels map. For example, labels."priority"="high". The operator :* can be used to assert the existence of a label. For example, labels."priority":*. */ filter?: string; /** * The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. * PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. * It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message * was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a * project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given * Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. * If a notification cannot be published to Cloud Pub/Sub, errors will be logged to Stackdriver */ pubsubTopic: string; } interface Hl7StoreParserConfig { /** * Determines whether messages with no header are allowed. */ allowNullHeader?: boolean; /** * JSON encoded string for schemas used to parse messages in this * store if schematized parsing is desired. */ schema?: string; /** * Byte(s) to be used as the segment terminator. If this is unset, '\r' will be used as segment terminator. * A base64-encoded string. */ segmentTerminator?: string; /** * The version of the unschematized parser to be used when a custom `schema` is not set. * Default value is `V1`. * Possible values are: `V1`, `V2`, `V3`. 
*/ version?: string; } interface PipelineJobBackfillPipelineJob { /** * Specifies the mapping pipeline job to backfill, the name format * should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}. */ mappingPipelineJob?: string; } interface PipelineJobMappingPipelineJob { /** * If set, the mapping pipeline will write snapshots to this * FHIR store without assigning stable IDs. You must * grant your pipeline project's Cloud Healthcare Service * Agent serviceaccount healthcare.fhirResources.executeBundle * and healthcare.fhirResources.create permissions on the * destination store. The destination store must set * [disableReferentialIntegrity][FhirStore.disable_referential_integrity] * to true. The destination store must use FHIR version R4. * Format: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}. */ fhirStoreDestination?: string; /** * A streaming FHIR data source. * Structure is documented below. */ fhirStreamingSource?: outputs.healthcare.PipelineJobMappingPipelineJobFhirStreamingSource; /** * The location of the mapping configuration. * Structure is documented below. */ mappingConfig: outputs.healthcare.PipelineJobMappingPipelineJobMappingConfig; /** * If set to true, a mapping pipeline will send output snapshots * to the reconciliation pipeline in its dataset. A reconciliation * pipeline must exist in this dataset before a mapping pipeline * with a reconciliation destination can be created. */ reconciliationDestination?: boolean; } interface PipelineJobMappingPipelineJobFhirStreamingSource { /** * Describes the streaming FHIR data source. */ description?: string; /** * The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}. */ fhirStore: string; } interface PipelineJobMappingPipelineJobMappingConfig { /** * Describes the mapping configuration. 
*/ description?: string; /** * Specifies the path to the mapping configuration for harmonization pipeline. * Structure is documented below. */ whistleConfigSource?: outputs.healthcare.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource; } interface PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource { /** * Directory path where all the Whistle files are located. * Example: gs://{bucket-id}/{path/to/import-root/dir} */ importUriPrefix: string; /** * Main configuration file which has the entrypoint or the root function. * Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl. */ uri: string; } interface PipelineJobReconciliationPipelineJob { /** * The harmonized FHIR store to write harmonized FHIR resources to, * in the format of: project/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id} */ fhirStoreDestination?: string; /** * Specifies the top level directory of the matching configs used * in all mapping pipelines, which extract properties for resources * to be matched on. * Example: gs://{bucket-id}/{path/to/matching/configs} */ matchingUriPrefix: string; /** * Specifies the location of the reconciliation configuration. * Structure is documented below. */ mergeConfig: outputs.healthcare.PipelineJobReconciliationPipelineJobMergeConfig; } interface PipelineJobReconciliationPipelineJobMergeConfig { /** * Describes the mapping configuration. */ description?: string; /** * Specifies the path to the mapping configuration for harmonization pipeline. * Structure is documented below. */ whistleConfigSource: outputs.healthcare.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource; } interface PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource { /** * Directory path where all the Whistle files are located. * Example: gs://{bucket-id}/{path/to/import-root/dir} */ importUriPrefix: string; /** * Main configuration file which has the entrypoint or the root function. 
* Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl. */ uri: string; } interface WorkspaceSettings { /** * Project IDs for data projects hosted in a workspace. */ dataProjectIds: string[]; } } export declare namespace iam { interface AccessBoundaryPolicyRule { /** * An access boundary rule in an IAM policy. * Structure is documented below. */ accessBoundaryRule?: outputs.iam.AccessBoundaryPolicyRuleAccessBoundaryRule; /** * The description of the rule. */ description?: string; } interface AccessBoundaryPolicyRuleAccessBoundaryRule { /** * The availability condition further constrains the access allowed by the access boundary rule. * Structure is documented below. */ availabilityCondition?: outputs.iam.AccessBoundaryPolicyRuleAccessBoundaryRuleAvailabilityCondition; /** * A list of permissions that may be allowed for use on the specified resource. */ availablePermissions?: string[]; /** * The full resource name of a Google Cloud resource entity. */ availableResource?: string; } interface AccessBoundaryPolicyRuleAccessBoundaryRuleAvailabilityCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, * e.g. a file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface DenyPolicyRule { /** * A deny rule in an IAM deny policy. * Structure is documented below. */ denyRule?: outputs.iam.DenyPolicyRuleDenyRule; /** * The description of the rule. */ description?: string; } interface DenyPolicyRuleDenyRule { /** * User defined CEVAL expression. 
A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. * Structure is documented below. */ denialCondition?: outputs.iam.DenyPolicyRuleDenyRuleDenialCondition; /** * The permissions that are explicitly denied by this rule. Each permission uses the format `{service-fqdn}/{resource}.{verb}`, * where `{service-fqdn}` is the fully qualified domain name for the service. For example, `iam.googleapis.com/roles.list`. */ deniedPermissions?: string[]; /** * The identities that are prevented from using one or more permissions on Google Cloud resources. */ deniedPrincipals?: string[]; /** * Specifies the permissions that this rule excludes from the set of denied permissions given by deniedPermissions. * If a permission appears in deniedPermissions and in exceptionPermissions then it will not be denied. * The excluded permissions can be specified using the same syntax as deniedPermissions. */ exceptionPermissions?: string[]; /** * The identities that are excluded from the deny rule, even if they are listed in the deniedPrincipals. * For example, you could add a Google group to the deniedPrincipals, then exclude specific users who belong to that group. */ exceptionPrincipals?: string[]; } interface DenyPolicyRuleDenyRuleDenialCondition { /** * Description of the expression. This is a longer text which describes the expression, * e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, * e.g. a file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. * This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface FoldersPolicyBindingCondition { /** * Optional. Description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface FoldersPolicyBindingTarget { /** * Required. Immutable. Full Resource Name of the principal set used for principal access boundary policy bindings. * Examples for each one of the following supported principal set types: * * Folder: `//cloudresourcemanager.googleapis.com/folders/FOLDER_ID` * It must be parent by the policy binding's parent (the folder). */ principalSet?: string; } interface GetTestablePermissionsPermission { /** * Whether the corresponding API has been enabled for the resource. */ apiDisabled: boolean; /** * The level of support for custom roles. Can be one of `"NOT_SUPPORTED"`, `"SUPPORTED"`, `"TESTING"`. Default is `"SUPPORTED"` */ customSupportLevel: string; /** * Name of the permission. */ name: string; /** * Release stage of the permission. */ stage: string; /** * Human readable title of the permission. */ title: string; } interface GetWorkloadIdentityPoolInlineCertificateIssuanceConfig { /** * A required mapping of a cloud region to the CA pool resource located in that region used * for certificate issuance, adhering to these constraints: * * * **Key format:** A supported cloud region name equivalent to the location identifier in * the corresponding map entry's value. * * **Value format:** A valid CA pool resource path format like: * 'projects/{project}/locations/{location}/caPools/{ca_pool}' * * **Region Matching:** Workloads are ONLY issued certificates from CA pools within the * same region. 
Also the CA pool region (in value) must match the workload's region (key). */ caPools: { [key: string]: string; }; /** * Key algorithm to use when generating the key pair. This key pair will be used to create * the certificate. If unspecified, this will default to 'ECDSA_P256'. * * * 'RSA_2048': Specifies RSA with a 2048-bit modulus. * * 'RSA_3072': Specifies RSA with a 3072-bit modulus. * * 'RSA_4096': Specifies RSA with a 4096-bit modulus. * * 'ECDSA_P256': Specifies ECDSA with curve P256. * * 'ECDSA_P384': Specifies ECDSA with curve P384. Possible values: ["RSA_2048", "RSA_3072", "RSA_4096", "ECDSA_P256", "ECDSA_P384"] */ keyAlgorithm: string; /** * Lifetime of the workload certificates issued by the CA pool in seconds. Must be between * '86400s' (24 hours) to '2592000s' (30 days), ends in the suffix "'s'" (indicating seconds) * and is preceded by the number of seconds. If unspecified, this will be defaulted to * '86400s' (24 hours). */ lifetime: string; /** * Rotation window percentage indicating when certificate rotation should be initiated based * on remaining lifetime. Must be between '50' - '80'. If unspecified, this will be defaulted * to '50'. */ rotationWindowPercentage: number; } interface GetWorkloadIdentityPoolInlineTrustConfig { /** * Maps specific trust domains (e.g., "example.com") to their corresponding 'TrustStore' * objects, which contain the trusted root certificates for that domain. There can be a * maximum of '10' trust domain entries in this map. * * Note that a trust domain automatically trusts itself and don't need to be specified here. * If however, this 'WorkloadIdentityPool''s trust domain contains any trust anchors in the * 'additional_trust_bundles' map, those trust anchors will be *appended to* the Trust Bundle * automatically derived from your 'InlineCertificateIssuanceConfig''s 'ca_pools'. 
*/ additionalTrustBundles: outputs.iam.GetWorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundle[]; } interface GetWorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundle { /** * List of Trust Anchors to be used while performing validation against a given * 'TrustStore'. The incoming end entity's certificate must be chained up to one of the * trust anchors here. */ trustAnchors: outputs.iam.GetWorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundleTrustAnchor[]; trustDomain: string; } interface GetWorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundleTrustAnchor { /** * PEM certificate of the PKI used for validation. Must only contain one ca * certificate(either root or intermediate cert). */ pemCertificate: string; } interface GetWorkloadIdentityPoolProviderAw { /** * The AWS account ID. */ accountId: string; } interface GetWorkloadIdentityPoolProviderOidc { /** * Acceptable values for the 'aud' field (audience) in the OIDC token. Token exchange * requests are rejected if the token audience does not match one of the configured * values. Each audience may be at most 256 characters. A maximum of 10 audiences may * be configured. * * If this list is empty, the OIDC token audience must be equal to the full canonical * resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. * For example: * ''' * //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ * https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ * ''' */ allowedAudiences: string[]; /** * The OIDC issuer URL. */ issuerUri: string; /** * OIDC JWKs in JSON String format. For details on definition of a * JWK, see https:tools.ietf.org/html/rfc7517. If not set, then we * use the 'jwks_uri' from the discovery document fetched from the * .well-known path for the 'issuer_uri'. Currently, RSA and EC asymmetric * keys are supported. 
The JWK must use following format and include only * the following fields: * ''' * { * "keys": [ * { * "kty": "RSA/EC", * "alg": "", * "use": "sig", * "kid": "", * "n": "", * "e": "", * "x": "", * "y": "", * "crv": "" * } * ] * } * ''' */ jwksJson: string; } interface GetWorkloadIdentityPoolProviderSaml { /** * SAML Identity provider configuration metadata xml doc. */ idpMetadataXml: string; } interface GetWorkloadIdentityPoolProviderX509 { /** * A Trust store, use this trust store as a wrapper to config the trust * anchor and optional intermediate cas to help build the trust chain for * the incoming end entity certificate. Follow the x509 guidelines to * define those PEM encoded certs. Only 1 trust store is currently * supported. */ trustStores: outputs.iam.GetWorkloadIdentityPoolProviderX509TrustStore[]; } interface GetWorkloadIdentityPoolProviderX509TrustStore { /** * Set of intermediate CA certificates used for building the trust chain to * trust anchor. * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. */ intermediateCas: outputs.iam.GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa[]; /** * List of Trust Anchors to be used while performing validation * against a given TrustStore. The incoming end entity's certificate * must be chained up to one of the trust anchors here. */ trustAnchors: outputs.iam.GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor[]; } interface GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { /** * PEM certificate of the PKI used for validation. Must only contain one * ca certificate(either root or intermediate cert). */ pemCertificate: string; } interface GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { /** * PEM certificate of the PKI used for validation. Must only contain one * ca certificate(either root or intermediate cert). */ pemCertificate: string; } interface OrganizationsPolicyBindingCondition { /** * Optional. Description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface OrganizationsPolicyBindingTarget { /** * Required. Immutable. Full Resource Name of the principal set used for principal access boundary policy bindings. * Examples for each one of the following supported principal set types: * * Organization `//cloudresourcemanager.googleapis.com/organizations/ORGANIZATION_ID` * * Workforce Identity: `//iam.googleapis.com/locations/global/workforcePools/WORKFORCE_POOL_ID` * * Workspace Identity: `//iam.googleapis.com/locations/global/workspace/WORKSPACE_ID` * It must be parent by the policy binding's parent (the organization). */ principalSet?: string; } interface PrincipalAccessBoundaryPolicyDetails { /** * The version number that indicates which Google Cloud services * are included in the enforcement (e.g. \"latest\", \"1\", ...). If empty, the * PAB policy version will be set to the current latest version, and this version * won't get updated when new versions are released. */ enforcementVersion: string; /** * A list of principal access boundary policy rules. The number of rules in a policy is limited to 500. * Structure is documented below. */ rules: outputs.iam.PrincipalAccessBoundaryPolicyDetailsRule[]; } interface PrincipalAccessBoundaryPolicyDetailsRule { /** * The description of the principal access boundary policy rule. Must be less than or equal to 256 characters. */ description?: string; /** * The access relationship of principals to the resources in this rule. 
* Possible values: ALLOW */ effect: string; /** * A list of Cloud Resource Manager resources. The resource * and all the descendants are included. The number of resources in a policy * is limited to 500 across all rules. * The following resource types are supported: * * Organizations, such as `//cloudresourcemanager.googleapis.com/organizations/123`. * * Folders, such as `//cloudresourcemanager.googleapis.com/folders/123`. * * Projects, such as `//cloudresourcemanager.googleapis.com/projects/123` * or `//cloudresourcemanager.googleapis.com/projects/my-project-id`. */ resources: string[]; } interface ProjectsPolicyBindingCondition { /** * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ProjectsPolicyBindingTarget { /** * Required. Immutable. Full Resource Name of the principal set used for principal access boundary policy bindings. * Examples for each one of the following supported principal set types: * * Project: * * `//cloudresourcemanager.googleapis.com/projects/PROJECT_NUMBER` * * `//cloudresourcemanager.googleapis.com/projects/PROJECT_ID` * * Workload Identity Pool: `//iam.googleapis.com/projects/PROJECT_NUMBER/locations/LOCATION/workloadIdentityPools/WORKLOAD_POOL_ID` * It must be parent by the policy binding's parent (the project). */ principalSet?: string; } interface WorkforcePoolAccessRestrictions { /** * Services allowed for web sign-in with the workforce pool. * If not set by default there are no restrictions. 
* Structure is documented below. */ allowedServices?: outputs.iam.WorkforcePoolAccessRestrictionsAllowedService[]; /** * Disable programmatic sign-in by disabling token issue via the Security Token API endpoint. * See [Security Token Service API](https://cloud.google.com/iam/docs/reference/sts/rest). */ disableProgrammaticSignin?: boolean; } interface WorkforcePoolAccessRestrictionsAllowedService { /** * Domain name of the service. * Example: console.cloud.google */ domain?: string; } interface WorkforcePoolIamBindingCondition { description?: string; expression: string; title: string; } interface WorkforcePoolIamMemberCondition { description?: string; expression: string; title: string; } interface WorkforcePoolProviderExtendedAttributesOauth2Client { /** * Represents the IdP and type of claims that should be fetched. * * AZURE_AD_GROUPS_ID: Used to get the user's group claims from the Azure AD identity provider * using configuration provided in ExtendedAttributesOAuth2Client and 'id' * property of the 'microsoft.graph.group' object is used for claim mapping. See * https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties * for more details on 'microsoft.graph.group' properties. The * group IDs obtained from Azure AD are present in 'assertion.groups' for * OIDC providers and 'assertion.attributes.groups' for SAML providers for * attribute mapping. Possible values: ["AZURE_AD_GROUPS_ID"] */ attributesType: string; /** * The OAuth 2.0 client ID for retrieving extended attributes from the identity provider. Required to get the Access Token using client credentials grant flow. */ clientId: string; /** * The OAuth 2.0 client secret for retrieving extended attributes from the identity provider. Required to get the Access Token using client credentials grant flow. */ clientSecret: outputs.iam.WorkforcePoolProviderExtendedAttributesOauth2ClientClientSecret; /** * The OIDC identity provider's issuer URI. 
Must be a valid URI using the 'https' scheme. Required to get the OIDC discovery document. */ issuerUri: string; /** * Represents the parameters to control which claims are fetched from an IdP. */ queryParameters?: outputs.iam.WorkforcePoolProviderExtendedAttributesOauth2ClientQueryParameters; } interface WorkforcePoolProviderExtendedAttributesOauth2ClientClientSecret { /** * The value of the client secret. * Structure is documented below. */ value?: outputs.iam.WorkforcePoolProviderExtendedAttributesOauth2ClientClientSecretValue; } interface WorkforcePoolProviderExtendedAttributesOauth2ClientClientSecretValue { /** * The plain text of the client secret value. */ plainText: string; /** * (Output) * A thumbprint to represent the current client secret value. */ thumbprint: string; } interface WorkforcePoolProviderExtendedAttributesOauth2ClientQueryParameters { /** * The filter used to request specific records from IdP. In case of attributes type as AZURE_AD_GROUPS_ID, it represents the * filter used to request specific groups for users from IdP. By default, all of the groups associated with the user are fetched. The * groups should be security enabled. See https://learn.microsoft.com/en-us/graph/search-query-parameter for more details. */ filter?: string; } interface WorkforcePoolProviderExtraAttributesOauth2Client { /** * Represents the IdP and type of claims that should be fetched. * * AZURE_AD_GROUPS_MAIL: Used to get the user's group claims from the Azure AD identity provider using configuration provided * in ExtraAttributesOAuth2Client and 'mail' property of the 'microsoft.graph.group' object is used for claim mapping. * See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on * 'microsoft.graph.group' properties. The attributes obtained from idntity provider are mapped to 'assertion.groups'. 
* * AZURE_AD_GROUPS_ID: Used to get the user's group claims from the Azure AD identity provider * using configuration provided in ExtraAttributesOAuth2Client and 'id' * property of the 'microsoft.graph.group' object is used for claim mapping. See * https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties * for more details on 'microsoft.graph.group' properties. The * group IDs obtained from Azure AD are present in 'assertion.groups' for * OIDC providers and 'assertion.attributes.groups' for SAML providers for * attribute mapping. Possible values: ["AZURE_AD_GROUPS_MAIL", "AZURE_AD_GROUPS_ID"] */ attributesType: string; /** * The OAuth 2.0 client ID for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. */ clientId: string; /** * The OAuth 2.0 client secret for retrieving extra attributes from the identity provider. Required to get the Access Token using client credentials grant flow. */ clientSecret: outputs.iam.WorkforcePoolProviderExtraAttributesOauth2ClientClientSecret; /** * The OIDC identity provider's issuer URI. Must be a valid URI using the 'https' scheme. Required to get the OIDC discovery document. */ issuerUri: string; /** * Represents the parameters to control which claims are fetched from an IdP. */ queryParameters?: outputs.iam.WorkforcePoolProviderExtraAttributesOauth2ClientQueryParameters; } interface WorkforcePoolProviderExtraAttributesOauth2ClientClientSecret { /** * The value of the client secret. * Structure is documented below. */ value?: outputs.iam.WorkforcePoolProviderExtraAttributesOauth2ClientClientSecretValue; } interface WorkforcePoolProviderExtraAttributesOauth2ClientClientSecretValue { /** * The plain text of the client secret value. */ plainText: string; /** * (Output) * A thumbprint to represent the current client secret value. 
*/ thumbprint: string; } interface WorkforcePoolProviderExtraAttributesOauth2ClientQueryParameters { /** * The filter used to request specific records from IdP. In case of attributes type as AZURE_AD_GROUPS_ID, it represents the * filter used to request specific groups for users from IdP. By default, all of the groups associated with the user are fetched. The * groups should be security enabled. See https://learn.microsoft.com/en-us/graph/search-query-parameter for more details. */ filter?: string; } interface WorkforcePoolProviderKeyKeyData { /** * (Output) * The format of the key. */ format: string; /** * (Output) * The key data. The format of the key is represented by the format field. */ key: string; /** * The specifications for the key. * Possible values are: `RSA_2048`, `RSA_3072`, `RSA_4096`. */ keySpec: string; /** * (Output) * Latest timestamp when this key is valid. Attempts to use this key after this time will fail. * Only present if the key data represents a X.509 certificate. * Uses RFC 3339, where generated output will always be Z-normalized and uses 0, 3, 6 or 9 fractional digits. * Offsets other than "Z" are also accepted. * Examples: "2014-10-02T15:01:23Z", "2014-10-02T15:01:23.045123456Z" or "2014-10-02T15:01:23+05:30". */ notAfterTime: string; /** * (Output) * Earliest timestamp when this key is valid. Attempts to use this key before this time will fail. * Only present if the key data represents a X.509 certificate. * Uses RFC 3339, where generated output will always be Z-normalized and uses 0, 3, 6 or 9 fractional digits. * Offsets other than "Z" are also accepted. * Examples: "2014-10-02T15:01:23Z", "2014-10-02T15:01:23.045123456Z" or "2014-10-02T15:01:23+05:30". */ notBeforeTime: string; } interface WorkforcePoolProviderOidc { /** * The client ID. Must match the audience claim of the JWT issued by the identity provider. */ clientId: string; /** * The optional client secret. Required to enable Authorization Code flow for web sign-in. 
* Structure is documented below. */ clientSecret?: outputs.iam.WorkforcePoolProviderOidcClientSecret; /** * The OIDC issuer URI. Must be a valid URI using the 'https' scheme. */ issuerUri: string; /** * OIDC JWKs in JSON String format. For details on definition of a * JWK, see https:tools.ietf.org/html/rfc7517. If not set, then we * use the `jwksUri` from the discovery document fetched from the * .well-known path for the `issuerUri`. Currently, RSA and EC asymmetric * keys are supported. The JWK must use following format and include only * the following fields: * ``` * { * "keys": [ * { * "kty": "RSA/EC", * "alg": "", * "use": "sig", * "kid": "", * "n": "", * "e": "", * "x": "", * "y": "", * "crv": "" * } * ] * } * ``` */ jwksJson?: string; /** * Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser. * Structure is documented below. */ webSsoConfig: outputs.iam.WorkforcePoolProviderOidcWebSsoConfig; } interface WorkforcePoolProviderOidcClientSecret { /** * The value of the client secret. * Structure is documented below. */ value?: outputs.iam.WorkforcePoolProviderOidcClientSecretValue; } interface WorkforcePoolProviderOidcClientSecretValue { /** * The plain text of the client secret value. */ plainText: string; /** * (Output) * A thumbprint to represent the current client secret value. */ thumbprint: string; } interface WorkforcePoolProviderOidcWebSsoConfig { /** * Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. * Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured. 
* * The `extraAttributesOauth2Client` block supports: */ additionalScopes?: string[]; /** * The behavior for how OIDC Claims are included in the `assertion` object used for attribute mapping and attribute condition. * * MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS: Merge the UserInfo Endpoint Claims with ID Token Claims, preferring UserInfo Claim Values for the same Claim Name. This option is available only for the Authorization Code Flow. * * ONLY_ID_TOKEN_CLAIMS: Only include ID Token Claims. * Possible values are: `MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS`, `ONLY_ID_TOKEN_CLAIMS`. */ assertionClaimsBehavior: string; /** * The Response Type to request for in the OIDC Authorization Request for web sign-in. * The `CODE` Response Type is recommended to avoid the Implicit Flow, for security reasons. * * CODE: The `response_type=code` selection uses the Authorization Code Flow for web sign-in. Requires a configured client secret. * * ID_TOKEN: The `response_type=id_token` selection uses the Implicit Flow for web sign-in. * Possible values are: `CODE`, `ID_TOKEN`. */ responseType: string; } interface WorkforcePoolProviderSaml { /** * SAML Identity provider configuration metadata xml doc. * The xml document should comply with [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). * The max size of the acceptable xml document will be bounded to 128k characters. * The metadata xml document should satisfy the following constraints: * 1) Must contain an Identity Provider Entity ID. * 2) Must contain at least one non-expired signing key certificate. * 3) For each signing key: * a) Valid from should be no more than 7 days from now. * b) Valid to should be no more than 10 years in the future. * 4) Up to 3 IdP signing keys are allowed in the metadata xml. * When updating the provider's metadata xml, at least one non-expired signing key * must overlap with the existing metadata. 
This requirement is skipped if there are * no non-expired signing keys present in the existing metadata. */ idpMetadataXml: string; } interface WorkloadIdentityPoolIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WorkloadIdentityPoolIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WorkloadIdentityPoolInlineCertificateIssuanceConfig { /** * A required mapping of a cloud region to the CA pool resource located in that region used * for certificate issuance, adhering to these constraints: * * **Key format:** A supported cloud region name equivalent to the location identifier in * the corresponding map entry's value. 
* * **Value format:** A valid CA pool resource path format like: * `projects/{project}/locations/{location}/caPools/{ca_pool}` * * **Region Matching:** Workloads are ONLY issued certificates from CA pools within the * same region. Also the CA pool region (in value) must match the workload's region (key). */ caPools: { [key: string]: string; }; /** * Key algorithm to use when generating the key pair. This key pair will be used to create * the certificate. If unspecified, this will default to `ECDSA_P256`. * * `RSA_2048`: Specifies RSA with a 2048-bit modulus. * * `RSA_3072`: Specifies RSA with a 3072-bit modulus. * * `RSA_4096`: Specifies RSA with a 4096-bit modulus. * * `ECDSA_P256`: Specifies ECDSA with curve P256. * * `ECDSA_P384`: Specifies ECDSA with curve P384. * Possible values are: `RSA_2048`, `RSA_3072`, `RSA_4096`, `ECDSA_P256`, `ECDSA_P384`. */ keyAlgorithm: string; /** * Lifetime of the workload certificates issued by the CA pool in seconds. Must be between * `86400s` (24 hours) to `2592000s` (30 days), ends in the suffix "`s`" (indicating seconds) * and is preceded by the number of seconds. If unspecified, this will be defaulted to * `86400s` (24 hours). */ lifetime: string; /** * Rotation window percentage indicating when certificate rotation should be initiated based * on remaining lifetime. Must be between `50` - `80`. If unspecified, this will be defaulted * to `50`. */ rotationWindowPercentage: number; } interface WorkloadIdentityPoolInlineTrustConfig { /** * Maps specific trust domains (e.g., "example.com") to their corresponding `TrustStore` * objects, which contain the trusted root certificates for that domain. There can be a * maximum of `10` trust domain entries in this map. * Note that a trust domain automatically trusts itself and don't need to be specified here. 
* If however, this `WorkloadIdentityPool`'s trust domain contains any trust anchors in the * `additionalTrustBundles` map, those trust anchors will be *appended to* the Trust Bundle * automatically derived from your `InlineCertificateIssuanceConfig`'s `caPools`. * Structure is documented below. */ additionalTrustBundles?: outputs.iam.WorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundle[]; } interface WorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundle { /** * List of Trust Anchors to be used while performing validation against a given * `TrustStore`. The incoming end entity's certificate must be chained up to one of the * trust anchors here. * Structure is documented below. */ trustAnchors: outputs.iam.WorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundleTrustAnchor[]; /** * The identifier for this object. Format specified above. */ trustDomain: string; } interface WorkloadIdentityPoolInlineTrustConfigAdditionalTrustBundleTrustAnchor { /** * PEM certificate of the PKI used for validation. Must only contain one ca * certificate(either root or intermediate cert). */ pemCertificate: string; } interface WorkloadIdentityPoolManagedIdentityAttestationRule { /** * A single workload operating on Google Cloud. For example: * `//compute.googleapis.com/projects/123/uid/zones/us-central1-a/instances/12345678`. */ googleCloudResource: string; } interface WorkloadIdentityPoolNamespaceOwnerService { /** * (Output) * The service agent principal subject, e.g. * `serviceAccount:service-1234@gcp-sa-gkehub.iam.gserviceaccount.com`. */ principalSubject: string; } interface WorkloadIdentityPoolProviderAws { /** * The AWS account ID. */ accountId: string; } interface WorkloadIdentityPoolProviderOidc { /** * Acceptable values for the `aud` field (audience) in the OIDC token. Token exchange * requests are rejected if the token audience does not match one of the configured * values. Each audience may be at most 256 characters. A maximum of 10 audiences may * be configured. 
* If this list is empty, the OIDC token audience must be equal to the full canonical * resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. * For example: * ``` * //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ * https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ * ``` */ allowedAudiences?: string[]; /** * The OIDC issuer URL. */ issuerUri: string; /** * OIDC JWKs in JSON String format. For details on definition of a * JWK, see https:tools.ietf.org/html/rfc7517. If not set, then we * use the `jwksUri` from the discovery document fetched from the * .well-known path for the `issuerUri`. Currently, RSA and EC asymmetric * keys are supported. The JWK must use following format and include only * the following fields: * ``` * { * "keys": [ * { * "kty": "RSA/EC", * "alg": "", * "use": "sig", * "kid": "", * "n": "", * "e": "", * "x": "", * "y": "", * "crv": "" * } * ] * } * ``` */ jwksJson?: string; } interface WorkloadIdentityPoolProviderSaml { /** * SAML Identity provider configuration metadata xml doc. * * The `x509` block supports: */ idpMetadataXml: string; } interface WorkloadIdentityPoolProviderX509 { /** * A Trust store, use this trust store as a wrapper to config the trust * anchor and optional intermediate cas to help build the trust chain for * the incoming end entity certificate. Follow the x509 guidelines to * define those PEM encoded certs. Only 1 trust store is currently * supported. */ trustStore: outputs.iam.WorkloadIdentityPoolProviderX509TrustStore; } interface WorkloadIdentityPoolProviderX509TrustStore { /** * Set of intermediate CA certificates used for building the trust chain to * trust anchor. * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. * Structure is documented below. 
*/ intermediateCas?: outputs.iam.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa[]; /** * List of Trust Anchors to be used while performing validation * against a given TrustStore. The incoming end entity's certificate * must be chained up to one of the trust anchors here. * Structure is documented below. */ trustAnchors: outputs.iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor[]; } interface WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { /** * PEM certificate of the PKI used for validation. Must only contain one * ca certificate(either root or intermediate cert). */ pemCertificate?: string; } interface WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { /** * PEM certificate of the PKI used for validation. Must only contain one * ca certificate(either root or intermediate cert). */ pemCertificate?: string; } } export declare namespace iap { interface AppEngineServiceIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface AppEngineServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface AppEngineVersionIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface AppEngineVersionIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SettingsAccessSettings { /** * Settings to configure and enable allowed domains. * Structure is documented below. 
*/ allowedDomainsSettings?: outputs.iap.SettingsAccessSettingsAllowedDomainsSettings; /** * Configuration to allow cross-origin requests via IAP. * Structure is documented below. */ corsSettings?: outputs.iap.SettingsAccessSettingsCorsSettings; /** * GCIP claims and endpoint configurations for 3p identity providers. * * Enabling gcipSetting significantly changes the way IAP authenticates users. Identity Platform does not support IAM, so IAP will not enforce any IAM policies for requests to your application. * Structure is documented below. */ gcipSettings?: outputs.iap.SettingsAccessSettingsGcipSettings; /** * Identity sources that IAP can use to authenticate the end user. Only one identity source * can be configured. The possible values are: * * `WORKFORCE_IDENTITY_FEDERATION`: Use external identities set up on Google Cloud Workforce * Identity Federation. * Each value may be one of: `WORKFORCE_IDENTITY_FEDERATION`. */ identitySources?: string[]; /** * Settings to configure IAP's OAuth behavior. * Structure is documented below. */ oauthSettings?: outputs.iap.SettingsAccessSettingsOauthSettings; /** * Settings to configure reauthentication policies in IAP. * Structure is documented below. */ reauthSettings?: outputs.iap.SettingsAccessSettingsReauthSettings; /** * Settings to configure the workforce identity federation, including workforce pools * and OAuth 2.0 settings. * Structure is documented below. */ workforceIdentitySettings?: outputs.iap.SettingsAccessSettingsWorkforceIdentitySettings; } interface SettingsAccessSettingsAllowedDomainsSettings { /** * List of trusted domains. */ domains?: string[]; /** * Configuration for customers to opt in for the feature. */ enable?: boolean; } interface SettingsAccessSettingsCorsSettings { /** * Configuration to allow HTTP OPTIONS calls to skip authorization. * If undefined, IAP will not apply any special logic to OPTIONS requests. 
*/ allowHttpOptions?: boolean; } interface SettingsAccessSettingsGcipSettings { /** * Login page URI associated with the GCIP tenants. Typically, all resources within * the same project share the same login page, though it could be overridden at the * sub resource level. */ loginPageUri?: string; /** * GCIP tenant ids that are linked to the IAP resource. tenantIds could be a string * beginning with a number character to indicate authenticating with GCIP tenant flow, * or in the format of _ to indicate authenticating with GCIP agent flow. If agent flow * is used, tenantIds should only contain one single element, while for tenant flow, * tenantIds can contain multiple elements. */ tenantIds?: string[]; } interface SettingsAccessSettingsOauthSettings { /** * Domain hint to send as hd=? parameter in OAuth request flow. * Enables redirect to primary IDP by skipping Google's login screen. * (https://developers.google.com/identity/protocols/OpenIDConnect#hd-param) * Note: IAP does not verify that the id token's hd claim matches this value * since access behavior is managed by IAM policies. * * loginHint setting is not a replacement for access control. Always enforce an appropriate access policy if you want to restrict access to users outside your domain. */ loginHint?: string; /** * List of client ids allowed to use IAP programmatically. */ programmaticClients?: string[]; } interface SettingsAccessSettingsReauthSettings { /** * Reauth session lifetime, how long before a user has to reauthenticate again. * A duration in seconds with up to nine fractional digits, ending with 's'. * Example: "3.5s". */ maxAge: string; /** * Reauth method requested. The possible values are: * * `LOGIN`: Prompts the user to log in again. * * `SECURE_KEY`: User must use their secure key 2nd factor device. * * `ENROLLED_SECOND_FACTORS`: User can use any enabled 2nd factor. * Possible values are: `LOGIN`, `SECURE_KEY`, `ENROLLED_SECOND_FACTORS`. 
*/ method: string; /** * How IAP determines the effective policy in cases of hierarchical policies. * Policies are merged from higher in the hierarchy to lower in the hierarchy. * The possible values are: * * `MINIMUM`: This policy acts as a minimum to other policies, lower in the hierarchy. * Effective policy may only be the same or stricter. * * `DEFAULT`: This policy acts as a default if no other reauth policy is set. * Possible values are: `MINIMUM`, `DEFAULT`. */ policyType: string; } interface SettingsAccessSettingsWorkforceIdentitySettings { /** * OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity * federation services. * Structure is documented below. * * * The `oauth2` block supports: */ oauth2?: outputs.iap.SettingsAccessSettingsWorkforceIdentitySettingsOauth2; /** * The workforce pool resources. Only one workforce pool is accepted. */ workforcePools?: string; } interface SettingsAccessSettingsWorkforceIdentitySettingsOauth2 { /** * The OAuth 2.0 client ID registered in the workforce identity * federation OAuth 2.0 Server. */ clientId?: string; /** * Input only. The OAuth 2.0 client secret created while registering * the client ID. */ clientSecret?: string; /** * Output only. SHA256 hash value for the client secret. This field * is returned by IAP when the settings are retrieved. */ clientSecretSha256: string; } interface SettingsApplicationSettings { /** * Customization for Access Denied page. IAP allows customers to define a custom URI * to use as the error page when access is denied to users. If IAP prevents access * to this page, the default IAP error page will be displayed instead. * Structure is documented below. */ accessDeniedPageSettings?: outputs.iap.SettingsApplicationSettingsAccessDeniedPageSettings; /** * Settings to configure attribute propagation. * Structure is documented below. 
*/ attributePropagationSettings?: outputs.iap.SettingsApplicationSettingsAttributePropagationSettings; /** * The Domain value to set for cookies generated by IAP. This value is not validated by the API, * but will be ignored at runtime if invalid. */ cookieDomain?: string; /** * Settings to configure IAP's behavior for a service mesh. * Structure is documented below. */ csmSettings?: outputs.iap.SettingsApplicationSettingsCsmSettings; } interface SettingsApplicationSettingsAccessDeniedPageSettings { /** * The URI to be redirected to when access is denied. */ accessDeniedPageUri?: string; /** * Whether to generate a troubleshooting URL on access denied events to this application. */ generateTroubleshootingUri?: boolean; /** * Whether to generate remediation token on access denied events to this application. */ remediationTokenGenerationEnabled?: boolean; } interface SettingsApplicationSettingsAttributePropagationSettings { /** * Whether the provided attribute propagation settings should be evaluated on user requests. * If set to true, attributes returned from the expression will be propagated in the set output credentials. */ enable?: boolean; /** * Raw string CEL expression. Must return a list of attributes. A maximum of 45 attributes can * be selected. Expressions can select different attribute types from attributes: * attributes.saml_attributes, attributes.iap_attributes. */ expression?: string; /** * Which output credentials attributes selected by the CEL expression should be propagated in. * All attributes will be fully duplicated in each selected output credential. * Possible values are: * * `HEADER`: Propagate attributes in the headers with "x-goog-iap-attr-" prefix. 
* * `JWT`: Propagate attributes in the JWT of the form: * "additionalClaims": { "myAttribute": ["value1", "value2"] } * * `RCTOKEN`: Propagate attributes in the RCToken of the form: " * additionalClaims": { "myAttribute": ["value1", "value2"] } * Each value may be one of: `HEADER`, `JWT`, `RCTOKEN`. */ outputCredentials?: string[]; } interface SettingsApplicationSettingsCsmSettings { /** * Audience claim set in the generated RCToken. This value is not validated by IAP. */ rctokenAud?: string; } interface TunnelDestGroupIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface TunnelDestGroupIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. 
*/ title: string; } interface TunnelIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface TunnelIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface TunnelInstanceIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface TunnelInstanceIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebBackendServiceIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebBackendServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebCloudRunServiceIamBindingCondition { /** * An optional description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebCloudRunServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebForwardingRuleServiceIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. 
*/ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebForwardingRuleServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebRegionBackendServiceIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebRegionBackendServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebRegionForwardingRuleServiceIamBindingCondition { /** * An optional description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebRegionForwardingRuleServiceIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebTypeAppEngingIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. 
*/ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebTypeAppEngingIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebTypeComputeIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface WebTypeComputeIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } } export declare namespace identityplatform { interface ConfigBlockingFunctions { /** * The user credentials to include in the JWT payload that is sent to the registered Blocking Functions. * Structure is documented below. */ forwardInboundCredentials?: outputs.identityplatform.ConfigBlockingFunctionsForwardInboundCredentials; /** * Map of Trigger to event type. Key should be one of the supported event types: "beforeCreate", "beforeSignIn". * Structure is documented below. */ triggers: outputs.identityplatform.ConfigBlockingFunctionsTrigger[]; } interface ConfigBlockingFunctionsForwardInboundCredentials { /** * Whether to pass the user's OAuth identity provider's access token. */ accessToken?: boolean; /** * Whether to pass the user's OIDC identity provider's ID token. */ idToken?: boolean; /** * Whether to pass the user's OAuth identity provider's refresh token. */ refreshToken?: boolean; } interface ConfigBlockingFunctionsTrigger { /** * The identifier for this object. Format specified above. */ eventType: string; /** * HTTP URI trigger for the Cloud Function. */ functionUri: string; /** * (Output) * When the trigger was changed. */ updateTime: string; } interface ConfigClient { /** * (Output) * API key that can be used when making requests for this project. * **Note**: This property is sensitive and will not be displayed in the plan. */ apiKey: string; /** * (Output) * Firebase subdomain. */ firebaseSubdomain: string; /** * Configuration related to restricting a user's ability to affect their account. * Structure is documented below. 
*/ permissions?: outputs.identityplatform.ConfigClientPermissions; } interface ConfigClientPermissions { /** * When true, end users cannot delete their account on the associated project through any of our API methods */ disabledUserDeletion?: boolean; /** * When true, end users cannot sign up for a new account on the associated project through any of our API methods */ disabledUserSignup?: boolean; } interface ConfigMfa { /** * A list of usable second factors for this project. * Each value may be one of: `PHONE_SMS`. */ enabledProviders?: string[]; /** * A list of usable second factors for this project along with their configurations. * This field does not support phone based MFA, for that use the 'enabledProviders' field. * Structure is documented below. */ providerConfigs?: outputs.identityplatform.ConfigMfaProviderConfig[]; /** * Whether MultiFactor Authentication has been enabled for this project. * Possible values are: `DISABLED`, `ENABLED`, `MANDATORY`. */ state: string; } interface ConfigMfaProviderConfig { /** * Whether MultiFactor Authentication has been enabled for this project. * Possible values are: `DISABLED`, `ENABLED`, `MANDATORY`. */ state: string; /** * TOTP MFA provider config for this project. * Structure is documented below. */ totpProviderConfig?: outputs.identityplatform.ConfigMfaProviderConfigTotpProviderConfig; } interface ConfigMfaProviderConfigTotpProviderConfig { /** * The allowed number of adjacent intervals that will be used for verification to avoid clock skew. */ adjacentIntervals?: number; } interface ConfigMonitoring { /** * Configuration for logging requests made to this project to Stackdriver Logging * Structure is documented below. */ requestLogging?: outputs.identityplatform.ConfigMonitoringRequestLogging; } interface ConfigMonitoringRequestLogging { /** * Whether logging is enabled for this project or not. */ enabled?: boolean; } interface ConfigMultiTenant { /** * Whether this project can have tenants or not. 
*/ allowTenants?: boolean; /** * The default cloud parent org or folder that the tenant project should be created under. * The parent resource name should be in the format of "/", such as "folders/123" or "organizations/456". * If the value is not set, the tenant will be created under the same organization or folder as the agent project. */ defaultTenantLocation?: string; } interface ConfigQuota { /** * Quota for the Signup endpoint, if overwritten. Signup quota is measured in sign ups per project per hour per IP. None of quota, startTime, or quotaDuration can be skipped. * Structure is documented below. */ signUpQuotaConfig?: outputs.identityplatform.ConfigQuotaSignUpQuotaConfig; } interface ConfigQuotaSignUpQuotaConfig { /** * A sign up APIs quota that customers can override temporarily. Value can be in between 1 and 1000. */ quota?: number; /** * How long this quota will be active for. It is measured in seconds, e.g. "9.615s". */ quotaDuration?: string; /** * When this quota will take effect. */ startTime?: string; } interface ConfigSignIn { /** * Whether to allow more than one account to have the same email. */ allowDuplicateEmails?: boolean; /** * Configuration options related to authenticating an anonymous user. * Structure is documented below. */ anonymous?: outputs.identityplatform.ConfigSignInAnonymous; /** * Configuration options related to authenticating a user by their email address. * Structure is documented below. */ email?: outputs.identityplatform.ConfigSignInEmail; /** * (Output) * Output only. Hash config information. * Structure is documented below. */ hashConfigs: outputs.identityplatform.ConfigSignInHashConfig[]; /** * Configuration options related to authenticating a user by their phone number. * Structure is documented below. */ phoneNumber?: outputs.identityplatform.ConfigSignInPhoneNumber; } interface ConfigSignInAnonymous { /** * Whether anonymous user auth is enabled for the project or not. 
* * The `hashConfig` block contains: */ enabled: boolean; } interface ConfigSignInEmail { /** * Whether email auth is enabled for the project or not. */ enabled: boolean; /** * Whether a password is required for email auth or not. If true, both an email and * password must be provided to sign in. If false, a user may sign in via either * email/password or email link. */ passwordRequired?: boolean; } interface ConfigSignInHashConfig { /** * Different password hash algorithms used in Identity Toolkit. */ algorithm: string; /** * Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field. */ memoryCost: number; /** * How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms. */ rounds: number; /** * Non-printable character to be inserted between the salt and plain text password in base64. */ saltSeparator: string; /** * Signer key in base64. */ signerKey: string; } interface ConfigSignInPhoneNumber { /** * Whether phone number auth is enabled for the project or not. */ enabled: boolean; /** * A map of that can be used for phone auth testing. */ testPhoneNumbers?: { [key: string]: string; }; } interface ConfigSmsRegionConfig { /** * A policy of allowing SMS to every region by default and adding disallowed regions to a disallow list. * Structure is documented below. */ allowByDefault?: outputs.identityplatform.ConfigSmsRegionConfigAllowByDefault; /** * A policy of only allowing regions by explicitly adding them to an allowlist. * Structure is documented below. 
*/ allowlistOnly?: outputs.identityplatform.ConfigSmsRegionConfigAllowlistOnly; } interface ConfigSmsRegionConfigAllowByDefault { /** * Two letter unicode region codes to disallow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json */ disallowedRegions?: string[]; } interface ConfigSmsRegionConfigAllowlistOnly { /** * Two letter unicode region codes to allow as defined by https://cldr.unicode.org/ The full list of these region codes is here: https://github.com/unicode-cldr/cldr-localenames-full/blob/master/main/en/territories.json */ allowedRegions?: string[]; } interface InboundSamlConfigIdpConfig { /** * The IdP's certificate data to verify the signature in the SAMLResponse issued by the IDP. * Structure is documented below. */ idpCertificates: outputs.identityplatform.InboundSamlConfigIdpConfigIdpCertificate[]; /** * Unique identifier for all SAML entities */ idpEntityId: string; /** * Indicates if outbounding SAMLRequest should be signed. */ signRequest?: boolean; /** * URL to send Authentication request to. */ ssoUrl: string; } interface InboundSamlConfigIdpConfigIdpCertificate { /** * The IdP's x509 certificate. */ x509Certificate?: string; } interface InboundSamlConfigSpConfig { /** * Callback URI where responses from IDP are handled. Must start with `https://`. */ callbackUri?: string; /** * (Output) * The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP. * Structure is documented below. * * * The `spCertificates` block contains: */ spCertificates: outputs.identityplatform.InboundSamlConfigSpConfigSpCertificate[]; /** * Unique identifier for all SAML entities. 
*/ spEntityId?: string; } interface InboundSamlConfigSpConfigSpCertificate { /** * The x509 certificate */ x509Certificate: string; } interface OauthIdpConfigResponseType { /** * If true, authorization code is returned from IdP's authorization endpoint. */ code?: boolean; /** * If true, ID token is returned from IdP's authorization endpoint. */ idToken?: boolean; } interface TenantClient { /** * Configuration related to restricting a user's ability to affect their account. * Structure is documented below. */ permissions?: outputs.identityplatform.TenantClientPermissions; } interface TenantClientPermissions { /** * When true, end users cannot delete their account on the associated project through any of our API methods. */ disabledUserDeletion?: boolean; /** * When true, end users cannot sign up for a new account on the associated project through any of our API methods. */ disabledUserSignup?: boolean; } interface TenantInboundSamlConfigIdpConfig { /** * The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP. * Structure is documented below. */ idpCertificates: outputs.identityplatform.TenantInboundSamlConfigIdpConfigIdpCertificate[]; /** * Unique identifier for all SAML entities */ idpEntityId: string; /** * Indicates if outbounding SAMLRequest should be signed. */ signRequest?: boolean; /** * URL to send Authentication request to. */ ssoUrl: string; } interface TenantInboundSamlConfigIdpConfigIdpCertificate { /** * The x509 certificate */ x509Certificate?: string; } interface TenantInboundSamlConfigSpConfig { /** * Callback URI where responses from IDP are handled. Must start with `https://`. */ callbackUri: string; /** * (Output) * The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP. * Structure is documented below. 
* * * The `spCertificates` block contains: */ spCertificates: outputs.identityplatform.TenantInboundSamlConfigSpConfigSpCertificate[]; /** * Unique identifier for all SAML entities. */ spEntityId: string; } interface TenantInboundSamlConfigSpConfigSpCertificate { /** * The x509 certificate */ x509Certificate: string; } } export declare namespace integrationconnectors { interface ConnectionAuthConfig { /** * List containing additional auth configs. * Structure is documented below. */ additionalVariables?: outputs.integrationconnectors.ConnectionAuthConfigAdditionalVariable[]; /** * The type of authentication configured. */ authKey?: string; /** * authType of the Connection * Possible values are: `USER_PASSWORD`. */ authType: string; /** * Parameters to support Oauth 2.0 Auth Code Grant Authentication. * Structure is documented below. */ oauth2AuthCodeFlow?: outputs.integrationconnectors.ConnectionAuthConfigOauth2AuthCodeFlow; /** * OAuth2 Client Credentials for Authentication. * Structure is documented below. */ oauth2ClientCredentials?: outputs.integrationconnectors.ConnectionAuthConfigOauth2ClientCredentials; /** * OAuth2 JWT Bearer for Authentication. * Structure is documented below. */ oauth2JwtBearer?: outputs.integrationconnectors.ConnectionAuthConfigOauth2JwtBearer; /** * SSH Public Key for Authentication. * Structure is documented below. */ sshPublicKey?: outputs.integrationconnectors.ConnectionAuthConfigSshPublicKey; /** * User password for Authentication. * Structure is documented below. */ userPassword?: outputs.integrationconnectors.ConnectionAuthConfigUserPassword; } interface ConnectionAuthConfigAdditionalVariable { /** * Boolean Value of configVariable. */ booleanValue?: boolean; /** * Encryption key value of configVariable. * Structure is documented below. */ encryptionKeyValue?: outputs.integrationconnectors.ConnectionAuthConfigAdditionalVariableEncryptionKeyValue; /** * Integer Value of configVariable. 
*/ integerValue?: number; /** * Key for the configVariable */ key: string; /** * Secret value of configVariable * Structure is documented below. */ secretValue?: outputs.integrationconnectors.ConnectionAuthConfigAdditionalVariableSecretValue; /** * String Value of configVariable. */ stringValue?: string; } interface ConnectionAuthConfigAdditionalVariableEncryptionKeyValue { /** * The [KMS key name] with which the content of the Operation is encrypted. The * expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. * Will be empty string if google managed. */ kmsKeyName?: string; /** * Type of Encryption Key * Possible values are: `GOOGLE_MANAGED`, `CUSTOMER_MANAGED`. */ type: string; } interface ConnectionAuthConfigAdditionalVariableSecretValue { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionAuthConfigOauth2AuthCodeFlow { /** * Auth URL for Authorization Code Flow. */ authUri?: string; /** * Client ID for user-provided OAuth app. */ clientId?: string; /** * Client secret for user-provided OAuth app. */ clientSecret?: outputs.integrationconnectors.ConnectionAuthConfigOauth2AuthCodeFlowClientSecret; /** * Whether to enable PKCE when the user performs the auth code flow. */ enablePkce?: boolean; /** * Scopes the connection will request when the user performs the auth code flow. */ scopes?: string[]; } interface ConnectionAuthConfigOauth2AuthCodeFlowClientSecret { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. */ secretVersion: string; } interface ConnectionAuthConfigOauth2ClientCredentials { /** * Client ID for Authentication. */ clientId: string; /** * Secret version reference containing the client secret. 
*/ clientSecret?: outputs.integrationconnectors.ConnectionAuthConfigOauth2ClientCredentialsClientSecret; } interface ConnectionAuthConfigOauth2ClientCredentialsClientSecret { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. */ secretVersion: string; } interface ConnectionAuthConfigOauth2JwtBearer { /** * Secret version reference containing a PKCS#8 PEM-encoded private key associated with the Client Certificate. * This private key will be used to sign JWTs used for the jwt-bearer authorization grant. * Specified in the form as: projects/*/secrets/*/versions/*. */ clientKey?: outputs.integrationconnectors.ConnectionAuthConfigOauth2JwtBearerClientKey; /** * JwtClaims providers fields to generate the token. */ jwtClaims?: outputs.integrationconnectors.ConnectionAuthConfigOauth2JwtBearerJwtClaims; } interface ConnectionAuthConfigOauth2JwtBearerClientKey { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. */ secretVersion: string; } interface ConnectionAuthConfigOauth2JwtBearerJwtClaims { /** * Value for the "aud" claim. * * The `oauth2ClientCredentials` block supports: */ audience?: string; /** * Value for the "iss" claim. */ issuer?: string; /** * Value for the "sub" claim. */ subject?: string; } interface ConnectionAuthConfigSshPublicKey { /** * Format of SSH Client cert. */ certType?: string; /** * SSH Client Cert. It should contain both public and private key. * Structure is documented below. */ sshClientCert?: outputs.integrationconnectors.ConnectionAuthConfigSshPublicKeySshClientCert; /** * Password (passphrase) for ssh client certificate if it has one. * Structure is documented below. */ sshClientCertPass?: outputs.integrationconnectors.ConnectionAuthConfigSshPublicKeySshClientCertPass; /** * The user account used to authenticate. 
*/ username: string; } interface ConnectionAuthConfigSshPublicKeySshClientCert { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. */ secretVersion: string; } interface ConnectionAuthConfigSshPublicKeySshClientCertPass { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. * * The `oauth2AuthCodeFlow` block supports: */ secretVersion: string; } interface ConnectionAuthConfigUserPassword { /** * Password for Authentication. * Structure is documented below. */ password?: outputs.integrationconnectors.ConnectionAuthConfigUserPasswordPassword; /** * Username for Authentication. */ username: string; } interface ConnectionAuthConfigUserPasswordPassword { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. */ secretVersion: string; } interface ConnectionConfigVariable { /** * Boolean Value of configVariable */ booleanValue?: boolean; /** * Encryption key value of configVariable. * Structure is documented below. */ encryptionKeyValue?: outputs.integrationconnectors.ConnectionConfigVariableEncryptionKeyValue; /** * Integer Value of configVariable */ integerValue?: number; /** * Key for the configVariable */ key: string; /** * Secret value of configVariable. * Structure is documented below. */ secretValue?: outputs.integrationconnectors.ConnectionConfigVariableSecretValue; /** * String Value of configVariabley */ stringValue?: string; } interface ConnectionConfigVariableEncryptionKeyValue { /** * The [KMS key name] with which the content of the Operation is encrypted. The * expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. * Will be empty string if google managed. */ kmsKeyName?: string; /** * Type of Encryption Key * Possible values are: `GOOGLE_MANAGED`, `CUSTOMER_MANAGED`. 
*/ type: string; } interface ConnectionConfigVariableSecretValue { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionConnectorVersionInfraConfig { /** * (Output) * Max QPS supported by the connector version before throttling of requests. */ ratelimitThreshold: string; } interface ConnectionDestinationConfig { /** * The destinations for the key. * Structure is documented below. */ destinations?: outputs.integrationconnectors.ConnectionDestinationConfigDestination[]; /** * The key is the destination identifier that is supported by the Connector. */ key: string; } interface ConnectionDestinationConfigDestination { /** * Host */ host?: string; /** * port number */ port?: number; /** * Service Attachment */ serviceAttachment?: string; } interface ConnectionEventingConfig { /** * List containing additional auth configs. * Structure is documented below. */ additionalVariables?: outputs.integrationconnectors.ConnectionEventingConfigAdditionalVariable[]; /** * authConfig for Eventing Configuration. * Structure is documented below. */ authConfig?: outputs.integrationconnectors.ConnectionEventingConfigAuthConfig; /** * Enrichment Enabled. */ enrichmentEnabled?: boolean; /** * registrationDestinationConfig * Structure is documented below. */ registrationDestinationConfig: outputs.integrationconnectors.ConnectionEventingConfigRegistrationDestinationConfig; } interface ConnectionEventingConfigAdditionalVariable { /** * Boolean Value of configVariable. */ booleanValue?: boolean; /** * Encryption key value of configVariable. * Structure is documented below. */ encryptionKeyValue?: outputs.integrationconnectors.ConnectionEventingConfigAdditionalVariableEncryptionKeyValue; /** * Integer Value of configVariable. */ integerValue?: number; /** * Key for the configVariable */ key: string; /** * Secret value of configVariable * Structure is documented below. 
*/ secretValue?: outputs.integrationconnectors.ConnectionEventingConfigAdditionalVariableSecretValue; /** * String Value of configVariabley. */ stringValue?: string; } interface ConnectionEventingConfigAdditionalVariableEncryptionKeyValue { /** * The [KMS key name] with which the content of the Operation is encrypted. The * expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. * Will be empty string if google managed. */ kmsKeyName?: string; /** * Type of Encryption Key * Possible values are: `GOOGLE_MANAGED`, `CUSTOMER_MANAGED`. */ type?: string; } interface ConnectionEventingConfigAdditionalVariableSecretValue { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionEventingConfigAuthConfig { /** * List containing additional auth configs. * Structure is documented below. */ additionalVariables?: outputs.integrationconnectors.ConnectionEventingConfigAuthConfigAdditionalVariable[]; /** * The type of authentication configured. */ authKey?: string; /** * authType of the Connection * Possible values are: `USER_PASSWORD`. */ authType: string; /** * User password for Authentication. * Structure is documented below. */ userPassword: outputs.integrationconnectors.ConnectionEventingConfigAuthConfigUserPassword; } interface ConnectionEventingConfigAuthConfigAdditionalVariable { /** * Boolean Value of configVariable. */ booleanValue?: boolean; /** * Encryption key value of configVariable. * Structure is documented below. */ encryptionKeyValue?: outputs.integrationconnectors.ConnectionEventingConfigAuthConfigAdditionalVariableEncryptionKeyValue; /** * Integer Value of configVariable. */ integerValue?: number; /** * Key for the configVariable */ key: string; /** * Secret value of configVariable * Structure is documented below. */ secretValue?: outputs.integrationconnectors.ConnectionEventingConfigAuthConfigAdditionalVariableSecretValue; /** * String Value of configVariabley. 
*/ stringValue?: string; } interface ConnectionEventingConfigAuthConfigAdditionalVariableEncryptionKeyValue { /** * The [KMS key name] with which the content of the Operation is encrypted. The * expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. * Will be empty string if google managed. */ kmsKeyName?: string; /** * Type of Encryption Key * Possible values are: `GOOGLE_MANAGED`, `CUSTOMER_MANAGED`. */ type?: string; } interface ConnectionEventingConfigAuthConfigAdditionalVariableSecretValue { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionEventingConfigAuthConfigUserPassword { /** * Password for Authentication. * Structure is documented below. */ password?: outputs.integrationconnectors.ConnectionEventingConfigAuthConfigUserPasswordPassword; /** * Username for Authentication. */ username?: string; } interface ConnectionEventingConfigAuthConfigUserPasswordPassword { /** * The resource name of the secret version in the format, * format as: projects/*/secrets/*/versions/*. */ secretVersion: string; } interface ConnectionEventingConfigRegistrationDestinationConfig { /** * destinations for the connection * Structure is documented below. */ destinations?: outputs.integrationconnectors.ConnectionEventingConfigRegistrationDestinationConfigDestination[]; /** * Key for the connection */ key?: string; } interface ConnectionEventingConfigRegistrationDestinationConfigDestination { /** * Host */ host?: string; /** * port number */ port?: number; /** * Service Attachment */ serviceAttachment?: string; } interface ConnectionEventingRuntimeData { /** * Events listener endpoint. The value will populated after provisioning the events listener. */ eventsListenerEndpoint?: string; /** * (Output) * Current status of eventing. * Structure is documented below. 
*/ statuses: outputs.integrationconnectors.ConnectionEventingRuntimeDataStatus[]; } interface ConnectionEventingRuntimeDataStatus { /** * An arbitrary description for the Connection. */ description: string; /** * (Output) * State of the Eventing */ state: string; } interface ConnectionLockConfig { /** * Indicates whether or not the connection is locked. */ locked: boolean; /** * Describes why a connection is locked. */ reason?: string; } interface ConnectionLogConfig { /** * Enabled represents whether logging is enabled or not for a connection. */ enabled: boolean; /** * Log configuration level. * Possible values are: `LOG_LEVEL_UNSPECIFIED`, `ERROR`, `INFO`, `DEBUG`. */ level: string; } interface ConnectionNodeConfig { /** * Maximum number of nodes in the runtime nodes. */ maxNodeCount: number; /** * Minimum number of nodes in the runtime nodes. */ minNodeCount: number; } interface ConnectionSslConfig { /** * Additional SSL related field values. * Structure is documented below. */ additionalVariables?: outputs.integrationconnectors.ConnectionSslConfigAdditionalVariable[]; /** * Type of Client Cert (PEM/JKS/.. etc.) * Possible values are: `PEM`. */ clientCertType?: string; /** * Client Certificate * Structure is documented below. */ clientCertificate?: outputs.integrationconnectors.ConnectionSslConfigClientCertificate; /** * Client Private Key * Structure is documented below. */ clientPrivateKey?: outputs.integrationconnectors.ConnectionSslConfigClientPrivateKey; /** * Secret containing the passphrase protecting the Client Private Key * Structure is documented below. */ clientPrivateKeyPass?: outputs.integrationconnectors.ConnectionSslConfigClientPrivateKeyPass; /** * Private Server Certificate. Needs to be specified if trust model is PRIVATE. * Structure is documented below. */ privateServerCertificate?: outputs.integrationconnectors.ConnectionSslConfigPrivateServerCertificate; /** * Type of Server Cert (PEM/JKS/.. etc.) * Possible values are: `PEM`. 
*/ serverCertType?: string; /** * Enum for Trust Model * Possible values are: `PUBLIC`, `PRIVATE`, `INSECURE`. */ trustModel?: string; /** * Enum for controlling the SSL Type (TLS/MTLS) * Possible values are: `TLS`, `MTLS`. */ type: string; /** * Bool for enabling SSL */ useSsl?: boolean; } interface ConnectionSslConfigAdditionalVariable { /** * Boolean Value of configVariable. */ booleanValue?: boolean; /** * Encryption key value of configVariable. * Structure is documented below. */ encryptionKeyValue?: outputs.integrationconnectors.ConnectionSslConfigAdditionalVariableEncryptionKeyValue; /** * Integer Value of configVariable. */ integerValue?: number; /** * Key for the configVariable */ key: string; /** * Secret value of configVariable * Structure is documented below. */ secretValue?: outputs.integrationconnectors.ConnectionSslConfigAdditionalVariableSecretValue; /** * String Value of configVariabley. */ stringValue?: string; } interface ConnectionSslConfigAdditionalVariableEncryptionKeyValue { /** * The [KMS key name] with which the content of the Operation is encrypted. The * expected format: projects/*/locations/*/keyRings/*/cryptoKeys/*. * Will be empty string if google managed. */ kmsKeyName?: string; /** * Type of Encryption Key * Possible values are: `GOOGLE_MANAGED`, `CUSTOMER_MANAGED`. */ type?: string; } interface ConnectionSslConfigAdditionalVariableSecretValue { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionSslConfigClientCertificate { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionSslConfigClientPrivateKey { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionSslConfigClientPrivateKeyPass { /** * Secret version of Secret Value for Config variable. 
*/ secretVersion: string; } interface ConnectionSslConfigPrivateServerCertificate { /** * Secret version of Secret Value for Config variable. */ secretVersion: string; } interface ConnectionStatus { /** * An arbitrary description for the Connection. */ description: string; /** * (Output) * State of the Eventing */ state: string; /** * (Output) * Current status of eventing. * Structure is documented below. */ status: string; } } export declare namespace kms { interface CryptoKeyIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface CryptoKeyIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. 
*/ title: string; } interface CryptoKeyKeyAccessJustificationsPolicy { /** * The list of allowed reasons for access to this CryptoKey. Zero allowed * access reasons means all encrypt, decrypt, and sign operations for * this CryptoKey will fail. */ allowedAccessReasons?: string[]; } interface CryptoKeyPrimary { /** * The resource name for the CryptoKey. */ name: string; /** * (Output) * The current state of the CryptoKeyVersion. */ state: string; } interface CryptoKeyVersionAttestation { /** * The certificate chains needed to validate the attestation * Structure is documented below. */ certChains?: outputs.kms.CryptoKeyVersionAttestationCertChains; /** * (Output) * The attestation data provided by the HSM when the key operation was performed. */ content: string; /** * ExternalProtectionLevelOptions stores a group of additional fields for configuring a CryptoKeyVersion that are specific to the EXTERNAL protection level and EXTERNAL_VPC protection levels. * Structure is documented below. * * @deprecated `externalProtectionLevelOptions` is being un-nested from the `attestation` field. Please use the top level `externalProtectionLevelOptions` field instead. */ externalProtectionLevelOptions?: outputs.kms.CryptoKeyVersionAttestationExternalProtectionLevelOptions; /** * (Output) * The format of the attestation data. */ format: string; } interface CryptoKeyVersionAttestationCertChains { /** * Cavium certificate chain corresponding to the attestation. */ caviumCerts?: string[]; /** * Google card certificate chain corresponding to the attestation. */ googleCardCerts?: string[]; /** * Google partition certificate chain corresponding to the attestation. */ googlePartitionCerts?: string[]; } interface CryptoKeyVersionAttestationExternalProtectionLevelOptions { /** * The path to the external key material on the EKM when using EkmConnection e.g., "v0/my/key". Set this field instead of externalKeyUri when using an EkmConnection. 
*/ ekmConnectionKeyPath?: string; /** * The URI for an external resource that this CryptoKeyVersion represents. */ externalKeyUri?: string; } interface CryptoKeyVersionExternalProtectionLevelOptions { /** * The path to the external key material on the EKM when using EkmConnection e.g., "v0/my/key". Set this field instead of externalKeyUri when using an EkmConnection. */ ekmConnectionKeyPath?: string; /** * The URI for an external resource that this CryptoKeyVersion represents. */ externalKeyUri?: string; } interface CryptoKeyVersionTemplate { /** * The algorithm to use when creating a version based on this template. * See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs. */ algorithm: string; /** * The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE". */ protectionLevel?: string; } interface EkmConnectionIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface EkmConnectionIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
* * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface EkmConnectionServiceResolver { /** * Optional. The filter applied to the endpoints of the resolved service. If no filter is specified, all endpoints will be considered. An endpoint will be chosen arbitrarily from the filtered list for each request. For endpoint filter syntax and examples, see https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest. */ endpointFilter: string; /** * Required. The hostname of the EKM replica used at TLS and HTTP layers. */ hostname: string; /** * Required. A list of leaf server certificates used to authenticate HTTPS connections to the EKM replica. Currently, a maximum of 10 Certificate is supported. * Structure is documented below. */ serverCertificates: outputs.kms.EkmConnectionServiceResolverServerCertificate[]; /** * Required. The resource name of the Service Directory service pointing to an EKM replica, in the format projects/*/locations/*/namespaces/*/services/* */ serviceDirectoryService: string; } interface EkmConnectionServiceResolverServerCertificate { /** * (Output) * Output only. The issuer distinguished name in RFC 2253 format. Only present if parsed is true. */ issuer: string; /** * (Output) * Output only. The certificate is not valid after this time. Only present if parsed is true. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ notAfterTime: string; /** * (Output) * Output only. The certificate is not valid before this time. Only present if parsed is true. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ notBeforeTime: string; /** * (Output) * Output only. True if the certificate was parsed successfully. */ parsed: boolean; /** * Required. The raw certificate bytes in DER format. A base64-encoded string. */ rawDer: string; /** * (Output) * Output only. The certificate serial number as a hex string. Only present if parsed is true. */ serialNumber: string; /** * (Output) * Output only. The SHA-256 certificate fingerprint as a hex string. Only present if parsed is true. */ sha256Fingerprint: string; /** * (Output) * Output only. The subject distinguished name in RFC 2253 format. Only present if parsed is true. */ subject: string; /** * (Output) * Output only. The subject Alternative DNS names. Only present if parsed is true. */ subjectAlternativeDnsNames: string[]; } interface FolderKajPolicyConfigDefaultKeyAccessJustificationPolicy { /** * A KeyAccessJustificationsPolicy specifies zero or more allowed * AccessReason values for encrypt, decrypt, and sign operations on a * CryptoKey. * Each value may be one of: `CUSTOMER_INITIATED_SUPPORT`, `GOOGLE_INITIATED_SERVICE`, `THIRD_PARTY_DATA_REQUEST`, `GOOGLE_INITIATED_REVIEW`, `CUSTOMER_INITIATED_ACCESS`, `GOOGLE_INITIATED_SYSTEM_OPERATION`, `REASON_NOT_EXPECTED`, `MODIFIED_CUSTOMER_INITIATED_ACCESS`, `MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION`, `GOOGLE_RESPONSE_TO_PRODUCTION_ALERT`, `CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING`. */ allowedAccessReasons?: string[]; } interface GetCryptoKeyLatestVersionPublicKey { /** * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. */ algorithm: string; /** * The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. */ pem: string; } interface GetCryptoKeyVersionsPublicKey { /** * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. */ algorithm: string; /** * The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. */ pem: string; } interface GetCryptoKeyVersionsVersion { /** * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. */ algorithm: string; /** * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the * `gcp.kms.CryptoKey` resource/datasource. */ cryptoKey: string; id: string; name: string; protectionLevel: string; publicKeys: outputs.kms.GetCryptoKeyVersionsVersionPublicKey[]; state: string; version: number; } interface GetCryptoKeyVersionsVersionPublicKey { /** * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. */ algorithm: string; /** * The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. */ pem: string; } interface GetCryptoKeysKey { /** * The resource name of the backend environment associated with all CryptoKeyVersions within this CryptoKey. * The resource name is in the format "projects/*/locations/*/ekmConnections/*" and only applies to "EXTERNAL_VPC" keys. */ cryptoKeyBackend: string; /** * The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. * If not specified at creation time, the default duration is 30 days. */ destroyScheduledDuration: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. 
*/ effectiveLabels: { [key: string]: string; }; id: string; /** * Whether this key may contain imported versions only. */ importOnly: boolean; /** * The policy used for Key Access Justifications Policy Enforcement. If this * field is present and this key is enrolled in Key Access Justifications * Policy Enforcement, the policy will be evaluated in encrypt, decrypt, and * sign operations, and the operation will fail if rejected by the policy. The * policy is defined by specifying zero or more allowed justification codes. * https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes * By default, this field is absent, and all justification codes are allowed. * This field is currently in beta and is subject to change. */ keyAccessJustificationsPolicies: outputs.kms.GetCryptoKeysKeyKeyAccessJustificationsPolicy[]; /** * The key ring that the keys belongs to. Format: 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}'., */ keyRing?: string; /** * Labels with user-defined metadata to apply to this resource. * * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effective_labels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * The resource name for the CryptoKey. */ name?: string; /** * A copy of the primary CryptoKeyVersion that will be used by cryptoKeys.encrypt when this CryptoKey is given in EncryptRequest.name. * Keys with purpose ENCRYPT_DECRYPT may have a primary. For other keys, this field will be unset. */ primaries: outputs.kms.GetCryptoKeysKeyPrimary[]; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * The immutable purpose of this CryptoKey. 
See the * [purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) * for possible inputs. * Default value is "ENCRYPT_DECRYPT". */ purpose: string; /** * Every time this period passes, generate a new CryptoKeyVersion and set it as the primary. * The first rotation will take place after the specified period. The rotation period has * the format of a decimal number with up to 9 fractional digits, followed by the * letter 's' (seconds). It must be greater than a day (ie, 86400). */ rotationPeriod: string; /** * If set to true, the request will create a CryptoKey without any CryptoKeyVersions. * You must use the 'google_kms_crypto_key_version' resource to create a new CryptoKeyVersion * or 'google_kms_key_ring_import_job' resource to import the CryptoKeyVersion. * This field is only applicable during initial CryptoKey creation. */ skipInitialVersionCreation: boolean; /** * A template describing settings for new crypto key versions. */ versionTemplates: outputs.kms.GetCryptoKeysKeyVersionTemplate[]; } interface GetCryptoKeysKeyKeyAccessJustificationsPolicy { /** * The list of allowed reasons for access to this CryptoKey. Zero allowed * access reasons means all encrypt, decrypt, and sign operations for * this CryptoKey will fail. */ allowedAccessReasons: string[]; } interface GetCryptoKeysKeyPrimary { /** * The resource name for this CryptoKeyVersion. */ name: string; /** * The current state of the CryptoKeyVersion. */ state: string; } interface GetCryptoKeysKeyVersionTemplate { /** * The algorithm to use when creating a version based on this template. * See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs. */ algorithm: string; /** * The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE". 
*/ protectionLevel: string; } interface GetKMSCryptoKeyKeyAccessJustificationsPolicy { /** * The list of allowed reasons for access to this CryptoKey. Zero allowed * access reasons means all encrypt, decrypt, and sign operations for * this CryptoKey will fail. */ allowedAccessReasons: string[]; } interface GetKMSCryptoKeyPrimary { /** * The CryptoKey's name. * A CryptoKey’s name belonging to the specified Google Cloud Platform KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}` */ name: string; /** * The current state of the CryptoKeyVersion. */ state: string; } interface GetKMSCryptoKeyVersionPublicKey { /** * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. */ algorithm: string; /** * The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. */ pem: string; } interface GetKMSCryptoKeyVersionTemplate { /** * The algorithm to use when creating a version based on this template. * See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs. */ algorithm: string; /** * The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE". */ protectionLevel: string; } interface GetKeyHandlesKeyHandle { /** * The identifier of the KMS Key created for the KeyHandle. Its format is `projects/{projectId}/locations/{location}/keyRings/{keyRingName}/cryptoKeys/{cryptoKeyName}`. */ kmsKey: string; /** * The name of the KeyHandle. Its format is `projects/{projectId}/locations/{location}/keyHandles/{keyHandleName}`. */ name: string; /** * The resource type by which to filter KeyHandle e.g. {SERVICE}.googleapis.com/{TYPE}. See documentation for supported resource types. 
* * - - - */ resourceTypeSelector: string; } interface GetKeyRingsKeyRing { id: string; name: string; } interface KeyRingIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface KeyRingIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** The provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface KeyRingImportJobAttestation { /** * (Output) * The attestation data provided by the HSM when the key operation was performed. * A base64-encoded string. */ content: string; /** * (Output) * The format of the attestation data. */ format: string; } interface KeyRingImportJobPublicKey { /** * (Output) * The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections * for General Considerations and Textual Encoding of Subject Public Key Info. */ pem: string; } interface OrganizationKajPolicyConfigDefaultKeyAccessJustificationPolicy { /** * A KeyAccessJustificationsPolicy specifies zero or more allowed * AccessReason values for encrypt, decrypt, and sign operations on a * CryptoKey. * Each value may be one of: `CUSTOMER_INITIATED_SUPPORT`, `GOOGLE_INITIATED_SERVICE`, `THIRD_PARTY_DATA_REQUEST`, `GOOGLE_INITIATED_REVIEW`, `CUSTOMER_INITIATED_ACCESS`, `GOOGLE_INITIATED_SYSTEM_OPERATION`, `REASON_NOT_EXPECTED`, `MODIFIED_CUSTOMER_INITIATED_ACCESS`, `MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION`, `GOOGLE_RESPONSE_TO_PRODUCTION_ALERT`, `CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING`. */ allowedAccessReasons?: string[]; } interface ProjectKajPolicyConfigDefaultKeyAccessJustificationPolicy { /** * A KeyAccessJustificationsPolicy specifies zero or more allowed * AccessReason values for encrypt, decrypt, and sign operations on a * CryptoKey. * Each value may be one of: `CUSTOMER_INITIATED_SUPPORT`, `GOOGLE_INITIATED_SERVICE`, `THIRD_PARTY_DATA_REQUEST`, `GOOGLE_INITIATED_REVIEW`, `CUSTOMER_INITIATED_ACCESS`, `GOOGLE_INITIATED_SYSTEM_OPERATION`, `REASON_NOT_EXPECTED`, `MODIFIED_CUSTOMER_INITIATED_ACCESS`, `MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION`, `GOOGLE_RESPONSE_TO_PRODUCTION_ALERT`, `CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING`. */ allowedAccessReasons?: string[]; } } export declare namespace logging { interface BillingAccountBucketConfigCmekSettings { /** * The resource name for the configured Cloud KMS key. * KMS key name format: * "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" * To enable CMEK for the bucket, set this field to a valid kmsKeyName for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key. 
* The Cloud KMS key used by the bucket can be updated by changing the kmsKeyName to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ kmsKeyName: string; /** * The CryptoKeyVersion resource name for the configured Cloud KMS key. * KMS key name format: * "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]" * For example: * "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" * This is a read-only field used to convey the specific configured CryptoKeyVersion of kmsKey that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version. */ kmsKeyVersionName: string; /** * The resource name of the bucket. For example: "projects/my-project-id/locations/my-location/buckets/my-bucket-id" */ name: string; /** * The service account associated with a project for which CMEK will apply. * Before enabling CMEK for a logging bucket, you must first assign the cloudkms.cryptoKeyEncrypterDecrypter role to the service account associated with the project for which CMEK will apply. Use [v2.getCmekSettings](https://cloud.google.com/logging/docs/reference/v2/rest/v2/TopLevel/getCmekSettings#google.logging.v2.ConfigServiceV2.GetCmekSettings) to obtain the service account ID. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ serviceAccountId: string; } interface BillingAccountBucketConfigIndexConfig { /** * The LogEntry field path to index. 
* Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. */ fieldPath: string; /** * The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. */ type: string; } interface BillingAccountSinkBigqueryOptions { /** * Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables). * By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned * tables, the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) * has to be used instead. In both cases, tables are sharded based on UTC timezone. */ usePartitionedTables: boolean; } interface BillingAccountSinkExclusion { /** * A description of this exclusion. */ description?: string; /** * If set to True, then this exclusion is disabled and it does not exclude any log entries. */ disabled?: boolean; /** * An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to * write a filter. */ filter: string; /** * A client-assigned identifier, such as `load-balancer-exclusion`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric. */ name: string; } interface FolderBucketConfigCmekSettings { /** * The resource name for the configured Cloud KMS key. 
* KMS key name format: * "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" * To enable CMEK for the bucket, set this field to a valid kmsKeyName for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key. * The Cloud KMS key used by the bucket can be updated by changing the kmsKeyName to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ kmsKeyName: string; /** * The CryptoKeyVersion resource name for the configured Cloud KMS key. * KMS key name format: * "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]" * For example: * "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" * This is a read-only field used to convey the specific configured CryptoKeyVersion of kmsKey that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version. */ kmsKeyVersionName: string; /** * The resource name of the bucket. For example: "folders/my-folder-id/locations/my-location/buckets/my-bucket-id" */ name: string; /** * The service account associated with a project for which CMEK will apply. * Before enabling CMEK for a logging bucket, you must first assign the cloudkms.cryptoKeyEncrypterDecrypter role to the service account associated with the project for which CMEK will apply. Use [v2.getCmekSettings](https://cloud.google.com/logging/docs/reference/v2/rest/v2/TopLevel/getCmekSettings#google.logging.v2.ConfigServiceV2.GetCmekSettings) to obtain the service account ID. 
* See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ serviceAccountId: string; } interface FolderBucketConfigIndexConfig { /** * The LogEntry field path to index. * Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. */ fieldPath: string; /** * The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. */ type: string; } interface FolderSinkBigqueryOptions { /** * Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables). * By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned * tables, the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) * has to be used instead. In both cases, tables are sharded based on UTC timezone. */ usePartitionedTables: boolean; } interface FolderSinkExclusion { /** * A description of this exclusion. */ description?: string; /** * If set to True, then this exclusion is disabled and it does not exclude any log entries. */ disabled?: boolean; /** * An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to * write a filter. */ filter: string; /** * A client-assigned identifier, such as `load-balancer-exclusion`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric. 
*/ name: string; } interface GetSinkBigqueryOption { /** * Whether [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables) are used. */ usePartitionedTables: boolean; } interface GetSinkExclusion { /** * A description of this exclusion. */ description: string; /** * Whether this exclusion is disabled and it does not exclude any log entries. */ disabled: boolean; /** * An advanced logs filter that matches the log entries to be excluded. */ filter: string; /** * A client-assigned identifier, such as `load-balancer-exclusion`. */ name: string; } interface LinkedDatasetBigqueryDataset { /** * (Output) * Output only. The full resource name of the BigQuery dataset. The DATASET_ID will match the ID * of the link, so the link must match the naming restrictions of BigQuery datasets * (alphanumeric characters and underscores only). The dataset will have a resource path of * "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET_ID]" */ datasetId: string; } interface LogViewIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface LogViewIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
* * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface MetricBucketOptions { /** * Specifies a set of buckets with arbitrary widths. * Structure is documented below. */ explicitBuckets?: outputs.logging.MetricBucketOptionsExplicitBuckets; /** * Specifies an exponential sequence of buckets that have a width that is proportional to the value of * the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket. * Structure is documented below. */ exponentialBuckets?: outputs.logging.MetricBucketOptionsExponentialBuckets; /** * Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). * Each bucket represents a constant absolute uncertainty on the specific value in the bucket. * Structure is documented below. */ linearBuckets?: outputs.logging.MetricBucketOptionsLinearBuckets; } interface MetricBucketOptionsExplicitBuckets { /** * The values must be monotonically increasing. */ bounds: number[]; } interface MetricBucketOptionsExponentialBuckets { /** * Must be greater than 1. */ growthFactor: number; /** * Must be greater than 0. */ numFiniteBuckets: number; /** * Must be greater than 0. */ scale: number; } interface MetricBucketOptionsLinearBuckets { /** * Must be greater than 0. */ numFiniteBuckets: number; /** * Lower bound of the first bucket. */ offset: number; /** * Must be greater than 0. 
*/ width: number; } interface MetricMetricDescriptor { /** * A concise name for the metric, which can be displayed in user interfaces. Use sentence case * without an ending period, for example "Request count". This field is optional but it is * recommended to be set for any metrics associated with user-visible concepts, such as Quota. */ displayName?: string; /** * The set of labels that can be used to describe a specific instance of this metric type. For * example, the appengine.googleapis.com/http/server/response_latencies metric type has a label * for the HTTP response code, response_code, so you can look at latencies for successful responses * or just for responses that failed. * Structure is documented below. */ labels?: outputs.logging.MetricMetricDescriptorLabel[]; /** * Whether the metric records instantaneous values, changes to a value, etc. * Some combinations of metricKind and valueType might not be supported. * For counter metrics, set this to DELTA. * Possible values are: `DELTA`, `GAUGE`, `CUMULATIVE`. */ metricKind: string; /** * The unit in which the metric value is reported. It is only applicable if the valueType is * `INT64`, `DOUBLE`, or `DISTRIBUTION`. The supported units are a subset of * [The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard */ unit?: string; /** * Whether the measurement is an integer, a floating-point number, etc. * Some combinations of metricKind and valueType might not be supported. * For counter metrics, set this to INT64. * Possible values are: `BOOL`, `INT64`, `DOUBLE`, `STRING`, `DISTRIBUTION`, `MONEY`. */ valueType: string; } interface MetricMetricDescriptorLabel { /** * A human-readable description for the label. */ description?: string; /** * The label key. */ key: string; /** * The type of data that can be assigned to the label. * Default value is `STRING`. * Possible values are: `BOOL`, `INT64`, `STRING`. 
*/ valueType?: string; } interface OrganizationBucketConfigCmekSettings { /** * The resource name for the configured Cloud KMS key. * KMS key name format: * "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" * To enable CMEK for the bucket, set this field to a valid kmsKeyName for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key. * The Cloud KMS key used by the bucket can be updated by changing the kmsKeyName to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ kmsKeyName: string; /** * The CryptoKeyVersion resource name for the configured Cloud KMS key. * KMS key name format: * "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]" * For example: * "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" * This is a read-only field used to convey the specific configured CryptoKeyVersion of kmsKey that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version. */ kmsKeyVersionName: string; /** * The resource name of the bucket. For example: "organizations/my-organization-id/locations/my-location/buckets/my-bucket-id" */ name: string; /** * The service account associated with a project for which CMEK will apply. * Before enabling CMEK for a logging bucket, you must first assign the cloudkms.cryptoKeyEncrypterDecrypter role to the service account associated with the project for which CMEK will apply. 
Use [v2.getCmekSettings](https://cloud.google.com/logging/docs/reference/v2/rest/v2/TopLevel/getCmekSettings#google.logging.v2.ConfigServiceV2.GetCmekSettings) to obtain the service account ID. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ serviceAccountId: string; } interface OrganizationBucketConfigIndexConfig { /** * The LogEntry field path to index. * Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. */ fieldPath: string; /** * The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. */ type: string; } interface OrganizationSinkBigqueryOptions { /** * Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables). * By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned * tables the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) * has to be used instead. In both cases, tables are sharded based on UTC timezone. */ usePartitionedTables: boolean; } interface OrganizationSinkExclusion { /** * A description of this exclusion. */ description?: string; /** * If set to True, then this exclusion is disabled and it does not exclude any log entries. */ disabled?: boolean; /** * An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to * write a filter. */ filter: string; /** * A client-assigned identifier, such as `load-balancer-exclusion`. 
Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric. */ name: string; } interface ProjectBucketConfigCmekSettings { /** * The resource name for the configured Cloud KMS key. * KMS key name format: * `'projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]'` * To enable CMEK for the bucket, set this field to a valid kmsKeyName for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key. * The Cloud KMS key used by the bucket can be updated by changing the kmsKeyName to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ kmsKeyName: string; /** * The CryptoKeyVersion resource name for the configured Cloud KMS key. * KMS key name format: * `'projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]'` * For example: * "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" * This is a read-only field used to convey the specific configured CryptoKeyVersion of kmsKey that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version. */ kmsKeyVersionName: string; /** * The resource name of the CMEK settings. */ name: string; /** * The service account associated with a project for which CMEK will apply. * Before enabling CMEK for a logging bucket, you must first assign the cloudkms.cryptoKeyEncrypterDecrypter role to the service account associated with the project for which CMEK will apply. 
Use [v2.getCmekSettings](https://cloud.google.com/logging/docs/reference/v2/rest/v2/TopLevel/getCmekSettings#google.logging.v2.ConfigServiceV2.GetCmekSettings) to obtain the service account ID. * See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information. */ serviceAccountId: string; } interface ProjectBucketConfigIndexConfig { /** * The LogEntry field path to index. * Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. */ fieldPath: string; /** * The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. */ type: string; } interface ProjectSinkBigqueryOptions { /** * Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables). * By default, Logging creates dated tables based on the log entries' timestamps, e.g. `syslog20170523`. With partitioned * tables the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) * has to be used instead. In both cases, tables are sharded based on UTC timezone. */ usePartitionedTables: boolean; } interface ProjectSinkExclusion { /** * A description of this exclusion. */ description?: string; /** * If set to True, then this exclusion is disabled and it does not exclude any log entries. */ disabled?: boolean; /** * An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to * write a filter. */ filter: string; /** * A client-assigned identifier, such as `load-balancer-exclusion`. 
Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric. */ name: string; } interface SavedQueryLoggingQuery { /** * An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced-filters) which * is used to match log entries. */ filter: string; /** * Characters will be counted from the end of the string. */ summaryFieldEnd?: number; /** * Characters will be counted from the start of the string. */ summaryFieldStart?: number; /** * The names of the fields to display in the summary. * Structure is documented below. */ summaryFields?: outputs.logging.SavedQueryLoggingQuerySummaryField[]; } interface SavedQueryLoggingQuerySummaryField { /** * The field from the LogEntry to include in the summary line. */ field?: string; } interface SavedQueryOpsAnalyticsQuery { /** * A logs analytics SQL query, which generally follows BigQuery format. */ sqlQueryText: string; } } export declare namespace looker { interface InstanceAdminSettings { /** * Email domain allowlist for the instance. * Define the email domains to which your users can deliver Looker (Google Cloud core) content. * Updating this list will restart the instance. Updating the allowed email domains from terraform * means the value provided will be considered as the entire list and not an amendment to the * existing list of allowed email domains. */ allowedEmailDomains?: string[]; } interface InstanceControlledEgressConfig { /** * List of fully qualified domain names to be added to the allowlist for * outbound traffic. */ egressFqdns?: string[]; /** * Whether the Looker Marketplace is enabled. */ marketplaceEnabled?: boolean; } interface InstanceCustomDomain { /** * Domain name */ domain?: string; /** * (Output) * Status of the custom domain. */ state: string; } interface InstanceDenyMaintenancePeriod { /** * Required. Start date of the deny maintenance period * Structure is documented below. 
*/ endDate: outputs.looker.InstanceDenyMaintenancePeriodEndDate; /** * Required. Start date of the deny maintenance period * Structure is documented below. */ startDate: outputs.looker.InstanceDenyMaintenancePeriodStartDate; /** * Required. Start time of the window in UTC time. * Structure is documented below. */ time: outputs.looker.InstanceDenyMaintenancePeriodTime; } interface InstanceDenyMaintenancePeriodEndDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 * to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a * month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without * a year. */ year?: number; } interface InstanceDenyMaintenancePeriodStartDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 * to specify a year by itself or a year and month where the day isn't significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a * month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without * a year. */ year?: number; } interface InstanceDenyMaintenancePeriodTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. */ seconds?: number; } interface InstanceEncryptionConfig { /** * Name of the customer managed encryption key (CMEK) in KMS. */ kmsKeyName?: string; /** * (Output) * Full name and version of the CMEK key currently in use to encrypt Looker data. */ kmsKeyNameVersion: string; /** * (Output) * Status of the customer managed encryption key (CMEK) in KMS. 
*/ kmsKeyState: string; } interface InstanceMaintenanceWindow { /** * Required. Day of the week for this MaintenanceWindow (in UTC). * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeek: string; /** * Required. Start time of the window in UTC time. * Structure is documented below. */ startTime: outputs.looker.InstanceMaintenanceWindowStartTime; } interface InstanceMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. */ seconds?: number; } interface InstanceOauthConfig { /** * The client ID for the Oauth config. */ clientId: string; /** * The client secret for the Oauth config. */ clientSecret: string; } interface InstancePeriodicExportConfig { /** * Cloud Storage bucket URI for periodic export. * Format: gs://{bucket_name} */ gcsUri: string; /** * Name of the CMEK key in KMS. * Format: * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} */ kmsKey: string; /** * Time in UTC to start the periodic export job. * Structure is documented below. */ startTime: outputs.looker.InstancePeriodicExportConfigStartTime; } interface InstancePeriodicExportConfigStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. 
*/ seconds?: number; } interface InstancePscConfig { /** * List of VPCs that are allowed ingress into the Looker instance. */ allowedVpcs?: string[]; /** * (Output) * URI of the Looker service attachment. */ lookerServiceAttachmentUri: string; /** * List of egress service attachment configurations. * Structure is documented below. */ serviceAttachments?: outputs.looker.InstancePscConfigServiceAttachment[]; } interface InstancePscConfigServiceAttachment { /** * (Output) * Status of the service attachment connection. */ connectionStatus: string; /** * Fully qualified domain name that will be used in the private DNS record created for the service attachment. */ localFqdn?: string; /** * URI of the service attachment to connect to. */ targetServiceAttachmentUri?: string; } interface InstanceUserMetadata { /** * Number of additional Developer Users to allocate to the Looker Instance. */ additionalDeveloperUserCount?: number; /** * Number of additional Standard Users to allocate to the Looker Instance. */ additionalStandardUserCount?: number; /** * Number of additional Viewer Users to allocate to the Looker Instance. */ additionalViewerUserCount?: number; } } export declare namespace lustre { interface GetInstanceAccessRulesOption { /** * An array of access rule exceptions. Each rule defines IP address ranges * that should have different squash behavior than the default. */ accessRules: outputs.lustre.GetInstanceAccessRulesOptionAccessRule[]; /** * The GID to map the root user to when root squashing is enabled * (e.g., 65534 for nobody). */ defaultSquashGid: number; /** * Set to "ROOT_SQUASH" to enable root squashing by default. * Other values include "NO_SQUASH". Possible values: ["ROOT_SQUASH", "NO_SQUASH"] */ defaultSquashMode: string; /** * The UID to map the root user to when root squashing is enabled * (e.g., 65534 for nobody). 
*/ defaultSquashUid: number; } interface GetInstanceAccessRulesOptionAccessRule { /** * An array of IP address strings or CIDR ranges that this rule applies to. */ ipAddressRanges: string[]; /** * A unique identifier for the access rule. */ name: string; /** * The squash mode for this specific rule. Currently, only "NO_SQUASH" * is supported for exceptions. Possible values: ["NO_SQUASH"] */ squashMode: string; } interface InstanceAccessRulesOptions { /** * An array of access rule exceptions. Each rule defines IP address ranges * that should have different squash behavior than the default. * Structure is documented below. */ accessRules?: outputs.lustre.InstanceAccessRulesOptionsAccessRule[]; /** * The GID to map the root user to when root squashing is enabled * (e.g., 65534 for nobody). */ defaultSquashGid?: number; /** * Set to "ROOT_SQUASH" to enable root squashing by default. * Other values include "NO_SQUASH". * Possible values are: `ROOT_SQUASH`, `NO_SQUASH`. */ defaultSquashMode: string; /** * The UID to map the root user to when root squashing is enabled * (e.g., 65534 for nobody). */ defaultSquashUid?: number; } interface InstanceAccessRulesOptionsAccessRule { /** * An array of IP address strings or CIDR ranges that this rule applies to. */ ipAddressRanges: string[]; /** * A unique identifier for the access rule. */ name: string; /** * The squash mode for this specific rule. Currently, only "NO_SQUASH" * is supported for exceptions. * Possible values are: `NO_SQUASH`. */ squashMode: string; } } export declare namespace managedkafka { interface AclAclEntry { /** * The host. Must be set to "*" for Managed Service for Apache Kafka. */ host?: string; /** * The operation type. Allowed values are (case insensitive): ALL, READ, * WRITE, CREATE, DELETE, ALTER, DESCRIBE, CLUSTER_ACTION, DESCRIBE_CONFIGS, * ALTER_CONFIGS, and IDEMPOTENT_WRITE. 
See https://kafka.apache.org/documentation/#operations_resources_and_protocols * for valid combinations of resourceType and operation for different Kafka API requests. */ operation: string; /** * The permission type. Accepted values are (case insensitive): ALLOW, DENY. */ permissionType?: string; /** * The principal. Specified as Google Cloud account, with the Kafka StandardAuthorizer prefix User:". For example: "User:test-kafka-client@test-project.iam.gserviceaccount.com". Can be the wildcard "User:*" to refer to all users. */ principal: string; } interface ClusterBrokerCapacityConfig { /** * The disk to provision for each broker in Gibibytes. Minimum: 100 GiB. */ diskSizeGib?: string; } interface ClusterCapacityConfig { /** * The memory to provision for the cluster in bytes. The value must be between 1 GiB and 8 GiB per vCPU. Ex. 1024Mi, 4Gi. */ memoryBytes: string; /** * The number of vCPUs to provision for the cluster. The minimum is 3. */ vcpuCount: string; } interface ClusterGcpConfig { /** * The configuration of access to the Kafka cluster. * Structure is documented below. */ accessConfig: outputs.managedkafka.ClusterGcpConfigAccessConfig; /** * The Cloud KMS Key name to use for encryption. The key must be located in the same region as the cluster and cannot be changed. Must be in the format `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`. */ kmsKey?: string; } interface ClusterGcpConfigAccessConfig { /** * Virtual Private Cloud (VPC) subnets where IP addresses for the Kafka cluster are allocated. To make the cluster available in a VPC, you must specify at least one `networkConfigs` block. Max of 10 subnets per cluster. Additional subnets may be specified with additional `networkConfigs` blocks. * Structure is documented below. */ networkConfigs: outputs.managedkafka.ClusterGcpConfigAccessConfigNetworkConfig[]; } interface ClusterGcpConfigAccessConfigNetworkConfig { /** * Name of the VPC subnet from which the cluster is accessible. 
Both broker and bootstrap server IP addresses and DNS entries are automatically created in the subnet. There can only be one subnet per network, and the subnet must be located in the same region as the cluster. The project may differ. The name of the subnet must be in the format `projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET`. */ subnet: string; } interface ClusterRebalanceConfig { /** * The rebalance behavior for the cluster. When not specified, defaults to `NO_REBALANCE`. Possible values: `MODE_UNSPECIFIED`, `NO_REBALANCE`, `AUTO_REBALANCE_ON_SCALE_UP`. */ mode?: string; } interface ClusterTlsConfig { /** * The rules for mapping mTLS certificate Distinguished Names (DNs) to shortened principal names for Kafka ACLs. This field corresponds exactly to the ssl.principal.mapping.rules broker config and matches the format and syntax defined in the Apache Kafka documentation. Setting or modifying this field will trigger a rolling restart of the Kafka brokers to apply the change. An empty string means that the default Kafka behavior is used. Example: `RULE:^CN=(.?),OU=ServiceUsers.$/$1@example.com/,DEFAULT` */ sslPrincipalMappingRules?: string; /** * The configuration of the broker truststore. If specified, clients can use mTLS for authentication. * Structure is documented below. */ trustConfig?: outputs.managedkafka.ClusterTlsConfigTrustConfig; } interface ClusterTlsConfigTrustConfig { /** * Configuration for the Google Certificate Authority Service. To support mTLS, you must specify at least one `casConfigs` block. A maximum of 10 CA pools can be specified. Additional CA pools may be specified with additional `casConfigs` blocks. * Structure is documented below. */ casConfigs?: outputs.managedkafka.ClusterTlsConfigTrustConfigCasConfig[]; } interface ClusterTlsConfigTrustConfigCasConfig { /** * The name of the CA pool to pull CA certificates from. The CA pool does not need to be in the same project or location as the Kafka cluster. 
Must be in the format `projects/PROJECT_ID/locations/LOCATION/caPools/CA_POOL_ID`.
Managed Kafka Connect clusters can now reach any endpoint accessible from the primary subnet without the need to define additional subnets. Please see https://cloud.google.com/managed-service-for-apache-kafka/docs/connect-cluster/create-connect-cluster#worker-subnet for more information. */ additionalSubnets?: string[]; /** * Additional DNS domain names from the subnet's network to be made visible to the Connect Cluster. When using MirrorMaker2, it's necessary to add the bootstrap address's dns domain name of the target cluster to make it visible to the connector. For example: my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog */ dnsDomainNames?: string[]; /** * VPC subnet to make available to the Kafka Connect cluster. Structured like: projects/{project}/regions/{region}/subnetworks/{subnet_id}. It is used to create a Private Service Connect (PSC) interface for the Kafka Connect workers. It must be located in the same region as the Kafka Connect cluster. The CIDR range of the subnet must be within the IPv4 address ranges for private networks, as specified in RFC 1918. The primary subnet CIDR range must have a minimum size of /22 (1024 addresses). */ primarySubnet: string; } interface ConnectorTaskRestartPolicy { /** * The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ maximumBackoff?: string; /** * The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ minimumBackoff?: string; } } export declare namespace memcache { interface GetInstanceMaintenancePolicy { /** * Output only. The time when the policy was created. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits */ createTime: string; /** * Optional. Description of what this policy is for. * Create/Update methods return INVALID_ARGUMENT if the * length is greater than 512. */ description: string; /** * Output only. The time when the policy was updated. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Required. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number of weeklyMaintenanceWindows * is expected to be one. */ weeklyMaintenanceWindows: outputs.memcache.GetInstanceMaintenancePolicyWeeklyMaintenanceWindow[]; } interface GetInstanceMaintenancePolicyWeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ day: string; /** * Required. The length of the maintenance window, ranging from 3 hours to 8 hours. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Required. Start time of the window in UTC time. */ startTimes: outputs.memcache.GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime[]; } interface GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. 
Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetInstanceMaintenanceSchedule { /** * Output only. The end time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * Output only. The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * Output only. The start time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface GetInstanceMemcacheNode { /** * Hostname or IP address of the Memcached node used by the clients to connect to the Memcached server on this node. */ host: string; /** * Identifier of the Memcached node. The node id does not include project or location like the Memcached instance name. */ nodeId: string; /** * The port number of the Memcached server on this node. */ port: number; /** * Current state of the Memcached node. */ state: string; /** * Location (GCP Zone) for the Memcached node. */ zone: string; } interface GetInstanceMemcacheParameter { /** * This is a unique ID associated with this set of parameters. */ id: string; /** * User-defined set of parameters to use in the memcache process. */ params: { [key: string]: string; }; } interface GetInstanceNodeConfig { /** * Number of CPUs per node. */ cpuCount: number; /** * Memory size in Mebibytes for each memcache node. */ memorySizeMb: number; } interface InstanceMaintenancePolicy { /** * (Output) * Output only. The time when the policy was created. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits */ createTime: string; /** * Optional. Description of what this policy is for. * Create/Update methods return INVALID_ARGUMENT if the * length is greater than 512. */ description?: string; /** * (Output) * Output only. The time when the policy was updated. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Required. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number of weeklyMaintenanceWindows * is expected to be one. * Structure is documented below. */ weeklyMaintenanceWindows: outputs.memcache.InstanceMaintenancePolicyWeeklyMaintenanceWindow[]; } interface InstanceMaintenancePolicyWeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ day: string; /** * Required. The length of the maintenance window, ranging from 3 hours to 8 hours. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Required. Start time of the window in UTC time. * Structure is documented below. */ startTime: outputs.memcache.InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime; } interface InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. 
*/ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface InstanceMaintenanceSchedule { /** * (Output) * Output only. The end time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * (Output) * Output only. The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * (Output) * Output only. The start time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface InstanceMemcacheNode { /** * (Output) * Hostname or IP address of the Memcached node used by the clients to connect to the Memcached server on this node. */ host: string; /** * (Output) * Identifier of the Memcached node. The node id does not include project or location like the Memcached instance name. */ nodeId: string; /** * (Output) * The port number of the Memcached server on this node. */ port: number; /** * (Output) * Current state of the Memcached node. */ state: string; /** * (Output) * Location (GCP Zone) for the Memcached node. */ zone: string; } interface InstanceMemcacheParameters { /** * (Output) * This is a unique ID associated with this set of parameters. */ id: string; /** * User-defined set of parameters to use in the memcache process. */ params?: { [key: string]: string; }; } interface InstanceNodeConfig { /** * Number of CPUs per node. */ cpuCount: number; /** * Memory size in Mebibytes for each memcache node. 
*/ memorySizeMb: number; } } export declare namespace memorystore { interface GetInstanceAutomatedBackupConfig { /** * Trigger automated backups at a fixed frequency. */ fixedFrequencySchedules: outputs.memorystore.GetInstanceAutomatedBackupConfigFixedFrequencySchedule[]; /** * How long to keep automated backups before the backups are deleted. * The value should be between 1 day and 365 days. If not specified, the default value is 35 days. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". The defaultValue is "3024000s" */ retention: string; } interface GetInstanceAutomatedBackupConfigFixedFrequencySchedule { /** * The start time of every automated backup in UTC. * It must be set to the start of an hour. This field is required. */ startTimes: outputs.memorystore.GetInstanceAutomatedBackupConfigFixedFrequencyScheduleStartTime[]; } interface GetInstanceAutomatedBackupConfigFixedFrequencyScheduleStartTime { /** * Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; } interface GetInstanceCrossInstanceReplicationConfig { /** * The instance role supports the following values: * 1. 'INSTANCE_ROLE_UNSPECIFIED': This is an independent instance that has never participated in cross instance replication. It allows both reads and writes. * 2. 'NONE': This is an independent instance that previously participated in cross instance replication(either as a 'PRIMARY' or 'SECONDARY' cluster). It allows both reads and writes. * 3. 'PRIMARY': This instance serves as the replication source for secondary instance that are replicating from it. Any data written to it is automatically replicated to its secondary clusters. It allows both reads and writes. * 4. 'SECONDARY': This instance replicates data from the primary instance. It allows only reads. 
Possible values: ["INSTANCE_ROLE_UNSPECIFIED", "NONE", "PRIMARY", "SECONDARY"] */ instanceRole: string; /** * An output only view of all the member instance participating in cross instance replication. This field is populated for all the member clusters irrespective of their cluster role. */ memberships: outputs.memorystore.GetInstanceCrossInstanceReplicationConfigMembership[]; /** * This field is only set for a secondary instance. Details of the primary instance that is used as the replication source for this secondary instance. This is allowed to be set only for clusters whose cluster role is of type 'SECONDARY'. */ primaryInstances: outputs.memorystore.GetInstanceCrossInstanceReplicationConfigPrimaryInstance[]; /** * List of secondary instances that are replicating from this primary cluster. This is allowed to be set only for instances whose cluster role is of type 'PRIMARY'. */ secondaryInstances: outputs.memorystore.GetInstanceCrossInstanceReplicationConfigSecondaryInstance[]; /** * The last time cross instance replication config was updated. */ updateTime: string; } interface GetInstanceCrossInstanceReplicationConfigMembership { /** * Details of the primary instance that is used as the replication source for all the secondary instances. */ primaryInstances: outputs.memorystore.GetInstanceCrossInstanceReplicationConfigMembershipPrimaryInstance[]; /** * List of secondary instances that are replicating from the primary instance. */ secondaryInstances: outputs.memorystore.GetInstanceCrossInstanceReplicationConfigMembershipSecondaryInstance[]; } interface GetInstanceCrossInstanceReplicationConfigMembershipPrimaryInstance { /** * The full resource path of the primary instance in the format: projects/{project}/locations/{region}/instance/{instance-id} */ instance: string; /** * The unique id of the primary instance. 
*/ uid: string; } interface GetInstanceCrossInstanceReplicationConfigMembershipSecondaryInstance { /** * The full resource path of the secondary instance in the format: projects/{project}/locations/{region}/instance/{instance-id} */ instance: string; /** * The unique id of the secondary instance. */ uid: string; } interface GetInstanceCrossInstanceReplicationConfigPrimaryInstance { /** * The full resource path of the primary instance in the format: projects/{project}/locations/{region}/instances/{instance-id} */ instance: string; /** * The unique id of the primary instance. */ uid: string; } interface GetInstanceCrossInstanceReplicationConfigSecondaryInstance { /** * The full resource path of the Nth instance in the format: projects/{project}/locations/{region}/instance/{instance-id} */ instance: string; /** * The unique id of the Nth instance. */ uid: string; } interface GetInstanceDesiredAutoCreatedEndpoint { /** * Required. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * Required. The consumer projectId where the forwarding rule is created from. */ projectId: string; } interface GetInstanceDesiredPscAutoConnection { /** * Required. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * Required. The consumer projectId where the forwarding rule is created from. */ projectId: string; } interface GetInstanceDiscoveryEndpoint { /** * Output only. IP address of the exposed endpoint clients connect to. */ address: string; /** * Output only. The network where the IP address of the discovery endpoint will be * reserved, in the form of * projects/{network_project}/global/networks/{network_id}. */ network: string; /** * Output only. The port number of the exposed endpoint. */ port: number; } interface GetInstanceEndpoint { /** * A group of PSC connections. 
They are created in the same VPC network, one for each service attachment in the cluster. */ connections: outputs.memorystore.GetInstanceEndpointConnection[]; } interface GetInstanceEndpointConnection { /** * Detailed information of a PSC connection that is created through service connectivity automation. */ pscAutoConnections: outputs.memorystore.GetInstanceEndpointConnectionPscAutoConnection[]; } interface GetInstanceEndpointConnectionPscAutoConnection { /** * Output Only. Type of a PSC Connection. * Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * Output only. The URI of the consumer side forwarding rule. * Format: * projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} */ forwardingRule: string; /** * Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ ipAddress: string; /** * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * Output only. Ports of the exposed endpoint. */ port: number; /** * Output only. The consumer projectId where the forwarding rule is created from. */ projectId: string; /** * Output only. The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId: string; /** * Output only. The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface GetInstanceGcsSource { /** * URIs of the GCS objects to import. * Example: gs://bucket1/object1, gs://bucket2/folder2/object2 */ uris: string[]; } interface GetInstanceMaintenancePolicy { /** * The time when the policy was created. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. 
*/ createTime: string; /** * The time when the policy was last updated. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Optional. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number * of weeklyWindow is expected to be one. */ weeklyMaintenanceWindows: outputs.memorystore.GetInstanceMaintenancePolicyWeeklyMaintenanceWindow[]; } interface GetInstanceMaintenancePolicyWeeklyMaintenanceWindow { /** * The day of week that maintenance updates occur. * * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ day: string; /** * Duration of the maintenance window. * The current window is fixed at 1 hour. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Start time of the window in UTC time. */ startTimes: outputs.memorystore.GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime[]; } interface GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetInstanceMaintenanceSchedule { /** * The end time of any upcoming scheduled maintenance for this cluster. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * The start time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface GetInstanceManagedBackupSource { /** * Example: 'projects/{project}/locations/{location}/backupCollections/{collection}/backups/{backup}'. */ backup: string; } interface GetInstanceManagedServerCa { /** * The PEM encoded CA certificate chains for managed server authentication */ caCerts: outputs.memorystore.GetInstanceManagedServerCaCaCert[]; } interface GetInstanceManagedServerCaCaCert { /** * The certificates that form the CA chain, from leaf to root order */ certificates: string[]; } interface GetInstanceNodeConfig { /** * Output only. Memory size in GB of the node. */ sizeGb: number; } interface GetInstancePersistenceConfig { /** * Configuration for AOF based persistence. */ aofConfigs: outputs.memorystore.GetInstancePersistenceConfigAofConfig[]; /** * Optional. Current persistence mode. * Possible values: * DISABLED * RDB * AOF Possible values: ["DISABLED", "RDB", "AOF"] */ mode: string; /** * Configuration for RDB based persistence. */ rdbConfigs: outputs.memorystore.GetInstancePersistenceConfigRdbConfig[]; } interface GetInstancePersistenceConfigAofConfig { /** * Optional. The fsync mode. * Possible values: * NEVER * EVERY_SEC * ALWAYS */ appendFsync: string; } interface GetInstancePersistenceConfigRdbConfig { /** * Optional. Period between RDB snapshots. * Possible values: * ONE_HOUR * SIX_HOURS * TWELVE_HOURS * TWENTY_FOUR_HOURS */ rdbSnapshotPeriod: string; /** * Optional. 
Time that the first snapshot was/will be attempted, and to which future * snapshots will be aligned. If not provided, the current time will be * used. */ rdbSnapshotStartTime: string; } interface GetInstancePscAttachmentDetail { /** * Service attachment URI which your self-created PscConnection should use as target. */ connectionType: string; /** * Service attachment URI which your self-created PscConnection should use as target. */ serviceAttachment: string; } interface GetInstancePscAutoConnection { /** * Output Only. Type of a PSC Connection. * Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * Output only. The URI of the consumer side forwarding rule. * Format: * projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} */ forwardingRule: string; /** * Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ ipAddress: string; /** * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * Output only. Ports of the exposed endpoint. */ port: number; /** * Output only. The consumer projectId where the forwarding rule is created from. */ projectId: string; /** * Output only. The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId: string; /** * Output Only. The status of the PSC connection: whether a connection exists and ACTIVE or it no longer exists. * Possible values: * ACTIVE * NOT_FOUND */ pscConnectionStatus: string; /** * Output only. The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface GetInstanceStateInfo { /** * Represents information about instance with state UPDATING. 
*/ updateInfos: outputs.memorystore.GetInstanceStateInfoUpdateInfo[]; } interface GetInstanceStateInfoUpdateInfo { /** * Output only. Target engine version for the instance. */ targetEngineVersion: string; /** * Output only. Target node type for the instance. */ targetNodeType: string; /** * Output only. Target number of replica nodes per shard for the instance. */ targetReplicaCount: number; /** * Output only. Target number of shards for the instance. */ targetShardCount: number; } interface GetInstanceZoneDistributionConfig { /** * Optional. Current zone distribution mode. Defaults to MULTI_ZONE. * Possible values: * MULTI_ZONE * SINGLE_ZONE Possible values: ["MULTI_ZONE", "SINGLE_ZONE"] */ mode: string; /** * Optional. Defines zone where all resources will be allocated with SINGLE_ZONE mode. * Ignored for MULTI_ZONE mode. */ zone: string; } interface InstanceAutomatedBackupConfig { /** * Trigger automated backups at a fixed frequency. * Structure is documented below. */ fixedFrequencySchedule: outputs.memorystore.InstanceAutomatedBackupConfigFixedFrequencySchedule; /** * How long to keep automated backups before the backups are deleted. * The value should be between 1 day and 365 days. If not specified, the default value is 35 days. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". The defaultValue is "3024000s" */ retention: string; } interface InstanceAutomatedBackupConfigFixedFrequencySchedule { /** * The start time of every automated backup in UTC. * It must be set to the start of an hour. This field is required. * Structure is documented below. */ startTime: outputs.memorystore.InstanceAutomatedBackupConfigFixedFrequencyScheduleStartTime; } interface InstanceAutomatedBackupConfigFixedFrequencyScheduleStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. 
*/ hours: number; } interface InstanceCrossInstanceReplicationConfig { /** * The instance role supports the following values: * 1. `INSTANCE_ROLE_UNSPECIFIED`: This is an independent instance that has never participated in cross instance replication. It allows both reads and writes. * 2. `NONE`: This is an independent instance that previously participated in cross instance replication(either as a `PRIMARY` or `SECONDARY` cluster). It allows both reads and writes. * 3. `PRIMARY`: This instance serves as the replication source for secondary instance that are replicating from it. Any data written to it is automatically replicated to its secondary clusters. It allows both reads and writes. * 4. `SECONDARY`: This instance replicates data from the primary instance. It allows only reads. * Possible values are: `INSTANCE_ROLE_UNSPECIFIED`, `NONE`, `PRIMARY`, `SECONDARY`. */ instanceRole?: string; /** * (Output) * An output only view of all the member instance participating in cross instance replication. This field is populated for all the member clusters irrespective of their cluster role. * Structure is documented below. */ memberships: outputs.memorystore.InstanceCrossInstanceReplicationConfigMembership[]; /** * This field is only set for a secondary instance. Details of the primary instance that is used as the replication source for this secondary instance. This is allowed to be set only for clusters whose cluster role is of type `SECONDARY`. * Structure is documented below. */ primaryInstance?: outputs.memorystore.InstanceCrossInstanceReplicationConfigPrimaryInstance; /** * List of secondary instances that are replicating from this primary cluster. This is allowed to be set only for instances whose cluster role is of type `PRIMARY`. * Structure is documented below. */ secondaryInstances?: outputs.memorystore.InstanceCrossInstanceReplicationConfigSecondaryInstance[]; /** * (Output) * The last time cross instance replication config was updated. 
*/ updateTime: string; } interface InstanceCrossInstanceReplicationConfigMembership { /** * Details of the primary instance that is used as the replication source for all the secondary instances. */ primaryInstances: outputs.memorystore.InstanceCrossInstanceReplicationConfigMembershipPrimaryInstance[]; /** * List of secondary instances that are replicating from the primary instance. */ secondaryInstances: outputs.memorystore.InstanceCrossInstanceReplicationConfigMembershipSecondaryInstance[]; } interface InstanceCrossInstanceReplicationConfigMembershipPrimaryInstance { /** * The full resource path of the primary instance in the format: projects/{project}/locations/{region}/instances/{instance-id} */ instance: string; /** * (Output) * The unique id of the primary instance. */ uid: string; } interface InstanceCrossInstanceReplicationConfigMembershipSecondaryInstance { /** * The full resource path of the secondary instance in the format: projects/{project}/locations/{region}/instance/{instance-id} */ instance: string; /** * Output only. System assigned, unique identifier for the instance. */ uid: string; } interface InstanceCrossInstanceReplicationConfigPrimaryInstance { /** * The full resource path of the primary instance in the format: projects/{project}/locations/{region}/instances/{instance-id} */ instance?: string; /** * (Output) * The unique id of the primary instance. */ uid: string; } interface InstanceCrossInstanceReplicationConfigSecondaryInstance { /** * (Output) * The full resource path of the secondary instance in the format: projects/{project}/locations/{region}/instance/{instance-id} */ instance?: string; /** * (Output) * The unique id of the secondary instance. */ uid: string; } interface InstanceDesiredAutoCreatedEndpoint { /** * (Output) * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * (Output) * Output only. 
The consumer projectId where the forwarding rule is created from. */ projectId: string; } interface InstanceDesiredPscAutoConnection { /** * (Output) * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * (Output) * Output only. The consumer projectId where the forwarding rule is created from. */ projectId: string; } interface InstanceDesiredUserCreatedEndpointsDesiredUserCreatedEndpoint { /** * Structure is documented below. */ connections?: outputs.memorystore.InstanceDesiredUserCreatedEndpointsDesiredUserCreatedEndpointConnection[]; } interface InstanceDesiredUserCreatedEndpointsDesiredUserCreatedEndpointConnection { /** * Detailed information of a PSC connection that is created by the customer * who owns the cluster. * Structure is documented below. */ pscConnection?: outputs.memorystore.InstanceDesiredUserCreatedEndpointsDesiredUserCreatedEndpointConnectionPscConnection; } interface InstanceDesiredUserCreatedEndpointsDesiredUserCreatedEndpointConnectionPscConnection { /** * (Output) * Output Only. Type of a PSC Connection. * Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * The URI of the consumer side forwarding rule. * Format: * projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} */ forwardingRule: string; /** * The IP allocated on the consumer network for the PSC forwarding rule. */ ipAddress: string; /** * The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * The consumer projectId where the forwarding rule is created from. */ projectId: string; /** * The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId: string; /** * (Output) * Output Only. 
The status of the PSC connection: whether a connection exists and ACTIVE or it no longer exists. * Possible values: * ACTIVE * NOT_FOUND */ pscConnectionStatus: string; /** * The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface InstanceDiscoveryEndpoint { /** * (Output) * Output only. IP address of the exposed endpoint clients connect to. */ address: string; /** * (Output) * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * (Output) * Output only. Ports of the exposed endpoint. */ port: number; } interface InstanceEndpoint { /** * A group of PSC connections. They are created in the same VPC network, one for each service attachment in the cluster. * Structure is documented below. */ connections?: outputs.memorystore.InstanceEndpointConnection[]; } interface InstanceEndpointConnection { /** * Detailed information of a PSC connection that is created through service connectivity automation. * Structure is documented below. */ pscAutoConnection?: outputs.memorystore.InstanceEndpointConnectionPscAutoConnection; } interface InstanceEndpointConnectionPscAutoConnection { /** * (Output) * Output Only. Type of a PSC Connection. * Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * (Output) * Output only. The URI of the consumer side forwarding rule. * Format: * projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} */ forwardingRule: string; /** * (Output) * Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ ipAddress: string; /** * (Output) * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. 
*/ network: string; /** * (Output) * Output only. Ports of the exposed endpoint. */ port: number; /** * (Output) * Output only. The consumer projectId where the forwarding rule is created from. */ projectId: string; /** * (Output) * Output only. The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId: string; /** * (Output) * Output only. The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface InstanceGcsSource { /** * URIs of the GCS objects to import. * Example: gs://bucket1/object1, gs://bucket2/folder2/object2 */ uris: string[]; } interface InstanceMaintenancePolicy { /** * (Output) * The time when the policy was created. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ createTime: string; /** * (Output) * The time when the policy was last updated. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Optional. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number * of weeklyWindow is expected to be one. * Structure is documented below. */ weeklyMaintenanceWindows?: outputs.memorystore.InstanceMaintenancePolicyWeeklyMaintenanceWindow[]; } interface InstanceMaintenancePolicyWeeklyMaintenanceWindow { /** * The day of week that maintenance updates occur. * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ day: string; /** * (Output) * Duration of the maintenance window. 
* The current window is fixed at 1 hour. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Start time of the window in UTC time. * Structure is documented below. */ startTime: outputs.memorystore.InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime; } interface InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface InstanceMaintenanceSchedule { /** * (Output) * The end time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * (Output) * The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * (Output) * The start time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface InstanceManagedBackupSource { /** * Example: `projects/{project}/locations/{location}/backupCollections/{collection}/backups/{backup}`. */ backup: string; } interface InstanceManagedServerCa { /** * (Output) * The PEM encoded CA certificate chains for managed server authentication * Structure is documented below. 
*/ caCerts: outputs.memorystore.InstanceManagedServerCaCaCert[]; } interface InstanceManagedServerCaCaCert { /** * (Output) * The certificates that form the CA chain, from leaf to root order */ certificates: string[]; } interface InstanceNodeConfig { /** * (Output) * Output only. Memory size in GB of the node. */ sizeGb: number; } interface InstancePersistenceConfig { /** * Configuration for AOF based persistence. * Structure is documented below. */ aofConfig: outputs.memorystore.InstancePersistenceConfigAofConfig; /** * Optional. Current persistence mode. * Possible values: * DISABLED * RDB * AOF * Possible values are: `DISABLED`, `RDB`, `AOF`. */ mode: string; /** * Configuration for RDB based persistence. * Structure is documented below. */ rdbConfig: outputs.memorystore.InstancePersistenceConfigRdbConfig; } interface InstancePersistenceConfigAofConfig { /** * Optional. The fsync mode. * Possible values: * NEVER * EVERY_SEC * ALWAYS */ appendFsync: string; } interface InstancePersistenceConfigRdbConfig { /** * Optional. Period between RDB snapshots. * Possible values: * ONE_HOUR * SIX_HOURS * TWELVE_HOURS * TWENTY_FOUR_HOURS */ rdbSnapshotPeriod: string; /** * Optional. Time that the first snapshot was/will be attempted, and to which future * snapshots will be aligned. If not provided, the current time will be * used. */ rdbSnapshotStartTime: string; } interface InstancePscAttachmentDetail { /** * (Output) * Output Only. Type of a PSC Connection. * Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * (Output) * Output only. The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface InstancePscAutoConnection { /** * (Output) * Output Only. Type of a PSC Connection. 
* Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * (Output) * Output only. The URI of the consumer side forwarding rule. * Format: * projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} */ forwardingRule: string; /** * (Output) * Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ ipAddress: string; /** * (Output) * Output only. The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * (Output) * Output only. Ports of the exposed endpoint. */ port: number; /** * (Output) * Output only. The consumer projectId where the forwarding rule is created from. */ projectId: string; /** * (Output) * Output only. The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId: string; /** * (Output) * Output Only. The status of the PSC connection: whether a connection exists and ACTIVE or it no longer exists. * Possible values: * ACTIVE * NOT_FOUND */ pscConnectionStatus: string; /** * (Output) * Output only. The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface InstanceStateInfo { /** * (Output) * Represents information about instance with state UPDATING. * Structure is documented below. */ updateInfos: outputs.memorystore.InstanceStateInfoUpdateInfo[]; } interface InstanceStateInfoUpdateInfo { /** * (Output) * Output only. Target engine version for the instance. */ targetEngineVersion: string; /** * (Output) * Output only. Target node type for the instance. */ targetNodeType: string; /** * (Output) * Output only. Target number of replica nodes per shard for the instance. */ targetReplicaCount: number; /** * (Output) * Output only. Target number of shards for the instance. 
*/ targetShardCount: number; } interface InstanceZoneDistributionConfig { /** * Optional. Current zone distribution mode. Defaults to MULTI_ZONE. * Possible values: * MULTI_ZONE * SINGLE_ZONE * Possible values are: `MULTI_ZONE`, `SINGLE_ZONE`. */ mode: string; /** * Optional. Defines zone where all resources will be allocated with SINGLE_ZONE mode. * Ignored for MULTI_ZONE mode. */ zone?: string; } } export declare namespace migrationcenter { interface PreferenceSetVirtualMachinePreferences { /** * Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. Possible values: `COMMITMENT_PLAN_UNSPECIFIED`, `COMMITMENT_PLAN_NONE`, `COMMITMENT_PLAN_ONE_YEAR`, `COMMITMENT_PLAN_THREE_YEARS` */ commitmentPlan?: string; /** * The user preferences relating to Compute Engine target platform. * Structure is documented below. */ computeEnginePreferences?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesComputeEnginePreferences; /** * The user preferences relating to target regions. * Structure is documented below. */ regionPreferences?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesRegionPreferences; /** * Sizing optimization strategy specifies the preferred strategy used when extrapolating usage data to calculate insights and recommendations for a virtual machine. If you are unsure which value to set, a moderate sizing optimization strategy is often a good value to start with. Possible values: `SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED`, `SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE`, `SIZING_OPTIMIZATION_STRATEGY_MODERATE`, `SIZING_OPTIMIZATION_STRATEGY_AGGRESSIVE` */ sizingOptimizationStrategy?: string; /** * Preferences concerning Sole Tenancy nodes and VMs. * Structure is documented below. 
*/ soleTenancyPreferences?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesSoleTenancyPreferences; /** * Target product for assets using this preference set. Specify either target product or business goal, but not both. Possible values: `COMPUTE_MIGRATION_TARGET_PRODUCT_UNSPECIFIED`, `COMPUTE_MIGRATION_TARGET_PRODUCT_COMPUTE_ENGINE`, `COMPUTE_MIGRATION_TARGET_PRODUCT_VMWARE_ENGINE`, `COMPUTE_MIGRATION_TARGET_PRODUCT_SOLE_TENANCY` */ targetProduct?: string; /** * The user preferences relating to Google Cloud VMware Engine target platform. * Structure is documented below. */ vmwareEnginePreferences?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesVmwareEnginePreferences; } interface PreferenceSetVirtualMachinePreferencesComputeEnginePreferences { /** * License type to consider when calculating costs for virtual machine insights and recommendations. If unspecified, costs are calculated based on the default licensing plan. Possible values: `LICENSE_TYPE_UNSPECIFIED`, `LICENSE_TYPE_DEFAULT`, `LICENSE_TYPE_BRING_YOUR_OWN_LICENSE` */ licenseType?: string; /** * The type of machines to consider when calculating virtual machine migration insights and recommendations. Not all machine types are available in all zones and regions. * Structure is documented below. */ machinePreferences?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesComputeEnginePreferencesMachinePreferences; } interface PreferenceSetVirtualMachinePreferencesComputeEnginePreferencesMachinePreferences { /** * Compute Engine machine series to consider for insights and recommendations. If empty, no restriction is applied on the machine series. * Structure is documented below. 
*/ allowedMachineSeries?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesComputeEnginePreferencesMachinePreferencesAllowedMachineSeries[]; } interface PreferenceSetVirtualMachinePreferencesComputeEnginePreferencesMachinePreferencesAllowedMachineSeries { /** * Code to identify a Compute Engine machine series. Consult https://cloud.google.com/compute/docs/machine-resource#machine_type_comparison for more details on the available series. */ code?: string; } interface PreferenceSetVirtualMachinePreferencesRegionPreferences { /** * A list of preferred regions, ordered by the most preferred region first. Set only valid Google Cloud region names. See https://cloud.google.com/compute/docs/regions-zones for available regions. */ preferredRegions?: string[]; } interface PreferenceSetVirtualMachinePreferencesSoleTenancyPreferences { /** * Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. Possible values: `COMMITMENT_PLAN_UNSPECIFIED`, `ON_DEMAND`, `COMMITMENT_1_YEAR`, `COMMITMENT_3_YEAR` */ commitmentPlan?: string; /** * CPU overcommit ratio. Acceptable values are between 1.0 and 2.0 inclusive. */ cpuOvercommitRatio?: number; /** * Sole Tenancy nodes maintenance policy. Possible values: `HOST_MAINTENANCE_POLICY_UNSPECIFIED`, `HOST_MAINTENANCE_POLICY_DEFAULT`, `HOST_MAINTENANCE_POLICY_RESTART_IN_PLACE`, `HOST_MAINTENANCE_POLICY_MIGRATE_WITHIN_NODE_GROUP` */ hostMaintenancePolicy?: string; /** * A list of sole tenant node types. An empty list means that all possible node types will be considered. * Structure is documented below. */ nodeTypes?: outputs.migrationcenter.PreferenceSetVirtualMachinePreferencesSoleTenancyPreferencesNodeType[]; } interface PreferenceSetVirtualMachinePreferencesSoleTenancyPreferencesNodeType { /** * Name of the Sole Tenant node. 
Consult https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes */ nodeName?: string; } interface PreferenceSetVirtualMachinePreferencesVmwareEnginePreferences { /** * Commitment plan to consider when calculating costs for virtual machine insights and recommendations. If you are unsure which value to set, a 3 year commitment plan is often a good value to start with. Possible values: `COMMITMENT_PLAN_UNSPECIFIED`, `ON_DEMAND`, `COMMITMENT_1_YEAR_MONTHLY_PAYMENTS`, `COMMITMENT_3_YEAR_MONTHLY_PAYMENTS`, `COMMITMENT_1_YEAR_UPFRONT_PAYMENT`, `COMMITMENT_3_YEAR_UPFRONT_PAYMENT`, */ commitmentPlan?: string; /** * CPU overcommit ratio. Acceptable values are between 1.0 and 8.0, with 0.1 increment. */ cpuOvercommitRatio?: number; /** * Memory overcommit ratio. Acceptable values are 1.0, 1.25, 1.5, 1.75 and 2.0. */ memoryOvercommitRatio?: number; /** * The Deduplication and Compression ratio is based on the logical (Used Before) space required to store data before applying deduplication and compression, in relation to the physical (Used After) space required after applying deduplication and compression. Specifically, the ratio is the Used Before space divided by the Used After space. For example, if the Used Before space is 3 GB, but the physical Used After space is 1 GB, the deduplication and compression ratio is 3x. Acceptable values are between 1.0 and 4.0. */ storageDeduplicationCompressionRatio?: number; } } export declare namespace ml { interface EngineModelDefaultVersion { /** * The name specified for the version when it was created. */ name: string; } } export declare namespace modelarmor { interface FloorsettingAiPlatformFloorSetting { /** * If true, log Model Armor filter results to Cloud Logging. */ enableCloudLogging?: boolean; /** * If true, Model Armor filters will be run in inspect and block mode. * Requests that trip Model Armor filters will be blocked. */ inspectAndBlock?: boolean; /** * If true, Model Armor filters will be run in inspect only mode. 
No action * will be taken on the request. */ inspectOnly?: boolean; } interface FloorsettingFilterConfig { /** * Malicious URI filter settings. * Structure is documented below. */ maliciousUriFilterSettings?: outputs.modelarmor.FloorsettingFilterConfigMaliciousUriFilterSettings; /** * Prompt injection and Jailbreak Filter settings. * Structure is documented below. */ piAndJailbreakFilterSettings?: outputs.modelarmor.FloorsettingFilterConfigPiAndJailbreakFilterSettings; /** * Responsible AI Filter settings. * Structure is documented below. */ raiSettings?: outputs.modelarmor.FloorsettingFilterConfigRaiSettings; /** * Sensitive Data Protection settings. * Structure is documented below. */ sdpSettings?: outputs.modelarmor.FloorsettingFilterConfigSdpSettings; } interface FloorsettingFilterConfigMaliciousUriFilterSettings { /** * Tells whether the Malicious URI filter is enabled or disabled. * Possible values: * ENABLED * DISABLED */ filterEnforcement?: string; } interface FloorsettingFilterConfigPiAndJailbreakFilterSettings { /** * Possible values: * LOW_AND_ABOVE * MEDIUM_AND_ABOVE * HIGH */ confidenceLevel?: string; /** * Tells whether Prompt injection and Jailbreak filter is enabled or * disabled. * Possible values: * ENABLED * DISABLED */ filterEnforcement?: string; } interface FloorsettingFilterConfigRaiSettings { /** * List of Responsible AI filters enabled for template. * Structure is documented below. */ raiFilters: outputs.modelarmor.FloorsettingFilterConfigRaiSettingsRaiFilter[]; } interface FloorsettingFilterConfigRaiSettingsRaiFilter { /** * Possible values: * LOW_AND_ABOVE * MEDIUM_AND_ABOVE * HIGH */ confidenceLevel?: string; /** * Possible values: * SEXUALLY_EXPLICIT * HATE_SPEECH * HARASSMENT * DANGEROUS */ filterType: string; } interface FloorsettingFilterConfigSdpSettings { /** * Sensitive Data Protection Advanced configuration. * Structure is documented below. 
     */
    advancedConfig?: outputs.modelarmor.FloorsettingFilterConfigSdpSettingsAdvancedConfig;
    /**
     * Sensitive Data Protection basic configuration.
     * Structure is documented below.
     */
    basicConfig?: outputs.modelarmor.FloorsettingFilterConfigSdpSettingsBasicConfig;
}
interface FloorsettingFilterConfigSdpSettingsAdvancedConfig {
    /**
     * Optional Sensitive Data Protection Deidentify template resource name.
     * If provided then DeidentifyContent action is performed during Sanitization
     * using this template and inspect template. The De-identified data will
     * be returned in SdpDeidentifyResult.
     * Note that all info-types present in the deidentify template must be present
     * in inspect template.
     * e.g.
     * `projects/{project}/locations/{location}/deidentifyTemplates/{deidentify_template}`
     */
    deidentifyTemplate?: string;
    /**
     * Sensitive Data Protection inspect template resource name.
     * If only inspect template is provided (de-identify template not provided),
     * then Sensitive Data Protection InspectContent action is performed during
     * Sanitization. All Sensitive Data Protection findings identified during
     * inspection will be returned as SdpFinding in SdpInspectionResult.
     * e.g.
     * `projects/{project}/locations/{location}/inspectTemplates/{inspect_template}`
     */
    inspectTemplate?: string;
}
interface FloorsettingFilterConfigSdpSettingsBasicConfig {
    /**
     * Tells whether the Sensitive Data Protection basic config is enabled or
     * disabled.
     * Possible values:
     * ENABLED
     * DISABLED
     */
    filterEnforcement?: string;
}
interface FloorsettingFloorSettingMetadata {
    /**
     * Metadata for multi language detection.
     * Structure is documented below.
     */
    multiLanguageDetection?: outputs.modelarmor.FloorsettingFloorSettingMetadataMultiLanguageDetection;
}
interface FloorsettingFloorSettingMetadataMultiLanguageDetection {
    /**
     * If true, multi language detection will be enabled.
*/ enableMultiLanguageDetection: boolean; } interface FloorsettingGoogleMcpServerFloorSetting { /** * If true, log Model Armor filter results to Cloud Logging. */ enableCloudLogging?: boolean; /** * If true, Model Armor filters will be run in inspect and block mode. * Requests that trip Model Armor filters will be blocked. */ inspectAndBlock?: boolean; /** * If true, Model Armor filters will be run in inspect only mode. No action * will be taken on the request. */ inspectOnly?: boolean; } interface TemplateFilterConfig { /** * Malicious URI filter settings. * Structure is documented below. */ maliciousUriFilterSettings?: outputs.modelarmor.TemplateFilterConfigMaliciousUriFilterSettings; /** * Prompt injection and Jailbreak Filter settings. * Structure is documented below. */ piAndJailbreakFilterSettings?: outputs.modelarmor.TemplateFilterConfigPiAndJailbreakFilterSettings; /** * Responsible AI Filter settings. * Structure is documented below. */ raiSettings?: outputs.modelarmor.TemplateFilterConfigRaiSettings; /** * Sensitive Data Protection settings. * Structure is documented below. */ sdpSettings?: outputs.modelarmor.TemplateFilterConfigSdpSettings; } interface TemplateFilterConfigMaliciousUriFilterSettings { /** * Tells whether the Malicious URI filter is enabled or disabled. * Possible values: * ENABLED * DISABLED */ filterEnforcement?: string; } interface TemplateFilterConfigPiAndJailbreakFilterSettings { /** * Possible values: * LOW_AND_ABOVE * MEDIUM_AND_ABOVE * HIGH */ confidenceLevel?: string; /** * Tells whether Prompt injection and Jailbreak filter is enabled or * disabled. * Possible values: * ENABLED * DISABLED */ filterEnforcement?: string; } interface TemplateFilterConfigRaiSettings { /** * List of Responsible AI filters enabled for template. * Structure is documented below. 
     */
    raiFilters: outputs.modelarmor.TemplateFilterConfigRaiSettingsRaiFilter[];
}
interface TemplateFilterConfigRaiSettingsRaiFilter {
    /**
     * Possible values:
     * LOW_AND_ABOVE
     * MEDIUM_AND_ABOVE
     * HIGH
     */
    confidenceLevel?: string;
    /**
     * Possible values:
     * SEXUALLY_EXPLICIT
     * HATE_SPEECH
     * HARASSMENT
     * DANGEROUS
     */
    filterType: string;
}
interface TemplateFilterConfigSdpSettings {
    /**
     * Sensitive Data Protection Advanced configuration.
     * Structure is documented below.
     */
    advancedConfig?: outputs.modelarmor.TemplateFilterConfigSdpSettingsAdvancedConfig;
    /**
     * Sensitive Data Protection basic configuration.
     * Structure is documented below.
     */
    basicConfig?: outputs.modelarmor.TemplateFilterConfigSdpSettingsBasicConfig;
}
interface TemplateFilterConfigSdpSettingsAdvancedConfig {
    /**
     * Optional Sensitive Data Protection Deidentify template resource name.
     * If provided then DeidentifyContent action is performed during Sanitization
     * using this template and inspect template. The De-identified data will
     * be returned in SdpDeidentifyResult.
     * Note that all info-types present in the deidentify template must be present
     * in inspect template.
     * e.g.
     * `projects/{project}/locations/{location}/deidentifyTemplates/{deidentify_template}`
     */
    deidentifyTemplate?: string;
    /**
     * Sensitive Data Protection inspect template resource name.
     * If only inspect template is provided (de-identify template not provided),
     * then Sensitive Data Protection InspectContent action is performed during
     * Sanitization. All Sensitive Data Protection findings identified during
     * inspection will be returned as SdpFinding in SdpInspectionResult.
     * e.g.
     * `projects/{project}/locations/{location}/inspectTemplates/{inspect_template}`
     */
    inspectTemplate?: string;
}
interface TemplateFilterConfigSdpSettingsBasicConfig {
    /**
     * Tells whether the Sensitive Data Protection basic config is enabled or
     * disabled.
* Possible values: * ENABLED * DISABLED */ filterEnforcement?: string; } interface TemplateTemplateMetadata { /** * Indicates the custom error code set by the user to be returned to the end * user if the LLM response trips Model Armor filters. */ customLlmResponseSafetyErrorCode?: number; /** * Indicates the custom error message set by the user to be returned to the * end user if the LLM response trips Model Armor filters. */ customLlmResponseSafetyErrorMessage?: string; /** * Indicates the custom error code set by the user to be returned to the end * user by the service extension if the prompt trips Model Armor filters. */ customPromptSafetyErrorCode?: number; /** * Indicates the custom error message set by the user to be returned to the * end user if the prompt trips Model Armor filters. */ customPromptSafetyErrorMessage?: string; /** * Possible values: * INSPECT_ONLY * INSPECT_AND_BLOCK */ enforcementType?: string; /** * If true, partial detector failures should be ignored. */ ignorePartialInvocationFailures?: boolean; /** * If true, log sanitize operations. */ logSanitizeOperations?: boolean; /** * If true, log template crud operations. */ logTemplateOperations?: boolean; /** * Metadata to enable multi language detection via template. * Structure is documented below. */ multiLanguageDetection?: outputs.modelarmor.TemplateTemplateMetadataMultiLanguageDetection; } interface TemplateTemplateMetadataMultiLanguageDetection { /** * If true, multi language detection will be enabled. */ enableMultiLanguageDetection: boolean; } } export declare namespace monitoring { interface AlertPolicyAlertStrategy { /** * If an alert policy that was active has no data for this long, any open incidents will close. */ autoClose?: string; /** * Control over how the notification channels in `notificationChannels` * are notified when this alert fires, on a per-channel basis. * Structure is documented below. 
*/ notificationChannelStrategies?: outputs.monitoring.AlertPolicyAlertStrategyNotificationChannelStrategy[]; /** * Control when notifications will be sent out. * Each value may be one of: `NOTIFICATION_PROMPT_UNSPECIFIED`, `OPENED`, `CLOSED`. */ notificationPrompts?: string[]; /** * Required for alert policies with a LogMatch condition. * This limit is not implemented for alert policies that are not log-based. * Structure is documented below. */ notificationRateLimit?: outputs.monitoring.AlertPolicyAlertStrategyNotificationRateLimit; } interface AlertPolicyAlertStrategyNotificationChannelStrategy { /** * The notification channels that these settings apply to. Each of these * correspond to the name field in one of the NotificationChannel objects * referenced in the notificationChannels field of this AlertPolicy. The format is * `projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]` */ notificationChannelNames?: string[]; /** * The frequency at which to send reminder notifications for open incidents. */ renotifyInterval?: string; } interface AlertPolicyAlertStrategyNotificationRateLimit { /** * Not more than one notification per period. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example "60.5s". */ period?: string; } interface AlertPolicyCondition { /** * A condition that checks that a time series * continues to receive new data points. * Structure is documented below. */ conditionAbsent?: outputs.monitoring.AlertPolicyConditionConditionAbsent; /** * A condition that checks for log messages matching given constraints. * If set, no other conditions can be present. * Structure is documented below. */ conditionMatchedLog?: outputs.monitoring.AlertPolicyConditionConditionMatchedLog; /** * A Monitoring Query Language query that outputs a boolean stream * Structure is documented below. 
*/ conditionMonitoringQueryLanguage?: outputs.monitoring.AlertPolicyConditionConditionMonitoringQueryLanguage; /** * A condition type that allows alert policies to be defined using * Prometheus Query Language (PromQL). * The PrometheusQueryLanguageCondition message contains information * from a Prometheus alerting rule and its associated rule group. * Structure is documented below. */ conditionPrometheusQueryLanguage?: outputs.monitoring.AlertPolicyConditionConditionPrometheusQueryLanguage; /** * A condition that allows alerting policies to be defined using GoogleSQL. * SQL conditions examine a sliding window of logs using GoogleSQL. * Alert policies with SQL conditions may incur additional billing. * Structure is documented below. */ conditionSql?: outputs.monitoring.AlertPolicyConditionConditionSql; /** * A condition that compares a time series against a * threshold. * Structure is documented below. */ conditionThreshold?: outputs.monitoring.AlertPolicyConditionConditionThreshold; /** * A short name or phrase used to identify the * condition in dashboards, notifications, and * incidents. To avoid confusion, don't use the same * display name for multiple conditions in the same * policy. */ displayName: string; /** * (Output) * The unique resource name for this condition. * Its syntax is: * projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] * [CONDITION_ID] is assigned by Stackdriver Monitoring when * the condition is created as part of a new or updated alerting * policy. */ name: string; } interface AlertPolicyConditionConditionAbsent { /** * Specifies the alignment of data points in * individual time series as well as how to * combine the retrieved time series together * (such as when aggregating multiple streams * on each resource to a single stream for each * resource or when aggregating streams across * all members of a group of resources). * Multiple aggregations are applied in the * order specified. * Structure is documented below. 
*/ aggregations?: outputs.monitoring.AlertPolicyConditionConditionAbsentAggregation[]; /** * The amount of time that a time series must * fail to report new data to be considered * failing. Currently, only values that are a * multiple of a minute--e.g. 60s, 120s, or 300s * --are supported. */ duration: string; /** * A filter that identifies which time series * should be compared with the threshold.The * filter is similar to the one that is * specified in the * MetricService.ListTimeSeries request (that * call is useful to verify the time series * that will be retrieved / processed) and must * specify the metric type and optionally may * contain restrictions on resource type, * resource labels, and metric labels. This * field may not exceed 2048 Unicode characters * in length. */ filter?: string; /** * The number/percent of time series for which * the comparison must hold in order for the * condition to trigger. If unspecified, then * the condition will trigger if the comparison * is true for any of the time series that have * been identified by filter and aggregations. * Structure is documented below. */ trigger?: outputs.monitoring.AlertPolicyConditionConditionAbsentTrigger; } interface AlertPolicyConditionConditionAbsentAggregation { /** * The alignment period for per-time * series alignment. If present, * alignmentPeriod must be at least * 60 seconds. After per-time series * alignment, each time series will * contain data points only on the * period boundaries. If * perSeriesAligner is not specified * or equals ALIGN_NONE, then this * field is ignored. If * perSeriesAligner is specified and * does not equal ALIGN_NONE, then * this field must be defined; * otherwise an error is returned. */ alignmentPeriod?: string; /** * The approach to be used to combine * time series. Not all reducer * functions may be applied to all * time series, depending on the * metric type and the value type of * the original time series. 
* Reduction may change the metric * type or value type of the time * series.Time series data must be * aligned in order to perform cross- * time series reduction. If * crossSeriesReducer is specified, * then perSeriesAligner must be * specified and not equal ALIGN_NONE * and alignmentPeriod must be * specified; otherwise, an error is * returned. * Possible values are: `REDUCE_NONE`, `REDUCE_MEAN`, `REDUCE_MIN`, `REDUCE_MAX`, `REDUCE_SUM`, `REDUCE_STDDEV`, `REDUCE_COUNT`, `REDUCE_COUNT_TRUE`, `REDUCE_COUNT_FALSE`, `REDUCE_FRACTION_TRUE`, `REDUCE_PERCENTILE_99`, `REDUCE_PERCENTILE_95`, `REDUCE_PERCENTILE_50`, `REDUCE_PERCENTILE_05`. */ crossSeriesReducer?: string; /** * The set of fields to preserve when * crossSeriesReducer is specified. * The groupByFields determine how * the time series are partitioned * into subsets prior to applying the * aggregation function. Each subset * contains time series that have the * same value for each of the * grouping fields. Each individual * time series is a member of exactly * one subset. The crossSeriesReducer * is applied to each subset of time * series. It is not possible to * reduce across different resource * types, so this field implicitly * contains resource.type. Fields not * specified in groupByFields are * aggregated away. If groupByFields * is not specified and all the time * series have the same resource * type, then the time series are * aggregated into a single output * time series. If crossSeriesReducer * is not defined, this field is * ignored. */ groupByFields?: string[]; /** * The approach to be used to align * individual time series. Not all * alignment functions may be applied * to all time series, depending on * the metric type and value type of * the original time series. * Alignment may change the metric * type or the value type of the time * series.Time series data must be * aligned in order to perform cross- * time series reduction. 
If * crossSeriesReducer is specified, * then perSeriesAligner must be * specified and not equal ALIGN_NONE * and alignmentPeriod must be * specified; otherwise, an error is * returned. * Possible values are: `ALIGN_NONE`, `ALIGN_DELTA`, `ALIGN_RATE`, `ALIGN_INTERPOLATE`, `ALIGN_NEXT_OLDER`, `ALIGN_MIN`, `ALIGN_MAX`, `ALIGN_MEAN`, `ALIGN_COUNT`, `ALIGN_SUM`, `ALIGN_STDDEV`, `ALIGN_COUNT_TRUE`, `ALIGN_COUNT_FALSE`, `ALIGN_FRACTION_TRUE`, `ALIGN_PERCENTILE_99`, `ALIGN_PERCENTILE_95`, `ALIGN_PERCENTILE_50`, `ALIGN_PERCENTILE_05`, `ALIGN_PERCENT_CHANGE`. */ perSeriesAligner?: string; } interface AlertPolicyConditionConditionAbsentTrigger { /** * The absolute number of time series * that must fail the predicate for the * condition to be triggered. */ count?: number; /** * The percentage of time series that * must fail the predicate for the * condition to be triggered. */ percent?: number; } interface AlertPolicyConditionConditionMatchedLog { /** * A logs-based filter. */ filter: string; /** * A map from a label key to an extractor expression, which is used to * extract the value for this label key. Each entry in this map is * a specification for how data should be extracted from log entries that * match filter. Each combination of extracted values is treated as * a separate rule for the purposes of triggering notifications. * Label keys and corresponding values can be used in notifications * generated by this condition. */ labelExtractors?: { [key: string]: string; }; } interface AlertPolicyConditionConditionMonitoringQueryLanguage { /** * The amount of time that a time series must * violate the threshold to be considered * failing. Currently, only values that are a * multiple of a minute--e.g., 0, 60, 120, or * 300 seconds--are supported. If an invalid * value is given, an error will be returned. 
* When choosing a duration, it is useful to * keep in mind the frequency of the underlying * time series data (which may also be affected * by any alignments specified in the * aggregations field); a good duration is long * enough so that a single outlier does not * generate spurious alerts, but short enough * that unhealthy states are detected and * alerted on quickly. */ duration: string; /** * A condition control that determines how * metric-threshold conditions are evaluated when * data stops arriving. * Possible values are: `EVALUATION_MISSING_DATA_INACTIVE`, `EVALUATION_MISSING_DATA_ACTIVE`, `EVALUATION_MISSING_DATA_NO_OP`. */ evaluationMissingData?: string; /** * Monitoring Query Language query that outputs a boolean stream. */ query: string; /** * The number/percent of time series for which * the comparison must hold in order for the * condition to trigger. If unspecified, then * the condition will trigger if the comparison * is true for any of the time series that have * been identified by filter and aggregations, * or by the ratio, if denominatorFilter and * denominatorAggregations are specified. * Structure is documented below. */ trigger?: outputs.monitoring.AlertPolicyConditionConditionMonitoringQueryLanguageTrigger; } interface AlertPolicyConditionConditionMonitoringQueryLanguageTrigger { /** * The absolute number of time series * that must fail the predicate for the * condition to be triggered. */ count?: number; /** * The percentage of time series that * must fail the predicate for the * condition to be triggered. */ percent?: number; } interface AlertPolicyConditionConditionPrometheusQueryLanguage { /** * The alerting rule name of this alert in the corresponding Prometheus * configuration file. * Some external tools may require this field to be populated correctly * in order to refer to the original Prometheus configuration file. 
* The rule group name and the alert name are necessary to update the * relevant AlertPolicies in case the definition of the rule group changes * in the future. * This field is optional. If this field is not empty, then it must be a * valid Prometheus label name. */ alertRule?: string; /** * Whether to disable metric existence validation for this condition. * This allows alerting policies to be defined on metrics that do not yet * exist, improving advanced customer workflows such as configuring * alerting policies using Terraform. * Users with the `monitoring.alertPolicyViewer` role are able to see the * name of the non-existent metric in the alerting policy condition. */ disableMetricValidation?: boolean; /** * Alerts are considered firing once their PromQL expression evaluated * to be "true" for this long. Alerts whose PromQL expression was not * evaluated to be "true" for long enough are considered pending. The * default value is zero. Must be zero or positive. */ duration?: string; /** * How often this rule should be evaluated. Must be a positive multiple * of 30 seconds or missing. The default value is 30 seconds. If this * PrometheusQueryLanguageCondition was generated from a Prometheus * alerting rule, then this value should be taken from the enclosing * rule group. */ evaluationInterval?: string; /** * Labels to add to or overwrite in the PromQL query result. Label names * must be valid. * Label values can be templatized by using variables. The only available * variable names are the names of the labels in the PromQL result, * although label names beginning with \_\_ (two "\_") are reserved for * internal use. "labels" may be empty. This field is intended to be used * for organizing and identifying the AlertPolicy. */ labels?: { [key: string]: string; }; /** * The PromQL expression to evaluate. Every evaluation cycle this * expression is evaluated at the current time, and all resultant time * series become pending/firing alerts. This field must not be empty. 
*/ query: string; /** * The rule group name of this alert in the corresponding Prometheus * configuration file. * Some external tools may require this field to be populated correctly * in order to refer to the original Prometheus configuration file. * The rule group name and the alert name are necessary to update the * relevant AlertPolicies in case the definition of the rule group changes * in the future. This field is optional. */ ruleGroup?: string; } interface AlertPolicyConditionConditionSql { /** * A test that uses an alerting result in a boolean column produced by the SQL query. * Structure is documented below. */ booleanTest?: outputs.monitoring.AlertPolicyConditionConditionSqlBooleanTest; /** * Used to schedule the query to run every so many days. * Structure is documented below. */ daily?: outputs.monitoring.AlertPolicyConditionConditionSqlDaily; /** * Used to schedule the query to run every so many hours. * Structure is documented below. */ hourly?: outputs.monitoring.AlertPolicyConditionConditionSqlHourly; /** * Used to schedule the query to run every so many minutes. * Structure is documented below. */ minutes?: outputs.monitoring.AlertPolicyConditionConditionSqlMinutes; /** * The Log Analytics SQL query to run, as a string. The query must * conform to the required shape. Specifically, the query must not try to * filter the input by time. A filter will automatically be applied * to filter the input so that the query receives all rows received * since the last time the query was run. */ query: string; /** * A test that checks if the number of rows in the result set violates some threshold. * Structure is documented below. */ rowCountTest?: outputs.monitoring.AlertPolicyConditionConditionSqlRowCountTest; } interface AlertPolicyConditionConditionSqlBooleanTest { /** * The name of the column containing the boolean value. If the value in a row is * NULL, that row is ignored. 
*/ column: string; } interface AlertPolicyConditionConditionSqlDaily { /** * The time of day (in UTC) at which the query should run. If left * unspecified, the server picks an arbitrary time of day and runs * the query at the same time each day. * Structure is documented below. */ executionTime?: outputs.monitoring.AlertPolicyConditionConditionSqlDailyExecutionTime; /** * The number of days between runs. Must be greater than or equal * to 1 day and less than or equal to 30 days. */ periodicity: number; } interface AlertPolicyConditionConditionSqlDailyExecutionTime { /** * Hours of a day in 24 hour format. Must be greater than or equal * to 0 and typically must be less than or equal to 23. An API may * choose to allow the value "24:00:00" for scenarios like business * closing time. */ hours?: number; /** * Minutes of an hour. Must be greater than or equal to 0 and * less than or equal to 59. */ minutes?: number; /** * Fractions of seconds, in nanoseconds. Must be greater than or * equal to 0 and less than or equal to 999,999,999. */ nanos?: number; /** * Seconds of a minute. Must be greater than or equal to 0 and * typically must be less than or equal to 59. An API may allow the * value 60 if it allows leap-seconds. */ seconds?: number; } interface AlertPolicyConditionConditionSqlHourly { /** * The number of minutes after the hour (in UTC) to run the query. * Must be greater than or equal to 0 minutes and less than or equal to * 59 minutes. If left unspecified, then an arbitrary offset is used. */ minuteOffset?: number; /** * Number of hours between runs. The interval must be greater than or * equal to 1 hour and less than or equal to 48 hours. */ periodicity: number; } interface AlertPolicyConditionConditionSqlMinutes { /** * Number of minutes between runs. The interval must be greater than or * equal to 5 minutes and less than or equal to 1440 minutes. 
*/ periodicity: number; } interface AlertPolicyConditionConditionSqlRowCountTest { /** * The comparison to apply between the time * series (indicated by filter and aggregation) * and the threshold (indicated by * threshold_value). The comparison is applied * on each time series, with the time series on * the left-hand side and the threshold on the * right-hand side. Only COMPARISON_LT and * COMPARISON_GT are supported currently. * Possible values are: `COMPARISON_GT`, `COMPARISON_GE`, `COMPARISON_LT`, `COMPARISON_LE`, `COMPARISON_EQ`, `COMPARISON_NE`. */ comparison: string; /** * The value against which to compare the row count. */ threshold: number; } interface AlertPolicyConditionConditionThreshold { /** * Specifies the alignment of data points in * individual time series as well as how to * combine the retrieved time series together * (such as when aggregating multiple streams * on each resource to a single stream for each * resource or when aggregating streams across * all members of a group of resources). * Multiple aggregations are applied in the * order specified.This field is similar to the * one in the MetricService.ListTimeSeries * request. It is advisable to use the * ListTimeSeries method when debugging this * field. * Structure is documented below. */ aggregations?: outputs.monitoring.AlertPolicyConditionConditionThresholdAggregation[]; /** * The comparison to apply between the time * series (indicated by filter and aggregation) * and the threshold (indicated by * threshold_value). The comparison is applied * on each time series, with the time series on * the left-hand side and the threshold on the * right-hand side. Only COMPARISON_LT and * COMPARISON_GT are supported currently. * Possible values are: `COMPARISON_GT`, `COMPARISON_GE`, `COMPARISON_LT`, `COMPARISON_LE`, `COMPARISON_EQ`, `COMPARISON_NE`. 
*/ comparison: string; /** * Specifies the alignment of data points in * individual time series selected by * denominatorFilter as well as how to combine * the retrieved time series together (such as * when aggregating multiple streams on each * resource to a single stream for each * resource or when aggregating streams across * all members of a group of resources).When * computing ratios, the aggregations and * denominatorAggregations fields must use the * same alignment period and produce time * series that have the same periodicity and * labels.This field is similar to the one in * the MetricService.ListTimeSeries request. It * is advisable to use the ListTimeSeries * method when debugging this field. * Structure is documented below. */ denominatorAggregations?: outputs.monitoring.AlertPolicyConditionConditionThresholdDenominatorAggregation[]; /** * A filter that identifies a time series that * should be used as the denominator of a ratio * that will be compared with the threshold. If * a denominatorFilter is specified, the time * series specified by the filter field will be * used as the numerator.The filter is similar * to the one that is specified in the * MetricService.ListTimeSeries request (that * call is useful to verify the time series * that will be retrieved / processed) and must * specify the metric type and optionally may * contain restrictions on resource type, * resource labels, and metric labels. This * field may not exceed 2048 Unicode characters * in length. */ denominatorFilter?: string; /** * The amount of time that a time series must * violate the threshold to be considered * failing. Currently, only values that are a * multiple of a minute--e.g., 0, 60, 120, or * 300 seconds--are supported. If an invalid * value is given, an error will be returned. 
* When choosing a duration, it is useful to * keep in mind the frequency of the underlying * time series data (which may also be affected * by any alignments specified in the * aggregations field); a good duration is long * enough so that a single outlier does not * generate spurious alerts, but short enough * that unhealthy states are detected and * alerted on quickly. */ duration: string; /** * A condition control that determines how * metric-threshold conditions are evaluated when * data stops arriving. * Possible values are: `EVALUATION_MISSING_DATA_INACTIVE`, `EVALUATION_MISSING_DATA_ACTIVE`, `EVALUATION_MISSING_DATA_NO_OP`. */ evaluationMissingData?: string; /** * A filter that identifies which time series * should be compared with the threshold.The * filter is similar to the one that is * specified in the * MetricService.ListTimeSeries request (that * call is useful to verify the time series * that will be retrieved / processed) and must * specify the metric type and optionally may * contain restrictions on resource type, * resource labels, and metric labels. This * field may not exceed 2048 Unicode characters * in length. */ filter?: string; /** * When this field is present, the `MetricThreshold` * condition forecasts whether the time series is * predicted to violate the threshold within the * `forecastHorizon`. When this field is not set, the * `MetricThreshold` tests the current value of the * timeseries against the threshold. * Structure is documented below. */ forecastOptions?: outputs.monitoring.AlertPolicyConditionConditionThresholdForecastOptions; /** * A value against which to compare the time * series. */ thresholdValue?: number; /** * The number/percent of time series for which * the comparison must hold in order for the * condition to trigger. 
If unspecified, then * the condition will trigger if the comparison * is true for any of the time series that have * been identified by filter and aggregations, * or by the ratio, if denominatorFilter and * denominatorAggregations are specified. * Structure is documented below. */ trigger?: outputs.monitoring.AlertPolicyConditionConditionThresholdTrigger; } interface AlertPolicyConditionConditionThresholdAggregation { /** * The alignment period for per-time * series alignment. If present, * alignmentPeriod must be at least * 60 seconds. After per-time series * alignment, each time series will * contain data points only on the * period boundaries. If * perSeriesAligner is not specified * or equals ALIGN_NONE, then this * field is ignored. If * perSeriesAligner is specified and * does not equal ALIGN_NONE, then * this field must be defined; * otherwise an error is returned. */ alignmentPeriod?: string; /** * The approach to be used to combine * time series. Not all reducer * functions may be applied to all * time series, depending on the * metric type and the value type of * the original time series. * Reduction may change the metric * type or value type of the time * series.Time series data must be * aligned in order to perform cross- * time series reduction. If * crossSeriesReducer is specified, * then perSeriesAligner must be * specified and not equal ALIGN_NONE * and alignmentPeriod must be * specified; otherwise, an error is * returned. * Possible values are: `REDUCE_NONE`, `REDUCE_MEAN`, `REDUCE_MIN`, `REDUCE_MAX`, `REDUCE_SUM`, `REDUCE_STDDEV`, `REDUCE_COUNT`, `REDUCE_COUNT_TRUE`, `REDUCE_COUNT_FALSE`, `REDUCE_FRACTION_TRUE`, `REDUCE_PERCENTILE_99`, `REDUCE_PERCENTILE_95`, `REDUCE_PERCENTILE_50`, `REDUCE_PERCENTILE_05`. */ crossSeriesReducer?: string; /** * The set of fields to preserve when * crossSeriesReducer is specified. * The groupByFields determine how * the time series are partitioned * into subsets prior to applying the * aggregation function. 
Each subset * contains time series that have the * same value for each of the * grouping fields. Each individual * time series is a member of exactly * one subset. The crossSeriesReducer * is applied to each subset of time * series. It is not possible to * reduce across different resource * types, so this field implicitly * contains resource.type. Fields not * specified in groupByFields are * aggregated away. If groupByFields * is not specified and all the time * series have the same resource * type, then the time series are * aggregated into a single output * time series. If crossSeriesReducer * is not defined, this field is * ignored. */ groupByFields?: string[]; /** * The approach to be used to align * individual time series. Not all * alignment functions may be applied * to all time series, depending on * the metric type and value type of * the original time series. * Alignment may change the metric * type or the value type of the time * series.Time series data must be * aligned in order to perform cross- * time series reduction. If * crossSeriesReducer is specified, * then perSeriesAligner must be * specified and not equal ALIGN_NONE * and alignmentPeriod must be * specified; otherwise, an error is * returned. * Possible values are: `ALIGN_NONE`, `ALIGN_DELTA`, `ALIGN_RATE`, `ALIGN_INTERPOLATE`, `ALIGN_NEXT_OLDER`, `ALIGN_MIN`, `ALIGN_MAX`, `ALIGN_MEAN`, `ALIGN_COUNT`, `ALIGN_SUM`, `ALIGN_STDDEV`, `ALIGN_COUNT_TRUE`, `ALIGN_COUNT_FALSE`, `ALIGN_FRACTION_TRUE`, `ALIGN_PERCENTILE_99`, `ALIGN_PERCENTILE_95`, `ALIGN_PERCENTILE_50`, `ALIGN_PERCENTILE_05`, `ALIGN_PERCENT_CHANGE`. */ perSeriesAligner?: string; } interface AlertPolicyConditionConditionThresholdDenominatorAggregation { /** * The alignment period for per-time * series alignment. If present, * alignmentPeriod must be at least * 60 seconds. After per-time series * alignment, each time series will * contain data points only on the * period boundaries. 
If * perSeriesAligner is not specified * or equals ALIGN_NONE, then this * field is ignored. If * perSeriesAligner is specified and * does not equal ALIGN_NONE, then * this field must be defined; * otherwise an error is returned. */ alignmentPeriod?: string; /** * The approach to be used to combine * time series. Not all reducer * functions may be applied to all * time series, depending on the * metric type and the value type of * the original time series. * Reduction may change the metric * type or value type of the time * series.Time series data must be * aligned in order to perform cross- * time series reduction. If * crossSeriesReducer is specified, * then perSeriesAligner must be * specified and not equal ALIGN_NONE * and alignmentPeriod must be * specified; otherwise, an error is * returned. * Possible values are: `REDUCE_NONE`, `REDUCE_MEAN`, `REDUCE_MIN`, `REDUCE_MAX`, `REDUCE_SUM`, `REDUCE_STDDEV`, `REDUCE_COUNT`, `REDUCE_COUNT_TRUE`, `REDUCE_COUNT_FALSE`, `REDUCE_FRACTION_TRUE`, `REDUCE_PERCENTILE_99`, `REDUCE_PERCENTILE_95`, `REDUCE_PERCENTILE_50`, `REDUCE_PERCENTILE_05`. */ crossSeriesReducer?: string; /** * The set of fields to preserve when * crossSeriesReducer is specified. * The groupByFields determine how * the time series are partitioned * into subsets prior to applying the * aggregation function. Each subset * contains time series that have the * same value for each of the * grouping fields. Each individual * time series is a member of exactly * one subset. The crossSeriesReducer * is applied to each subset of time * series. It is not possible to * reduce across different resource * types, so this field implicitly * contains resource.type. Fields not * specified in groupByFields are * aggregated away. If groupByFields * is not specified and all the time * series have the same resource * type, then the time series are * aggregated into a single output * time series. If crossSeriesReducer * is not defined, this field is * ignored. 
*/ groupByFields?: string[]; /** * The approach to be used to align * individual time series. Not all * alignment functions may be applied * to all time series, depending on * the metric type and value type of * the original time series. * Alignment may change the metric * type or the value type of the time * series.Time series data must be * aligned in order to perform cross- * time series reduction. If * crossSeriesReducer is specified, * then perSeriesAligner must be * specified and not equal ALIGN_NONE * and alignmentPeriod must be * specified; otherwise, an error is * returned. * Possible values are: `ALIGN_NONE`, `ALIGN_DELTA`, `ALIGN_RATE`, `ALIGN_INTERPOLATE`, `ALIGN_NEXT_OLDER`, `ALIGN_MIN`, `ALIGN_MAX`, `ALIGN_MEAN`, `ALIGN_COUNT`, `ALIGN_SUM`, `ALIGN_STDDEV`, `ALIGN_COUNT_TRUE`, `ALIGN_COUNT_FALSE`, `ALIGN_FRACTION_TRUE`, `ALIGN_PERCENTILE_99`, `ALIGN_PERCENTILE_95`, `ALIGN_PERCENTILE_50`, `ALIGN_PERCENTILE_05`, `ALIGN_PERCENT_CHANGE`. */ perSeriesAligner?: string; } interface AlertPolicyConditionConditionThresholdForecastOptions { /** * The length of time into the future to forecast * whether a timeseries will violate the threshold. * If the predicted value is found to violate the * threshold, and the violation is observed in all * forecasts made for the configured `duration`, * then the timeseries is considered to be failing. */ forecastHorizon: string; } interface AlertPolicyConditionConditionThresholdTrigger { /** * The absolute number of time series * that must fail the predicate for the * condition to be triggered. */ count?: number; /** * The percentage of time series that * must fail the predicate for the * condition to be triggered. */ percent?: number; } interface AlertPolicyCreationRecord { /** * (Output) * When the change occurred. */ mutateTime: string; /** * (Output) * The email address of the user making the change. 
*/ mutatedBy: string; } interface AlertPolicyDocumentation { /** * The text of the documentation, interpreted according to mimeType. * The content may not exceed 8,192 Unicode characters and may not * exceed more than 10,240 bytes when encoded in UTF-8 format, * whichever is smaller. */ content?: string; /** * Links to content such as playbooks, repositories, and other resources. This field can contain up to 3 entries. * Structure is documented below. */ links?: outputs.monitoring.AlertPolicyDocumentationLink[]; /** * The format of the content field. Presently, only the value * "text/markdown" is supported. */ mimeType?: string; /** * The subject line of the notification. The subject line may not * exceed 10,240 bytes. In notifications generated by this policy the contents * of the subject line after variable expansion will be truncated to 255 bytes * or shorter at the latest UTF-8 character boundary. */ subject?: string; } interface AlertPolicyDocumentationLink { /** * A short display name for the link. The display name must not be empty or exceed 63 characters. Example: "playbook". */ displayName?: string; /** * The url of a webpage. A url can be templatized by using variables in the path or the query parameters. The total length of a URL should not exceed 2083 characters before and after variable expansion. Example: "https://my_domain.com/playbook?name=${resource.name}". */ url?: string; } interface CustomServiceTelemetry { /** * The full name of the resource that defines this service. * Formatted as described in * https://cloud.google.com/apis/design/resource_names. */ resourceName?: string; } interface GenericServiceBasicService { /** * Labels that specify the resource that emits the monitoring data * which is used for SLO reporting of this `Service`. */ serviceLabels?: { [key: string]: string; }; /** * The type of service that this basic service defines, e.g. 
* APP_ENGINE service type */ serviceType?: string; } interface GenericServiceTelemetry { /** * The full name of the resource that defines this service. * Formatted as described in * https://cloud.google.com/apis/design/resource_names. */ resourceName?: string; } interface GetAppEngineServiceTelemetry { /** * The full name of the resource that defines this service. * Formatted as described in * https://cloud.google.com/apis/design/resource_names. */ resourceName: string; } interface GetClusterIstioServiceTelemetry { /** * The full name of the resource that defines this service. * Formatted as described in * https://cloud.google.com/apis/design/resource_names. */ resourceName: string; } interface GetIstioCanonicalServiceTelemetry { /** * The full name of the resource that defines this service. * Formatted as described in * https://cloud.google.com/apis/design/resource_names. */ resourceName: string; } interface GetMeshIstioServiceTelemetry { /** * The full name of the resource that defines this service. * Formatted as described in * https://cloud.google.com/apis/design/resource_names. */ resourceName: string; } interface GetNotificationChannelSensitiveLabel { /** * An authorization token for a notification channel. Channel types that support this field include: slack */ authToken: string; /** * An authorization token for a notification channel. Channel types that support this field include: slack */ authTokenWo: string; /** * Triggers update of 'auth_token_wo' write-only. Increment this value when an update to 'auth_token_wo' is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ authTokenWoVersion: string; /** * An password for a notification channel. Channel types that support this field include: webhook_basicauth */ password: string; /** * An password for a notification channel. 
Channel types that support this field include: webhook_basicauth */ passwordWo: string; /** * Triggers update of 'password_wo' write-only. Increment this value when an update to 'password_wo' is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ passwordWoVersion: string; /** * An servicekey token for a notification channel. Channel types that support this field include: pagerduty */ serviceKey: string; /** * An servicekey token for a notification channel. Channel types that support this field include: pagerduty */ serviceKeyWo: string; /** * Triggers update of 'service_key_wo' write-only. Increment this value when an update to 'service_key_wo' is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ serviceKeyWoVersion: string; } interface GetUptimeCheckIPsUptimeCheckIp { /** * The IP address from which the Uptime check originates. This is a fully specified IP address * (not an IP address range). Most IP addresses, as of this publication, are in IPv4 format; however, one should not * rely on the IP addresses being in IPv4 format indefinitely, and should support interpreting this field in either * IPv4 or IPv6 format. */ ipAddress: string; /** * A more specific location within the region that typically encodes a particular city/town/metro * (and its containing state/province or country) within the broader umbrella region category. */ location: string; /** * A broad region category in which the IP address is located. */ region: string; } interface MetricDescriptorLabel { /** * A human-readable description for the label. */ description?: string; /** * The key for this label. The key must not exceed 100 characters. 
The first character of the key must be an upper- or lower-case letter, the remaining characters must be letters, digits or underscores, and the key must match the regular expression [a-zA-Z][a-zA-Z0-9_]* */ key: string; /** * The type of data that can be assigned to the label. * Default value is `STRING`. * Possible values are: `STRING`, `BOOL`, `INT64`. */ valueType?: string; } interface MetricDescriptorMetadata { /** * The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors. In `[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)`. */ ingestDelay?: string; /** * The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period. In `[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)`. */ samplePeriod?: string; } interface NotificationChannelSensitiveLabels { /** * An authorization token for a notification channel. Channel types that support this field include: slack * **Note**: This property is sensitive and will not be displayed in the plan. */ authToken?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * An authorization token for a notification channel. Channel types that support this field include: slack * **Note**: This property is write-only and will not be read from the API. * * > **Note:** One of `authToken` or `authTokenWo` can only be set. */ authTokenWo?: string; /** * Triggers update of `authTokenWo` write-only. 
Increment this value when an update to `authTokenWo` is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ authTokenWoVersion?: string; /** * A password for a notification channel. Channel types that support this field include: webhookBasicauth * **Note**: This property is sensitive and will not be displayed in the plan. */ password?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * A password for a notification channel. Channel types that support this field include: webhookBasicauth * **Note**: This property is write-only and will not be read from the API. * * > **Note:** One of `password` or `passwordWo` can only be set. */ passwordWo?: string; /** * Triggers update of `passwordWo` write-only. Increment this value when an update to `passwordWo` is needed. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ passwordWoVersion?: string; /** * A servicekey token for a notification channel. Channel types that support this field include: pagerduty * **Note**: This property is sensitive and will not be displayed in the plan. */ serviceKey?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * A servicekey token for a notification channel. Channel types that support this field include: pagerduty * **Note**: This property is write-only and will not be read from the API. * * > **Note:** One of `serviceKey` or `serviceKeyWo` can only be set. */ serviceKeyWo?: string; /** * Triggers update of `serviceKeyWo` write-only. Increment this value when an update to `serviceKeyWo` is needed. 
For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ serviceKeyWoVersion?: string; } interface SloBasicSli { /** * Availability based SLI, derived from count of requests made to this service that return successfully. * Structure is documented below. */ availability?: outputs.monitoring.SloBasicSliAvailability; /** * Parameters for a latency threshold SLI. * Structure is documented below. */ latency?: outputs.monitoring.SloBasicSliLatency; /** * An optional set of locations to which this SLI is relevant. * Telemetry from other locations will not be used to calculate * performance for this SLI. If omitted, this SLI applies to all * locations in which the Service has activity. For service types * that don't support breaking down by location, setting this * field will result in an error. */ locations?: string[]; /** * An optional set of RPCs to which this SLI is relevant. * Telemetry from other methods will not be used to calculate * performance for this SLI. If omitted, this SLI applies to all * the Service's methods. For service types that don't support * breaking down by method, setting this field will result in an * error. */ methods?: string[]; /** * The set of API versions to which this SLI is relevant. * Telemetry from other API versions will not be used to * calculate performance for this SLI. If omitted, * this SLI applies to all API versions. For service types * that don't support breaking down by version, setting this * field will result in an error. */ versions?: string[]; } interface SloBasicSliAvailability { /** * Whether an availability SLI is enabled or not. Must be set to `true`. Defaults to `true`. */ enabled?: boolean; } interface SloBasicSliLatency { /** * A duration string, e.g. 10s. * Good service is defined to be the count of requests made to * this service that return in no more than threshold. 
*/ threshold: string; } interface SloRequestBasedSli { /** * Used when goodService is defined by a count of values aggregated in a * Distribution that fall into a good range. The totalService is the * total count of all values aggregated in the Distribution. * Defines a distribution TimeSeries filter and thresholds used for * measuring good service and total service. * Exactly one of `distributionCut` or `goodTotalRatio` can be set. * Structure is documented below. */ distributionCut?: outputs.monitoring.SloRequestBasedSliDistributionCut; /** * A means to compute a ratio of `goodService` to `totalService`. * Defines computing this ratio with two TimeSeries [monitoring filters](https://cloud.google.com/monitoring/api/v3/filters) * Must specify exactly two of good, bad, and total service filters. * The relationship goodService + badService = totalService * will be assumed. * Exactly one of `distributionCut` or `goodTotalRatio` can be set. * Structure is documented below. */ goodTotalRatio?: outputs.monitoring.SloRequestBasedSliGoodTotalRatio; } interface SloRequestBasedSliDistributionCut { /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * aggregating values to quantify the good service provided. * Must have ValueType = DISTRIBUTION and * MetricKind = DELTA or MetricKind = CUMULATIVE. */ distributionFilter: string; /** * Range of numerical values. The computed goodService * will be the count of values x in the Distribution such * that range.min <= x <= range.max. inclusive of min and * max. Open ranges can be defined by setting * just one of min or max. * Structure is documented below. */ range: outputs.monitoring.SloRequestBasedSliDistributionCutRange; } interface SloRequestBasedSliDistributionCutRange { /** * max value for the range (inclusive). If not given, * will be set to "infinity", defining an open range * ">= range.min" */ max?: number; /** * Min value for the range (inclusive). 
If not given, * will be set to "-infinity", defining an open range * "< range.max" */ min?: number; } interface SloRequestBasedSliGoodTotalRatio { /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * quantifying bad service provided, either demanded service that * was not provided or demanded service that was of inadequate * quality. Exactly two of * good, bad, or total service filter must be defined (where * good + bad = total is assumed) * Must have ValueType = DOUBLE or ValueType = INT64 and * must have MetricKind = DELTA or MetricKind = CUMULATIVE. */ badServiceFilter?: string; /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * quantifying good service provided. Exactly two of * good, bad, or total service filter must be defined (where * good + bad = total is assumed) * Must have ValueType = DOUBLE or ValueType = INT64 and * must have MetricKind = DELTA or MetricKind = CUMULATIVE. */ goodServiceFilter?: string; /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * quantifying total demanded service. Exactly two of * good, bad, or total service filter must be defined (where * good + bad = total is assumed) * Must have ValueType = DOUBLE or ValueType = INT64 and * must have MetricKind = DELTA or MetricKind = CUMULATIVE. */ totalServiceFilter?: string; } interface SloWindowsBasedSli { /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * with ValueType = BOOL. The window is good if any true values * appear in the window. One of `goodBadMetricFilter`, * `goodTotalRatioThreshold`, `metricMeanInRange`, * `metricSumInRange` must be set for `windowsBasedSli`. */ goodBadMetricFilter?: string; /** * Criterion that describes a window as good if its performance is * high enough. One of `goodBadMetricFilter`, * `goodTotalRatioThreshold`, `metricMeanInRange`, * `metricSumInRange` must be set for `windowsBasedSli`. 
* Structure is documented below. */ goodTotalRatioThreshold?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThreshold; /** * Criterion that describes a window as good if the metric's value * is in a good range, *averaged* across returned streams. * One of `goodBadMetricFilter`, * `goodTotalRatioThreshold`, `metricMeanInRange`, * `metricSumInRange` must be set for `windowsBasedSli`. * Average value X of `timeSeries` should satisfy * `range.min <= X <= range.max` for a good window. * Structure is documented below. */ metricMeanInRange?: outputs.monitoring.SloWindowsBasedSliMetricMeanInRange; /** * Criterion that describes a window as good if the metric's value * is in a good range, *summed* across returned streams. * Summed value `X` of `timeSeries` should satisfy * `range.min <= X <= range.max` for a good window. * One of `goodBadMetricFilter`, * `goodTotalRatioThreshold`, `metricMeanInRange`, * `metricSumInRange` must be set for `windowsBasedSli`. * Structure is documented below. */ metricSumInRange?: outputs.monitoring.SloWindowsBasedSliMetricSumInRange; /** * Duration over which window quality is evaluated, given as a * duration string "{X}s" representing X seconds. Must be an * integer fraction of a day and at least 60s. */ windowPeriod?: string; } interface SloWindowsBasedSliGoodTotalRatioThreshold { /** * Basic SLI to evaluate to judge window quality. * Structure is documented below. */ basicSliPerformance?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance; /** * Request-based SLI to evaluate to judge window quality. * Structure is documented below. */ performance?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdPerformance; /** * If window performance >= threshold, the window is counted * as good. */ threshold?: number; } interface SloWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance { /** * Availability based SLI, derived from count of requests made to this service that return successfully. 
* Structure is documented below. */ availability?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability; /** * Parameters for a latency threshold SLI. * Structure is documented below. */ latency?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency; /** * An optional set of locations to which this SLI is relevant. * Telemetry from other locations will not be used to calculate * performance for this SLI. If omitted, this SLI applies to all * locations in which the Service has activity. For service types * that don't support breaking down by location, setting this * field will result in an error. */ locations?: string[]; /** * An optional set of RPCs to which this SLI is relevant. * Telemetry from other methods will not be used to calculate * performance for this SLI. If omitted, this SLI applies to all * the Service's methods. For service types that don't support * breaking down by method, setting this field will result in an * error. */ methods?: string[]; /** * The set of API versions to which this SLI is relevant. * Telemetry from other API versions will not be used to * calculate performance for this SLI. If omitted, * this SLI applies to all API versions. For service types * that don't support breaking down by version, setting this * field will result in an error. */ versions?: string[]; } interface SloWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability { /** * Whether an availability SLI is enabled or not. Must be set to `true`. Defaults to `true`. */ enabled?: boolean; } interface SloWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency { /** * A duration string, e.g. 10s. * Good service is defined to be the count of requests made to * this service that return in no more than threshold. 
*/ threshold: string; } interface SloWindowsBasedSliGoodTotalRatioThresholdPerformance { /** * Used when goodService is defined by a count of values aggregated in a * Distribution that fall into a good range. The totalService is the * total count of all values aggregated in the Distribution. * Defines a distribution TimeSeries filter and thresholds used for * measuring good service and total service. * Structure is documented below. */ distributionCut?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut; /** * A means to compute a ratio of `goodService` to `totalService`. * Defines computing this ratio with two TimeSeries [monitoring filters](https://cloud.google.com/monitoring/api/v3/filters) * Must specify exactly two of good, bad, and total service filters. * The relationship goodService + badService = totalService * will be assumed. * Structure is documented below. */ goodTotalRatio?: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio; } interface SloWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut { /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * aggregating values to quantify the good service provided. * Must have ValueType = DISTRIBUTION and * MetricKind = DELTA or MetricKind = CUMULATIVE. */ distributionFilter: string; /** * Range of numerical values. The computed goodService * will be the count of values x in the Distribution such * that range.min <= x <= range.max. inclusive of min and * max. Open ranges can be defined by setting * just one of min or max. * Structure is documented below. */ range: outputs.monitoring.SloWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange; } interface SloWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange { /** * max value for the range (inclusive). 
If not given, * will be set to "infinity", defining an open range * ">= range.min" */ max?: number; /** * Min value for the range (inclusive). If not given, * will be set to "-infinity", defining an open range * "< range.max" */ min?: number; } interface SloWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio { /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * quantifying bad service provided, either demanded service that * was not provided or demanded service that was of inadequate * quality. Exactly two of * good, bad, or total service filter must be defined (where * good + bad = total is assumed) * Must have ValueType = DOUBLE or ValueType = INT64 and * must have MetricKind = DELTA or MetricKind = CUMULATIVE. */ badServiceFilter?: string; /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * quantifying good service provided. Exactly two of * good, bad, or total service filter must be defined (where * good + bad = total is assumed) * Must have ValueType = DOUBLE or ValueType = INT64 and * must have MetricKind = DELTA or MetricKind = CUMULATIVE. */ goodServiceFilter?: string; /** * A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * quantifying total demanded service. Exactly two of * good, bad, or total service filter must be defined (where * good + bad = total is assumed) * Must have ValueType = DOUBLE or ValueType = INT64 and * must have MetricKind = DELTA or MetricKind = CUMULATIVE. */ totalServiceFilter?: string; } interface SloWindowsBasedSliMetricMeanInRange { /** * Range of numerical values. The computed goodService * will be the count of values x in the Distribution such * that range.min <= x <= range.max. inclusive of min and * max. Open ranges can be defined by setting * just one of min or max. Mean value `X` of `timeSeries` * values should satisfy `range.min <= X <= range.max` for a * good service. 
* Structure is documented below. */ range: outputs.monitoring.SloWindowsBasedSliMetricMeanInRangeRange; /** * A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * specifying the TimeSeries to use for evaluating window * The provided TimeSeries must have ValueType = INT64 or * ValueType = DOUBLE and MetricKind = GAUGE. Mean value `X` * should satisfy `range.min <= X <= range.max` * under good service. */ timeSeries: string; } interface SloWindowsBasedSliMetricMeanInRangeRange { /** * max value for the range (inclusive). If not given, * will be set to "infinity", defining an open range * ">= range.min" */ max?: number; /** * Min value for the range (inclusive). If not given, * will be set to "-infinity", defining an open range * "< range.max" */ min?: number; } interface SloWindowsBasedSliMetricSumInRange { /** * Range of numerical values. The computed goodService * will be the count of values x in the Distribution such * that range.min <= x <= range.max. inclusive of min and * max. Open ranges can be defined by setting * just one of min or max. Summed value `X` should satisfy * `range.min <= X <= range.max` for a good window. * Structure is documented below. */ range: outputs.monitoring.SloWindowsBasedSliMetricSumInRangeRange; /** * A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) * specifying the TimeSeries to use for evaluating window * quality. The provided TimeSeries must have * ValueType = INT64 or ValueType = DOUBLE and * MetricKind = GAUGE. * Summed value `X` should satisfy * `range.min <= X <= range.max` for a good window. */ timeSeries: string; } interface SloWindowsBasedSliMetricSumInRangeRange { /** * max value for the range (inclusive). If not given, * will be set to "infinity", defining an open range * ">= range.min" */ max?: number; /** * Min value for the range (inclusive). 
If not given, * will be set to "-infinity", defining an open range * "< range.max" */ min?: number; } interface UptimeCheckConfigContentMatcher { /** * String or regex content to match (max 1024 bytes) */ content: string; /** * Information needed to perform a JSONPath content match. Used for `ContentMatcherOption::MATCHES_JSON_PATH` and `ContentMatcherOption::NOT_MATCHES_JSON_PATH`. * Structure is documented below. */ jsonPathMatcher?: outputs.monitoring.UptimeCheckConfigContentMatcherJsonPathMatcher; /** * The type of content matcher that will be applied to the server output, compared to the content string when the check is run. * Default value is `CONTAINS_STRING`. * Possible values are: `CONTAINS_STRING`, `NOT_CONTAINS_STRING`, `MATCHES_REGEX`, `NOT_MATCHES_REGEX`, `MATCHES_JSON_PATH`, `NOT_MATCHES_JSON_PATH`. */ matcher?: string; } interface UptimeCheckConfigContentMatcherJsonPathMatcher { /** * Options to perform JSONPath content matching. * Default value is `EXACT_MATCH`. * Possible values are: `EXACT_MATCH`, `REGEX_MATCH`. */ jsonMatcher?: string; /** * JSONPath within the response output pointing to the expected `ContentMatcher::content` to match against. */ jsonPath: string; } interface UptimeCheckConfigHttpCheck { /** * If present, the check will only pass if the HTTP response status code is in this set of status codes. If empty, the HTTP status code will only pass if the HTTP status code is 200-299. * Structure is documented below. */ acceptedResponseStatusCodes?: outputs.monitoring.UptimeCheckConfigHttpCheckAcceptedResponseStatusCode[]; /** * The authentication information using username and password. Optional when creating an HTTP check; defaults to empty. Do not use with other authentication fields. * Structure is documented below. */ authInfo?: outputs.monitoring.UptimeCheckConfigHttpCheckAuthInfo; /** * The request body associated with the HTTP POST request. If `contentType` is `URL_ENCODED`, the body passed in must be URL-encoded. 
Users can provide a `Content-Length` header via the `headers` field or the API will do so. If the `requestMethod` is `GET` and `body` is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note - As with all bytes fields JSON representations are base64 encoded. e.g. `foo=bar` in URL-encoded form is `foo%3Dbar` and in base64 encoding is `Zm9vJTI1M0RiYXI=`. */ body?: string; /** * The content type to use for the check. * Possible values are: `TYPE_UNSPECIFIED`, `URL_ENCODED`, `USER_PROVIDED`. */ contentType?: string; /** * A user provided content type header to use for the check. The invalid configurations outlined in the `contentType` field apply to `customContentType`, as well as the following 1. `contentType` is `URL_ENCODED` and `customContentType` is set. 2. `contentType` is `USER_PROVIDED` and `customContentType` is not set. */ customContentType?: string; /** * The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described in [RFC 2616 (page 31)](https://www.w3.org/Protocols/rfc2616/rfc2616.txt). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100. */ headers: { [key: string]: string; }; /** * Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if `maskHeaders` is set to `true` then the headers will be obscured with `******`. */ maskHeaders?: boolean; /** * The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. 
If the provided path does not begin with `/`, a `/` will be prepended automatically. Optional (defaults to `/`). */ path?: string; /** * Contains information needed to add pings to an HTTP check. * Structure is documented below. */ pingConfig?: outputs.monitoring.UptimeCheckConfigHttpCheckPingConfig; /** * The port to the page to run the check against. Will be combined with `host` (specified within the `monitoredResource`) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL). */ port: number; /** * The HTTP request method to use for the check. If set to `METHOD_UNSPECIFIED` then `requestMethod` defaults to `GET`. * Default value is `GET`. * Possible values are: `METHOD_UNSPECIFIED`, `GET`, `POST`. */ requestMethod?: string; /** * The authentication information using the Monitoring Service Agent. Optional when creating an HTTPS check; defaults to empty. Do not use with other authentication fields. * Structure is documented below. */ serviceAgentAuthentication?: outputs.monitoring.UptimeCheckConfigHttpCheckServiceAgentAuthentication; /** * If true, use HTTPS instead of HTTP to run the check. */ useSsl?: boolean; /** * Boolean specifying whether to include SSL certificate validation as a part of the Uptime check. Only applies to checks where `monitoredResource` is set to `uptimeUrl`. If `useSsl` is `false`, setting `validateSsl` to `true` has no effect. */ validateSsl?: boolean; } interface UptimeCheckConfigHttpCheckAcceptedResponseStatusCode { /** * A class of status codes to accept. * Possible values are: `STATUS_CLASS_1XX`, `STATUS_CLASS_2XX`, `STATUS_CLASS_3XX`, `STATUS_CLASS_4XX`, `STATUS_CLASS_5XX`, `STATUS_CLASS_ANY`. */ statusClass?: string; /** * A status code to accept. */ statusValue?: number; } interface UptimeCheckConfigHttpCheckAuthInfo { /** * The password to authenticate. * **Note**: This property is sensitive and will not be displayed in the plan. 
*/ password?: string; /** * **NOTE:** This field is write-only and its value will not be updated in state as part of read operations. * (Optional, Write-Only) * The password to authenticate. * **Note**: This property is write-only and will not be read from the API. * * > **Note:** One of `password` or `passwordWo` can only be set. */ passwordWo?: string; /** * The password write-only version. */ passwordWoVersion?: string; /** * The username to authenticate. */ username: string; } interface UptimeCheckConfigHttpCheckPingConfig { /** * Number of ICMP pings. A maximum of 3 ICMP pings is currently supported. */ pingsCount: number; } interface UptimeCheckConfigHttpCheckServiceAgentAuthentication { /** * The type of authentication to use. * Possible values are: `SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED`, `OIDC_TOKEN`. */ type?: string; } interface UptimeCheckConfigMonitoredResource { /** * Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels `projectId`, `instanceId`, and `zone`. */ labels: { [key: string]: string; }; /** * The monitored resource type. This field must match the type field of a [`MonitoredResourceDescriptor`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors#MonitoredResourceDescriptor) object. For example, the type of a Compute Engine VM instance is `gceInstance`. For a list of types, see [Monitoring resource types](https://cloud.google.com/monitoring/api/resources) and [Logging resource types](https://cloud.google.com/logging/docs/api/v2/resource-list). */ type: string; } interface UptimeCheckConfigResourceGroup { /** * The group of resources being monitored. Should be the `name` of a group */ groupId?: string; /** * The resource type of the group members. * Possible values are: `RESOURCE_TYPE_UNSPECIFIED`, `INSTANCE`, `AWS_ELB_LOAD_BALANCER`. 
*/ resourceType?: string; } interface UptimeCheckConfigSyntheticMonitor { /** * Target a Synthetic Monitor GCFv2 Instance * Structure is documented below. * * * The `cloudFunctionV2` block supports: */ cloudFunctionV2: outputs.monitoring.UptimeCheckConfigSyntheticMonitorCloudFunctionV2; } interface UptimeCheckConfigSyntheticMonitorCloudFunctionV2 { /** * A unique resource name for this UptimeCheckConfig. The format is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. */ name: string; } interface UptimeCheckConfigTcpCheck { /** * Contains information needed to add pings to a TCP check. * Structure is documented below. */ pingConfig?: outputs.monitoring.UptimeCheckConfigTcpCheckPingConfig; /** * The port to the page to run the check against. Will be combined with host (specified within the `monitoredResource`) to construct the full URL. */ port: number; } interface UptimeCheckConfigTcpCheckPingConfig { /** * Number of ICMP pings. A maximum of 3 ICMP pings is currently supported. */ pingsCount: number; } } export declare namespace netapp { interface BackupVaultBackupRetentionPolicy { /** * Minimum retention duration in days for backups in the backup vault. */ backupMinimumEnforcedRetentionDays: number; /** * Indicates if the daily backups are immutable. At least one of daily_backup_immutable, weekly_backup_immutable, monthlyBackupImmutable and manualBackupImmutable must be true. */ dailyBackupImmutable?: boolean; /** * Indicates if the manual backups are immutable. At least one of daily_backup_immutable, weekly_backup_immutable, monthlyBackupImmutable and manualBackupImmutable must be true. */ manualBackupImmutable?: boolean; /** * Indicates if the monthly backups are immutable. At least one of daily_backup_immutable, weekly_backup_immutable, monthlyBackupImmutable and manualBackupImmutable must be true. */ monthlyBackupImmutable?: boolean; /** * Indicates if the weekly backups are immutable. 
At least one of daily_backup_immutable, weekly_backup_immutable, monthlyBackupImmutable and manualBackupImmutable must be true. */ weeklyBackupImmutable?: boolean; } interface VolumeBackupConfig { /** * Specify a single backup policy ID for scheduled backups. Format: `projects/{{projectId}}/locations/{{location}}/backupPolicies/{{backupPolicyName}}` */ backupPolicies?: string[]; /** * ID of the backup vault to use. A backup vault is required to create manual or scheduled backups. * Format: `projects/{{projectId}}/locations/{{location}}/backupVaults/{{backupVaultName}}` */ backupVault?: string; /** * When set to true, scheduled backup is enabled on the volume. Omit if no backupPolicy is specified. */ scheduledBackupEnabled?: boolean; } interface VolumeBlockDevice { /** * A list of host groups that identify hosts that can mount the block volume. * Format: * `projects/{project_id}/locations/{location}/hostGroups/{host_group_id}` * This field can be updated after the block device is created. */ hostGroups: string[]; /** * (Output) * Device identifier of the Block volume. This represents lunSerialNumber * for ISCSI volumes */ identifier: string; /** * User-defined name for the block device, unique within the Volume. In case * no user input is provided, name will be autogenerated in the backend. * The name must meet the following requirements: * * Be between 1 and 255 characters long. * * Contain only uppercase or lowercase letters (A-Z, a-z), numbers (0-9), * and the following special characters: "-", "_", "}", "{", ".". * * Spaces are not allowed. */ name: string; /** * The OS type of the volume. * This field can't be changed after the block device is created. * Possible values are: `LINUX`, `WINDOWS`, `ESXI`. */ osType: string; /** * (Output) * The size of the block device in GiB. * Any value provided in this field during Volume creation is IGNORED. * The block device's size is system-managed and will be set to match * the parent Volume's `capacityGib`. 
*/ sizeGib: number; } interface VolumeCacheParameters { /** * Optional. Configuration of the cache volume. * Structure is documented below. */ cacheConfig?: outputs.netapp.VolumeCacheParametersCacheConfig; /** * (Output) * State of the cache volume indicating the peering status. */ cacheState: string; /** * (Output) * Copy-paste-able commands to be used on user's ONTAP to accept peering requests. */ command: string; /** * Optional. Field indicating whether the cache volume has global file lock enabled. */ enableGlobalFileLock?: boolean; /** * (Output) * Temporary passphrase generated to accept cluster peering command. */ passphrase: string; /** * Required. Name of the origin volume's ONTAP cluster. */ peerClusterName?: string; /** * Required. List of IC LIF addresses of the origin volume's ONTAP cluster. */ peerIpAddresses?: string[]; /** * Required. Name of the origin volume's SVM. */ peerSvmName?: string; /** * Required. Name of the origin volume for the cache volume. */ peerVolumeName?: string; /** * Optional. Expiration time for the peering command to be executed on user's ONTAP. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z". */ peeringCommandExpiryTime: string; /** * (Output) * Detailed description of the current cache state. */ stateDetails: string; } interface VolumeCacheParametersCacheConfig { /** * Optional. Flag indicating whether a CIFS change notification is enabled for the FlexCache volume. */ cifsChangeNotifyEnabled?: boolean; } interface VolumeExportPolicy { /** * Export rules (up to 5) control NFS volume access. * Structure is documented below. */ rules: outputs.netapp.VolumeExportPolicyRule[]; } interface VolumeExportPolicyRule { /** * Defines the access type for clients matching the `allowedClients` specification. * Possible values are: `READ_ONLY`, `READ_WRITE`, `READ_NONE`. 
*/ accessType?: string; /** * Defines the client ingress specification (allowed clients) as a comma separated list with IPv4 CIDRs or IPv4 host addresses. */ allowedClients?: string; /** * An integer representing the anonymous user ID. Range is 0 to 4294967295. Required when `squashMode` is `ALL_SQUASH`. */ anonUid?: number; /** * If enabled, the root user (UID = 0) of the specified clients doesn't get mapped to nobody (UID = 65534). This is also known as no_root_squash. * Use either squashMode or has_root_access, but never both at the same time. These parameters are mutually exclusive. */ hasRootAccess: string; /** * If enabled (true) the rule defines a read only access for clients matching the 'allowedClients' specification. It enables nfs clients to mount using 'authentication' kerberos security mode. */ kerberos5ReadOnly?: boolean; /** * If enabled (true) the rule defines read and write access for clients matching the 'allowedClients' specification. It enables nfs clients to mount using 'authentication' kerberos security mode. The 'kerberos5ReadOnly' value is ignored if this is enabled. */ kerberos5ReadWrite?: boolean; /** * If enabled (true) the rule defines a read only access for clients matching the 'allowedClients' specification. It enables nfs clients to mount using 'integrity' kerberos security mode. */ kerberos5iReadOnly?: boolean; /** * If enabled (true) the rule defines read and write access for clients matching the 'allowedClients' specification. It enables nfs clients to mount using 'integrity' kerberos security mode. The 'kerberos5iReadOnly' value is ignored if this is enabled. */ kerberos5iReadWrite?: boolean; /** * If enabled (true) the rule defines a read only access for clients matching the 'allowedClients' specification. It enables nfs clients to mount using 'privacy' kerberos security mode. */ kerberos5pReadOnly?: boolean; /** * If enabled (true) the rule defines read and write access for clients matching the 'allowedClients' specification. 
It enables nfs clients to mount using 'privacy' kerberos security mode. The 'kerberos5pReadOnly' value is ignored if this is enabled. */ kerberos5pReadWrite?: boolean; /** * Enable to apply the export rule to NFSV3 clients. */ nfsv3?: boolean; /** * Enable to apply the export rule to NFSV4.1 clients. */ nfsv4?: boolean; /** * SquashMode defines how remote user privileges are restricted when accessing an NFS export. It controls how the user identities (like root) are mapped to anonymous users to limit access and enforce security. * Use either squashMode or has_root_access, but never both at the same time. These parameters are mutually exclusive. * Possible values are: `SQUASH_MODE_UNSPECIFIED`, `NO_ROOT_SQUASH`, `ROOT_SQUASH`, `ALL_SQUASH`. */ squashMode?: string; } interface VolumeHybridReplicationParameters { /** * Optional. Name of source cluster location associated with the replication. This is a free-form field * for display purposes only. */ clusterLocation?: string; /** * Optional. Description of the replication. */ description?: string; /** * Optional. Type of the hybrid replication. Use `MIGRATION` to create a volume migration * and `ONPREM_REPLICATION` to create an external replication. * Other values are read-only. `REVERSE_ONPREM_REPLICATION` is used to represent an external * replication which got reversed. Default is `MIGRATION`. * Possible values are: `MIGRATION`, `CONTINUOUS_REPLICATION`, `ONPREM_REPLICATION`, `REVERSE_ONPREM_REPLICATION`. */ hybridReplicationType?: string; /** * Optional. Labels to be added to the replication as the key value pairs. * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. */ labels?: { [key: string]: string; }; /** * Optional. If the source is a FlexGroup volume, this field needs to match the number of constituents in the FlexGroup. */ largeVolumeConstituentCount?: number; /** * Required. Name of the ONTAP source cluster to be peered with NetApp Volumes. 
*/ peerClusterName?: string; /** * Required. List of all intercluster LIF IP addresses of the ONTAP source cluster. */ peerIpAddresses?: string[]; /** * Required. Name of the ONTAP source vserver SVM to be peered with NetApp Volumes. */ peerSvmName?: string; /** * Required. Name of the ONTAP source volume to be replicated to NetApp Volumes destination volume. */ peerVolumeName?: string; /** * Required. Desired name for the replication of this volume. */ replication?: string; /** * Optional. Replication Schedule for the replication created. * Possible values are: `EVERY_10_MINUTES`, `HOURLY`, `DAILY`. */ replicationSchedule?: string; } interface VolumeMountOption { /** * (Output) * Export path of the volume. */ export: string; /** * (Output) * Full export path of the volume. * Format for NFS volumes: `:/` * Format for SMB volumes: `\\\\netbios_prefix-four_random_hex_letters.domain_name\\shareName` */ exportFull: string; /** * (Output) * Human-readable mount instructions. */ instructions: string; /** * (Output) * IP Address. */ ipAddress: string; /** * (Output) * Protocol to mount with. */ protocol: string; } interface VolumeReplicationDestinationVolumeParameters { /** * Description for the destination volume. */ description?: string; /** * Share name for destination volume. If not specified, name of source volume's share name will be used. */ shareName: string; /** * Name of an existing storage pool for the destination volume with format: `projects/{{project}}/locations/{{location}}/storagePools/{{poolId}}` */ storagePool: string; /** * Tiering policy for the volume. * Structure is documented below. */ tieringPolicy?: outputs.netapp.VolumeReplicationDestinationVolumeParametersTieringPolicy; /** * Name for the destination volume to be created. If not specified, the name of the source volume will be used. */ volumeId: string; } interface VolumeReplicationDestinationVolumeParametersTieringPolicy { /** * Optional. 
Time in days to mark the volume's data block as cold and make it eligible for tiering, can be range from 2-183. * Default is 31. */ coolingThresholdDays?: number; /** * Optional. Flag indicating if the volume has tiering policy enable/pause. Default is PAUSED. * Default value is `PAUSED`. * Possible values are: `ENABLED`, `PAUSED`. */ tierAction?: string; } interface VolumeReplicationHybridPeeringDetail { /** * (Output) * Optional. Copy-paste-able commands to be used on user's ONTAP to accept peering requests. */ command: string; /** * (Output) * Optional. Expiration time for the peering command to be executed on user's ONTAP. * Uses RFC 3339, where generated output will always be Z-normalized and uses 0, 3, 6 or 9 fractional digits. Offsets other than "Z" are also accepted. */ commandExpiryTime: string; /** * (Output) * Optional. Temporary passphrase generated to accept cluster peering command. */ passphrase: string; /** * (Output) * Optional. Name of the user's local source cluster to be peered with the destination cluster. */ peerClusterName: string; /** * (Output) * Optional. Name of the user's local source vserver svm to be peered with the destination vserver svm. */ peerSvmName: string; /** * (Output) * Optional. Name of the user's local source volume to be peered with the destination volume. */ peerVolumeName: string; /** * (Output) * Optional. IP address of the subnet. */ subnetIp: string; } interface VolumeReplicationHybridReplicationUserCommand { /** * (Output) * List of commands to be executed by the customer. */ commands: string[]; } interface VolumeReplicationTransferStat { /** * (Output) * The elapsed time since the creation of the snapshot on the source volume that was last replicated * to the destination volume. Lag time represents the difference in age of the destination volume * data in relation to the source volume data. */ lagDuration: string; /** * (Output) * Size of last completed transfer in bytes. 
*/ lastTransferBytes: string; /** * (Output) * Time taken during last completed transfer. */ lastTransferDuration: string; /** * (Output) * Time when last transfer completed. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z". */ lastTransferEndTime: string; /** * (Output) * A message describing the cause of the last transfer failure. */ lastTransferError: string; /** * (Output) * Cumulative time taken across all transfers for the replication relationship. */ totalTransferDuration: string; /** * (Output) * Cumulative bytes transferred so far for the replication relationship. */ transferBytes: string; /** * (Output) * Time when progress was updated last. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z". */ updateTime: string; } interface VolumeRestoreParameters { /** * Full name of the backup to use for creating this volume. * `sourceSnapshot` and `sourceBackup` cannot be used simultaneously. * Format: `projects/{{project}}/locations/{{location}}/backupVaults/{{backupVaultId}}/backups/{{backup}}`. */ sourceBackup?: string; /** * Full name of the snapshot to use for creating this volume. * `sourceSnapshot` and `sourceBackup` cannot be used simultaneously. * Format: `projects/{{project}}/locations/{{location}}/volumes/{{volume}}/snapshots/{{snapshot}}`. */ sourceSnapshot?: string; } interface VolumeSnapshotPolicy { /** * Daily schedule policy. * Structure is documented below. */ dailySchedule?: outputs.netapp.VolumeSnapshotPolicyDailySchedule; /** * Enables automated snapshot creation according to defined schedule. Default is false. * To disable automatic snapshot creation you have to remove the whole snapshotPolicy block. */ enabled?: boolean; /** * Hourly schedule policy. * Structure is documented below. */ hourlySchedule?: outputs.netapp.VolumeSnapshotPolicyHourlySchedule; /** * Monthly schedule policy. * Structure is documented below. 
*/ monthlySchedule?: outputs.netapp.VolumeSnapshotPolicyMonthlySchedule; /** * Weekly schedule policy. * Structure is documented below. */ weeklySchedule?: outputs.netapp.VolumeSnapshotPolicyWeeklySchedule; } interface VolumeSnapshotPolicyDailySchedule { /** * Set the hour to create the snapshot (0-23), defaults to midnight (0). */ hour?: number; /** * Set the minute of the hour to create the snapshot (0-59), defaults to the top of the hour (0). */ minute?: number; /** * The maximum number of snapshots to keep for the daily schedule. */ snapshotsToKeep: number; } interface VolumeSnapshotPolicyHourlySchedule { /** * Set the minute of the hour to create the snapshot (0-59), defaults to the top of the hour (0). */ minute?: number; /** * The maximum number of snapshots to keep for the hourly schedule. */ snapshotsToKeep: number; } interface VolumeSnapshotPolicyMonthlySchedule { /** * Set the day or days of the month to make a snapshot (1-31). Accepts a comma separated number of days. Defaults to '1'. */ daysOfMonth?: string; /** * Set the hour to create the snapshot (0-23), defaults to midnight (0). */ hour?: number; /** * Set the minute of the hour to create the snapshot (0-59), defaults to the top of the hour (0). */ minute?: number; /** * The maximum number of snapshots to keep for the monthly schedule */ snapshotsToKeep: number; } interface VolumeSnapshotPolicyWeeklySchedule { /** * Set the day or days of the week to make a snapshot. Accepts a comma separated days of the week. Defaults to 'Sunday'. */ day?: string; /** * Set the hour to create the snapshot (0-23), defaults to midnight (0). */ hour?: number; /** * Set the minute of the hour to create the snapshot (0-59), defaults to the top of the hour (0). */ minute?: number; /** * The maximum number of snapshots to keep for the weekly schedule. */ snapshotsToKeep: number; } interface VolumeTieringPolicy { /** * Optional. 
Time in days to mark the volume's data block as cold and make it eligible for tiering, can be range from 2-183. * Default is 31. */ coolingThresholdDays?: number; /** * Optional. Flag indicating that the hot tier bypass mode is enabled. Default is false. * Only applicable to Flex service level. */ hotTierBypassModeEnabled?: boolean; /** * Optional. Flag indicating if the volume has tiering policy enable/pause. Default is PAUSED. * Default value is `PAUSED`. * Possible values are: `ENABLED`, `PAUSED`. */ tierAction?: string; } } export declare namespace networkconnectivity { interface DestinationEndpoint { /** * The ASN of the remote IP prefix. */ asn: string; /** * The CSP of the remote IP prefix. */ csp: string; /** * (Output) * The state of the DestinationEndpoint resource. */ state: string; /** * (Output) * Time when the DestinationEndpoint resource was updated. */ updateTime: string; } interface DestinationStateTimeline { /** * (Output) * The state and activation time details of the resource state. * Structure is documented below. */ states: outputs.networkconnectivity.DestinationStateTimelineState[]; } interface DestinationStateTimelineState { /** * (Output) * Accompanies only the transient states, which include `ADDING`, * `DELETING`, and `SUSPENDING`, to denote the time until which the * transient state of the resource will be effective. For instance, if the * state is `ADDING`, this field shows the time when the resource state * transitions to `ACTIVE`. */ effectiveTime: string; /** * (Output) * The state of the resource. */ state: string; } interface GroupAutoAccept { /** * A list of project ids or project numbers for which you want to enable auto-accept. The auto-accept setting is applied to spokes being created or updated in these projects. */ autoAcceptProjects: string[]; } interface HubRoutingVpc { /** * The URI of the VPC network. */ uri?: string; } interface InternalRangeAllocationOptions { /** * Optional. 
Sets the strategy used to automatically find a free range of a size given by prefixLength. Can be set only when trying to create a reservation that automatically finds the free range to reserve. * Possible values are: `RANDOM`, `FIRST_AVAILABLE`, `RANDOM_FIRST_N_AVAILABLE`, `FIRST_SMALLEST_FITTING`. */ allocationStrategy?: string; /** * Must be set when allocationStrategy is RANDOM_FIRST_N_AVAILABLE, otherwise must remain unset. Defines the size of the set of free ranges from which RANDOM_FIRST_N_AVAILABLE strategy randomly selects one, * in other words it sets the N in the RANDOM_FIRST_N_AVAILABLE. */ firstAvailableRangesLookupSize?: number; } interface InternalRangeMigration { /** * Resource path as a URI of the source resource, for example a subnet. * The project for the source resource should match the project for the * InternalRange. * An example /projects/{project}/regions/{region}/subnetworks/{subnet} */ source: string; /** * Resource path of the target resource. The target project can be * different, as in the cases when migrating to peer networks. The resource * may not exist yet. * For example /projects/{project}/regions/{region}/subnetworks/{subnet} */ target: string; } interface MulticloudDataTransferConfigService { /** * The name of the service, like "big-query" or "cloud-storage". * This corresponds to the map key in the API. */ serviceName: string; /** * (Output) * The state and activation time details for the service. * Structure is documented below. * * * The `states` block contains: */ states: outputs.networkconnectivity.MulticloudDataTransferConfigServiceState[]; } interface MulticloudDataTransferConfigServiceState { /** * The time when the state becomes effective */ effectiveTime: string; /** * The state of the resource. */ state: string; } interface PolicyBasedRouteFilter { /** * The destination IP range of outgoing packets that this policy-based route applies to. Default is "0.0.0.0/0". 
*/ destRange?: string; /** * The IP protocol that this policy-based route applies to. Valid values are 'TCP', 'UDP', and 'ALL'. Default is 'ALL'. */ ipProtocol?: string; /** * Internet protocol versions this policy-based route applies to. * Possible values are: `IPV4`, `IPV6`. */ protocolVersion: string; /** * The source IP range of outgoing packets that this policy-based route applies to. Default is "0.0.0.0/0". */ srcRange?: string; } interface PolicyBasedRouteInterconnectAttachment { /** * Cloud region to install this policy-based route on for Interconnect attachments. Use `all` to install it on all Interconnect attachments. */ region: string; } interface PolicyBasedRouteVirtualMachine { /** * A list of VM instance tags that this policy-based route applies to. VM instances that have ANY of tags specified here will install this PBR. */ tags: string[]; } interface PolicyBasedRouteWarning { /** * (Output) * A warning code, if applicable. */ code: string; /** * (Output) * Metadata about this warning in key: value format. The key should provide more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement. */ data: { [key: string]: string; }; /** * (Output) * A human-readable description of the warning code. */ warningMessage: string; } interface ServiceConnectionPolicyPscConfig { /** * List of Projects, Folders, or Organizations from where the Producer instance can be within. For example, * a network administrator can provide both 'organizations/foo' and 'projects/bar' as * allowed_google_producers_resource_hierarchy_levels. This allowlists this network to connect with any Producer * instance within the 'foo' organization or the 'bar' project. By default, * allowedGoogleProducersResourceHierarchyLevel is empty. 
The format for each * allowedGoogleProducersResourceHierarchyLevel is / where is one of 'projects', 'folders', or 'organizations' * and is either the ID or the number of the resource type. Format for each * allowedGoogleProducersResourceHierarchyLevel value: 'projects/' or 'folders/' or 'organizations/' Eg. * [projects/my-project-id, projects/567, folders/891, organizations/123] */ allowedGoogleProducersResourceHierarchyLevels?: string[]; /** * Max number of PSC connections for this policy. */ limit?: string; /** * ProducerInstanceLocation is used to specify which authorization mechanism to use to determine which projects * the Producer instance can be within. * Possible values are: `PRODUCER_INSTANCE_LOCATION_UNSPECIFIED`, `CUSTOM_RESOURCE_HIERARCHY_LEVELS`. */ producerInstanceLocation: string; /** * IDs of the subnetworks or fully qualified identifiers for the subnetworks */ subnetworks: string[]; } interface ServiceConnectionPolicyPscConnection { /** * The resource reference of the consumer address. */ consumerAddress?: string; /** * The resource reference of the PSC Forwarding Rule within the consumer VPC. */ consumerForwardingRule?: string; /** * The project where the PSC connection is created. */ consumerTargetProject?: string; /** * The most recent error during operating this connection. * Structure is documented below. */ error?: outputs.networkconnectivity.ServiceConnectionPolicyPscConnectionError; /** * The error info for the latest error during operating this connection. * Structure is documented below. */ errorInfo?: outputs.networkconnectivity.ServiceConnectionPolicyPscConnectionErrorInfo; /** * The error type indicates whether the error is consumer facing, producer * facing or system internal. * Possible values are: `CONNECTION_ERROR_TYPE_UNSPECIFIED`, `ERROR_INTERNAL`, `ERROR_CONSUMER_SIDE`, `ERROR_PRODUCER_SIDE`. */ errorType?: string; /** * The last Compute Engine operation to setup PSC connection. 
*/ gceOperation?: string; /** * The PSC connection id of the PSC forwarding rule. */ pscConnectionId?: string; /** * The state of the PSC connection. * Possible values are: `STATE_UNSPECIFIED`, `ACTIVE`, `CREATING`, `DELETING`, `FAILED`. */ state?: string; } interface ServiceConnectionPolicyPscConnectionError { /** * The status code, which should be an enum value of [google.rpc.Code][]. */ code?: number; /** * (Output) * A list of messages that carry the error details. */ details: { [key: string]: string; }[]; /** * A developer-facing error message. */ message?: string; } interface ServiceConnectionPolicyPscConnectionErrorInfo { /** * The logical grouping to which the "reason" belongs. */ domain?: string; /** * Additional structured details about this error. */ metadata?: { [key: string]: string; }; /** * The reason of the error. */ reason?: string; } interface SpokeGateway { /** * the capacity of the gateway spoke, in Gbps. * Possible values are: `CAPACITY_1_GBPS`, `CAPACITY_10_GBPS`, `CAPACITY_100_GBPS`. */ capacity: string; /** * A list of IP ranges that are reserved for this gateway's internal infrastructure. * Structure is documented below. */ ipRangeReservations: outputs.networkconnectivity.SpokeGatewayIpRangeReservation[]; /** * (Output, Beta) * Set of Cloud Routers that are attached to this NCC-GW */ routers: string[]; } interface SpokeGatewayIpRangeReservation { /** * A block of IP address ranges used to allocate supporting infrastructure for this gateway—for example, 10.1.2.0/23. The IP address block must be a /23 range. This IP address block must not overlap with subnets in any spoke or peer network that the gateway can communicate with. */ ipRange: string; } interface SpokeLinkedInterconnectAttachments { /** * IP ranges allowed to be included during import from hub (does not control transit connectivity). * The only allowed value for now is "ALL_IPV4_RANGES". 
*/ includeImportRanges?: string[]; /** * A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. */ siteToSiteDataTransfer: boolean; /** * The URIs of linked interconnect attachment resources */ uris: string[]; } interface SpokeLinkedProducerVpcNetwork { /** * IP ranges encompassing the subnets to be excluded from peering. */ excludeExportRanges?: string[]; /** * IP ranges allowed to be included from peering. */ includeExportRanges?: string[]; /** * The URI of the Service Consumer VPC that the Producer VPC is peered with. */ network: string; /** * The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. */ peering: string; /** * (Output) * The URI of the Producer VPC. */ producerNetwork: string; } interface SpokeLinkedRouterApplianceInstances { /** * IP ranges allowed to be included during import from hub (does not control transit connectivity). * The only allowed value for now is "ALL_IPV4_RANGES". */ includeImportRanges?: string[]; /** * The list of router appliance instances * Structure is documented below. */ instances: outputs.networkconnectivity.SpokeLinkedRouterApplianceInstancesInstance[]; /** * A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. */ siteToSiteDataTransfer: boolean; } interface SpokeLinkedRouterApplianceInstancesInstance { /** * The IP address on the VM to use for peering. */ ipAddress: string; /** * The URI of the virtual machine resource */ virtualMachine: string; } interface SpokeLinkedVpcNetwork { /** * IP ranges encompassing the subnets to be excluded from peering. */ excludeExportRanges?: string[]; /** * IP ranges allowed to be included from peering. 
*/ includeExportRanges?: string[]; /** * The URI of the VPC network resource. */ uri: string; } interface SpokeLinkedVpnTunnels { /** * IP ranges allowed to be included during import from hub (does not control transit connectivity). * The only allowed value for now is "ALL_IPV4_RANGES". */ includeImportRanges?: string[]; /** * A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. */ siteToSiteDataTransfer: boolean; /** * The URIs of linked VPN tunnel resources. */ uris: string[]; } interface SpokeReason { /** * The code associated with this reason. */ code?: string; /** * Human-readable details about this reason. */ message?: string; /** * Additional information provided by the user in the RejectSpoke call. */ userDetails?: string; } } export declare namespace networkmanagement { interface ConnectivityTestDestination { /** * A Cloud SQL instance URI. */ cloudSqlInstance?: string; /** * Forwarding rule URI. Forwarding rules are frontends for load balancers, * PSC endpoints, and Protocol Forwarding. */ forwardingRule?: string; /** * A DNS endpoint of Google Kubernetes Engine cluster control plane. * Requires gkeMasterCluster to be set, can't be used simultaneously with * ipAddress or network. Applicable only to destination endpoint. */ fqdn?: string; /** * A cluster URI for Google Kubernetes Engine cluster control plane. */ gkeMasterCluster?: string; /** * A Compute Engine instance URI. */ instance?: string; /** * The IP address of the endpoint, which can be an external or internal IP. */ ipAddress?: string; /** * A VPC network URI. */ network?: string; /** * The IP protocol port of the endpoint. Only applicable when protocol is * TCP or UDP. */ port?: number; /** * Project ID where the endpoint is located. * The project ID can be derived from the URI if you provide an endpoint or * network URI. 
* The following are two cases where you may need to provide the project ID: * 1. Only the IP address is specified, and the IP address is within a Google * Cloud project. * 2. When you are using Shared VPC and the IP address that you provide is * from the service project. In this case, the network that the IP address * resides in is defined in the host project. */ projectId?: string; /** * A Redis Cluster URI. */ redisCluster?: string; /** * A Redis Instance URI. */ redisInstance?: string; } interface ConnectivityTestSource { /** * An App Engine service version. * Structure is documented below. */ appEngineVersion?: outputs.networkmanagement.ConnectivityTestSourceAppEngineVersion; /** * A Cloud Function. * Structure is documented below. */ cloudFunction?: outputs.networkmanagement.ConnectivityTestSourceCloudFunction; /** * A Cloud Run revision. * Structure is documented below. */ cloudRunRevision?: outputs.networkmanagement.ConnectivityTestSourceCloudRunRevision; /** * A Cloud SQL instance URI. */ cloudSqlInstance?: string; /** * A cluster URI for Google Kubernetes Engine cluster control plane. */ gkeMasterCluster?: string; /** * A Compute Engine instance URI. */ instance?: string; /** * The IP address of the endpoint, which can be an external or internal IP. */ ipAddress?: string; /** * A VPC network URI. */ network?: string; /** * Type of the network where the endpoint is located. * Possible values are: `GCP_NETWORK`, `NON_GCP_NETWORK`. */ networkType?: string; /** * The IP protocol port of the endpoint. Only applicable when protocol is * TCP or UDP. */ port?: number; /** * Project ID where the endpoint is located. * The project ID can be derived from the URI if you provide a endpoint or * network URI. * The following are two cases where you may need to provide the project ID: * 1. Only the IP address is specified, and the IP address is within a Google * Cloud project. * 2. 
When you are using Shared VPC and the IP address that you provide is * from the service project. In this case, the network that the IP address * resides in is defined in the host project. */ projectId?: string; } interface ConnectivityTestSourceAppEngineVersion { /** * An App Engine service version name. */ uri?: string; } interface ConnectivityTestSourceCloudFunction { /** * A Cloud Function name. */ uri?: string; } interface ConnectivityTestSourceCloudRunRevision { /** * A Cloud Run revision URI. */ uri?: string; } interface GetConnectivityTestRunReachabilityDetail { /** * (Output) * Status of the connectivity test: RESULT_UNSPECIFIED, REACHABLE, UNREACHABLE, AMBIGUOUS or UNDETERMINED. */ result: string; /** * (Output) * List of connectivity test traces. * Structure is documented below. */ traces: outputs.networkmanagement.GetConnectivityTestRunReachabilityDetailTrace[]; /** * (Output) * Time when reachability details were determined. An RFC3339 timestamp in UTC time. * This in the format of yyyy-MM-ddTHH:mm:ss.SSSZ. */ verifyTime: string; } interface GetConnectivityTestRunReachabilityDetailTrace { /** * (Output) * Derived from the source and destination endpoints definition specified by user request, and validated by the data plane model. * Structure is documented below. */ endpointInfos: outputs.networkmanagement.GetConnectivityTestRunReachabilityDetailTraceEndpointInfo[]; /** * (Output) * ID of the trace. */ forwardTraceId: number; /** * (Output) * A trace of a test contains multiple steps from the initial state to the final state (delivered, dropped, forwarded, or aborted). * Structure is documented below. */ steps: outputs.networkmanagement.GetConnectivityTestRunReachabilityDetailTraceStep[]; } interface GetConnectivityTestRunReachabilityDetailTraceEndpointInfo { /** * (Output) * Destination IP address. */ destinationIp: string; /** * (Output) * URI of the network where this packet is sent to. 
*/ destinationNetworkUri: string; /** * (Output) * Destination port. Only valid when protocol is TCP or UDP. */ destinationPort: number; /** * (Output) * IP protocol in string format, for example: "TCP", "UDP", "ICMP". */ protocol: string; /** * (Output) * URI of the source telemetry agent this packet originates from. */ sourceAgentUri: string; /** * (Output) * Source IP address. */ sourceIp: string; /** * (Output) * URI of the network where this packet originates from. */ sourceNetworkUri: string; /** * (Output) * Source port. Only valid when protocol is TCP or UDP. */ sourcePort: number; } interface GetConnectivityTestRunReachabilityDetailTraceStep { /** * (Output) * If this step leads to the final state Drop. */ causesDrop: boolean; /** * (Output) * Description of the connectivity test step. */ description: string; /** * (Output) * Project ID of the connectivity test step. */ projectId: string; /** * (Output) * State of the connectivity test step. */ state: string; } interface GetConnectivityTestsConnectivityTest { /** * Whether the analysis should skip firewall checking. */ bypassFirewallChecks: boolean; /** * The user-supplied description of the Connectivity Test. */ description: string; /** * Destination specification of the Connectivity Test. * Structure is documented below. */ destinations: outputs.networkmanagement.GetConnectivityTestsConnectivityTestDestination[]; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * Resource labels to represent user-provided metadata. */ labels: { [key: string]: string; }; /** * Unique name for the connectivity test. */ name: string; /** * The ID of the project. */ project: string; /** * IP Protocol of the test. */ protocol: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. 
*/ pulumiLabels: { [key: string]: string; }; /** * Other projects that may be relevant for reachability analysis. */ relatedProjects: string[]; /** * Whether run analysis for the return path from destination to source. */ roundTrip: boolean; /** * Source specification of the Connectivity Test. * Structure is documented below. */ sources: outputs.networkmanagement.GetConnectivityTestsConnectivityTestSource[]; } interface GetConnectivityTestsConnectivityTestDestination { /** * A Cloud SQL instance URI. */ cloudSqlInstance: string; /** * Forwarding rule URI. Forwarding rules are frontends for load balancers, * PSC endpoints, and Protocol Forwarding. */ forwardingRule: string; /** * A DNS endpoint of Google Kubernetes Engine cluster control plane. */ fqdn: string; /** * A cluster URI for Google Kubernetes Engine cluster control plane. */ gkeMasterCluster: string; /** * A Compute Engine instance URI. */ instance: string; /** * The IP address of the endpoint. */ ipAddress: string; /** * A VPC network URI. */ network: string; /** * The IP protocol port of the endpoint. */ port: number; /** * Project ID where the endpoint is located. */ projectId: string; /** * A Redis Cluster URI. */ redisCluster: string; /** * A Redis Instance URI. */ redisInstance: string; } interface GetConnectivityTestsConnectivityTestSource { /** * An App Engine service version. * Structure is documented below. */ appEngineVersions: outputs.networkmanagement.GetConnectivityTestsConnectivityTestSourceAppEngineVersion[]; /** * A Cloud Function. * Structure is documented below. */ cloudFunctions: outputs.networkmanagement.GetConnectivityTestsConnectivityTestSourceCloudFunction[]; /** * A Cloud Run revision. * Structure is documented below. */ cloudRunRevisions: outputs.networkmanagement.GetConnectivityTestsConnectivityTestSourceCloudRunRevision[]; /** * A Cloud SQL instance URI. */ cloudSqlInstance: string; /** * A cluster URI for Google Kubernetes Engine cluster control plane. 
*/ gkeMasterCluster: string; /** * A Compute Engine instance URI. */ instance: string; /** * The IP address of the endpoint. */ ipAddress: string; /** * A VPC network URI. */ network: string; /** * Type of the network where the endpoint is located. */ networkType: string; /** * The IP protocol port of the endpoint. */ port: number; /** * Project ID where the endpoint is located. */ projectId: string; } interface GetConnectivityTestsConnectivityTestSourceAppEngineVersion { /** * An App Engine service version name. */ uri: string; } interface GetConnectivityTestsConnectivityTestSourceCloudFunction { /** * A Cloud Function name. */ uri: string; } interface GetConnectivityTestsConnectivityTestSourceCloudRunRevision { /** * A Cloud Run revision URI. */ uri: string; } } export declare namespace networksecurity { interface AddressGroupIamBindingCondition { description?: string; expression: string; title: string; } interface AddressGroupIamMemberCondition { description?: string; expression: string; title: string; } interface AuthorizationPolicyRule { /** * List of attributes for the traffic destination. All of the destinations must match. A destination is a match if a request matches all the specified hosts, ports, methods and headers. * If not set, the action specified in the 'action' field will be applied without any rule checks for the destination. * Structure is documented below. */ destinations?: outputs.networksecurity.AuthorizationPolicyRuleDestination[]; /** * List of attributes for the traffic source. All of the sources must match. A source is a match if both principals and ipBlocks match. * If not set, the action specified in the 'action' field will be applied without any rule checks for the source. * Structure is documented below. */ sources?: outputs.networksecurity.AuthorizationPolicyRuleSource[]; } interface AuthorizationPolicyRuleDestination { /** * List of host names to match. Matched against the ":authority" header in http requests. 
At least one host should match. Each host can be an exact match, or a prefix match (example "mydomain.*") or a suffix match (example "*.myorg.com") or a presence (any) match "*". */ hosts: string[]; /** * Match against key:value pair in http header. Provides a flexible match based on HTTP headers, for potentially advanced use cases. At least one header should match. * Avoid using header matches to make authorization decisions unless there is a strong guarantee that requests arrive through a trusted client or proxy. * Structure is documented below. */ httpHeaderMatch?: outputs.networksecurity.AuthorizationPolicyRuleDestinationHttpHeaderMatch; /** * A list of HTTP methods to match. At least one method should match. Should not be set for gRPC services. */ methods: string[]; /** * List of destination ports to match. At least one port should match. */ ports: number[]; } interface AuthorizationPolicyRuleDestinationHttpHeaderMatch { /** * The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method". */ headerName: string; /** * The value of the header must match the regular expression specified in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript For matching against a port specified in the HTTP request, use a headerMatch with headerName set to Host and a regular expression that satisfies the RFC2616 Host header's port specifier. */ regexMatch: string; } interface AuthorizationPolicyRuleSource { /** * List of CIDR ranges to match based on source IP address. At least one IP block should match. Single IP (e.g., "1.2.3.4") and CIDR (e.g., "1.2.3.0/24") are supported. Authorization based on source IP alone should be avoided. * The IP addresses of any load balancers or proxies should be considered untrusted. */ ipBlocks?: string[]; /** * List of peer identities to match for authorization. 
At least one principal should match. Each peer can be an exact match, or a prefix match (example, "namespace/*") or a suffix match (example, "*/service-account") or a presence match "*". * Authorization based on the principal name without certificate validation (configured by ServerTlsPolicy resource) is considered insecure. */ principals?: string[]; } interface AuthzPolicyCustomProvider { /** * Delegate authorization decision to user authored Service Extension. Only one of cloudIap or authzExtension can be specified. * Structure is documented below. */ authzExtension?: outputs.networksecurity.AuthzPolicyCustomProviderAuthzExtension; /** * Delegates authorization decisions to Cloud IAP. Applicable only for managed load balancers. Enabling Cloud IAP at the AuthzPolicy level is not compatible with Cloud IAP settings in the BackendService. Enabling IAP in both places will result in request failure. Ensure that IAP is enabled in either the AuthzPolicy or the BackendService but not in both places. * Structure is documented below. */ cloudIap?: outputs.networksecurity.AuthzPolicyCustomProviderCloudIap; } interface AuthzPolicyCustomProviderAuthzExtension { /** * A list of references to authorization extensions that will be invoked for requests matching this policy. Limited to 1 custom provider. */ resources: string[]; } interface AuthzPolicyCustomProviderCloudIap { /** * Enable Cloud IAP at the AuthzPolicy level. */ enabled: boolean; } interface AuthzPolicyHttpRule { /** * Describes properties of one or more sources of a request. * Structure is documented below. */ from?: outputs.networksecurity.AuthzPolicyHttpRuleFrom; /** * Describes properties of one or more targets of a request * Structure is documented below. */ to?: outputs.networksecurity.AuthzPolicyHttpRuleTo; /** * CEL expression that describes the conditions to be satisfied for the action. The result of the CEL expression is ANDed with the from and to. 
Refer to the CEL language reference for a list of available attributes. */ when?: string; } interface AuthzPolicyHttpRuleFrom { /** * Describes the negated properties of request sources. Matches requests from sources that do not match the criteria specified in this field. At least one of sources or notSources must be specified. Limited to 1 not_source. * Structure is documented below. */ notSources?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSource[]; /** * Describes the properties of a request's sources. At least one of sources or notSources must be specified. Limited to 1 source. A match occurs when ANY source (in sources or notSources) matches the request. Within a single source, the match follows AND semantics across fields and OR semantics within a single field, i.e. a match occurs when ANY principal matches AND ANY ipBlocks match. * Structure is documented below. */ sources?: outputs.networksecurity.AuthzPolicyHttpRuleFromSource[]; } interface AuthzPolicyHttpRuleFromNotSource { /** * A list of IP addresses or IP address ranges to match against the source IP address of the request. Limited to 10 ipBlocks per Authorization Policy * Structure is documented below. */ ipBlocks?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSourceIpBlock[]; /** * A list of identities derived from the client's certificate. This field will not match on a request unless mutual TLS is enabled for the Forwarding rule or Gateway. Each identity is a string whose value is matched against the URI SAN, or DNS SAN or the subject field in the client's certificate. The match can be exact, prefix, suffix or a substring match. One of exact, prefix, suffix or contains must be specified. * Limited to 5 principals. * Structure is documented below. */ principals?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSourcePrincipal[]; /** * A list of resources to match against the resource of the source VM of a request. * Limited to 5 resources. * Structure is documented below. 
*/ resources?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSourceResource[]; } interface AuthzPolicyHttpRuleFromNotSourceIpBlock { /** * The length of the address range. */ length: number; /** * The address prefix. */ prefix: string; } interface AuthzPolicyHttpRuleFromNotSourcePrincipal { /** * (Optional, Deprecated) * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def * * > **Warning:** `principals.contains` is deprecated and will be removed in a future major release. Use `principals.principal.contains` instead. * * @deprecated `principals.contains` is deprecated and will be removed in a future major release. Use `principals.principal.contains` instead. */ contains?: string; /** * (Optional, Deprecated) * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. * * > **Warning:** `principals.exact` is deprecated and will be removed in a future major release. Use `principals.principal.exact` instead. * * @deprecated `principals.exact` is deprecated and will be removed in a future major release. Use `principals.principal.exact` instead. */ exact?: string; /** * (Optional, Deprecated) * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. * * > **Warning:** `principals.ignore_case` is deprecated and will be removed in a future major release. Use `principals.principal.ignore_case` instead. * * @deprecated `principals.ignore_case` is deprecated and will be removed in a future major release. Use `principals.principal.ignore_case` instead. */ ignoreCase?: boolean; /** * (Optional, Deprecated) * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. 
* Examples: * * abc matches the value abc.xyz * * > **Warning:** `principals.prefix` is deprecated and will be removed in a future major release. Use `principals.principal.prefix` instead. * * @deprecated `principals.prefix` is deprecated and will be removed in a future major release. Use `principals.principal.prefix` instead. */ prefix?: string; /** * Required. A non-empty string whose value is matched against the principal value based on the principalSelector. * Only exact match can be applied for CLIENT_CERT_URI_SAN, CLIENT_CERT_DNS_NAME_SAN, CLIENT_CERT_COMMON_NAME selectors. * Structure is documented below. */ principal?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSourcePrincipalPrincipal; /** * An enum to decide what principal value the principal rule will match against. If not specified, the PrincipalSelector is CLIENT_CERT_URI_SAN. * Default value is `CLIENT_CERT_URI_SAN`. * Possible values are: `PRINCIPAL_SELECTOR_UNSPECIFIED`, `CLIENT_CERT_URI_SAN`, `CLIENT_CERT_DNS_NAME_SAN`, `CLIENT_CERT_COMMON_NAME`. */ principalSelector?: string; /** * (Optional, Deprecated) * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc * * > **Warning:** `principals.suffix` is deprecated and will be removed in a future major release. Use `principals.principal.suffix` instead. * * @deprecated `principals.suffix` is deprecated and will be removed in a future major release. Use `principals.principal.suffix` instead. */ suffix?: string; } interface AuthzPolicyHttpRuleFromNotSourcePrincipalPrincipal { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. 
*/ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleFromNotSourceResource { /** * An IAM service account to match against the source service account of the VM sending the request. * Structure is documented below. */ iamServiceAccount?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSourceResourceIamServiceAccount; /** * A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. * Structure is documented below. */ tagValueIdSet?: outputs.networksecurity.AuthzPolicyHttpRuleFromNotSourceResourceTagValueIdSet; } interface AuthzPolicyHttpRuleFromNotSourceResourceIamServiceAccount { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. 
* Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleFromNotSourceResourceTagValueIdSet { /** * A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. The match follows AND semantics which means all the ids must match. * Limited to 5 matches. */ ids?: string[]; } interface AuthzPolicyHttpRuleFromSource { /** * A list of IP addresses or IP address ranges to match against the source IP address of the request. Limited to 10 ipBlocks per Authorization Policy * Structure is documented below. */ ipBlocks?: outputs.networksecurity.AuthzPolicyHttpRuleFromSourceIpBlock[]; /** * A list of identities derived from the client's certificate. This field will not match on a request unless mutual TLS is enabled for the Forwarding rule or Gateway. Each identity is a string whose value is matched against the URI SAN, or DNS SAN or the subject field in the client's certificate. The match can be exact, prefix, suffix or a substring match. One of exact, prefix, suffix or contains must be specified. * Limited to 5 principals. * Structure is documented below. */ principals?: outputs.networksecurity.AuthzPolicyHttpRuleFromSourcePrincipal[]; /** * A list of resources to match against the resource of the source VM of a request. * Limited to 5 resources. * Structure is documented below. */ resources?: outputs.networksecurity.AuthzPolicyHttpRuleFromSourceResource[]; } interface AuthzPolicyHttpRuleFromSourceIpBlock { /** * The length of the address range. */ length: number; /** * The address prefix. */ prefix: string; } interface AuthzPolicyHttpRuleFromSourcePrincipal { /** * (Optional, Deprecated) * The input string must have the substring specified here. 
Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def * * > **Warning:** `principals.contains` is deprecated and will be removed in a future major release. Use `principals.principal.contains` instead. * * @deprecated `principals.contains` is deprecated and will be removed in a future major release. Use `principals.principal.contains` instead. */ contains?: string; /** * (Optional, Deprecated) * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. * * > **Warning:** `principals.exact` is deprecated and will be removed in a future major release. Use `principals.principal.exact` instead. * * @deprecated `principals.exact` is deprecated and will be removed in a future major release. Use `principals.principal.exact` instead. */ exact?: string; /** * (Optional, Deprecated) * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. * * > **Warning:** `principals.ignore_case` is deprecated and will be removed in a future major release. Use `principals.principal.ignore_case` instead. * * @deprecated `principals.ignore_case` is deprecated and will be removed in a future major release. Use `principals.principal.ignore_case` instead. */ ignoreCase?: boolean; /** * (Optional, Deprecated) * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz * * > **Warning:** `principals.prefix` is deprecated and will be removed in a future major release. Use `principals.principal.prefix` instead. * * @deprecated `principals.prefix` is deprecated and will be removed in a future major release. Use `principals.principal.prefix` instead. */ prefix?: string; /** * Required. 
A non-empty string whose value is matched against the principal value based on the principalSelector. * Only exact match can be applied for CLIENT_CERT_URI_SAN, CLIENT_CERT_DNS_NAME_SAN, CLIENT_CERT_COMMON_NAME selectors. * Structure is documented below. */ principal?: outputs.networksecurity.AuthzPolicyHttpRuleFromSourcePrincipalPrincipal; /** * An enum to decide what principal value the principal rule will match against. If not specified, the PrincipalSelector is CLIENT_CERT_URI_SAN. * Default value is `CLIENT_CERT_URI_SAN`. * Possible values are: `PRINCIPAL_SELECTOR_UNSPECIFIED`, `CLIENT_CERT_URI_SAN`, `CLIENT_CERT_DNS_NAME_SAN`, `CLIENT_CERT_COMMON_NAME`. */ principalSelector?: string; /** * (Optional, Deprecated) * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc * * > **Warning:** `principals.suffix` is deprecated and will be removed in a future major release. Use `principals.principal.suffix` instead. * * @deprecated `principals.suffix` is deprecated and will be removed in a future major release. Use `principals.principal.suffix` instead. */ suffix?: string; } interface AuthzPolicyHttpRuleFromSourcePrincipalPrincipal { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. 
* Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleFromSourceResource { /** * An IAM service account to match against the source service account of the VM sending the request. * Structure is documented below. */ iamServiceAccount?: outputs.networksecurity.AuthzPolicyHttpRuleFromSourceResourceIamServiceAccount; /** * A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. * Structure is documented below. */ tagValueIdSet?: outputs.networksecurity.AuthzPolicyHttpRuleFromSourceResourceTagValueIdSet; } interface AuthzPolicyHttpRuleFromSourceResourceIamServiceAccount { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. 
* Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleFromSourceResourceTagValueIdSet { /** * A list of resource tag value permanent IDs to match against the resource manager tags value associated with the source VM of a request. The match follows AND semantics which means all the ids must match. * Limited to 5 matches. */ ids?: string[]; } interface AuthzPolicyHttpRuleTo { /** * Describes the negated properties of the targets of a request. Matches requests for operations that do not match the criteria specified in this field. At least one of operations or notOperations must be specified. Limited to 1 not_operation. * Structure is documented below. */ notOperations?: outputs.networksecurity.AuthzPolicyHttpRuleToNotOperation[]; /** * Describes properties of one or more targets of a request. At least one of operations or notOperations must be specified. Limited to 1 operation. A match occurs when ANY operation (in operations or notOperations) matches. Within an operation, the match follows AND semantics across fields and OR semantics within a field, i.e. a match occurs when ANY path matches AND ANY header matches and ANY method matches. * Structure is documented below. */ operations?: outputs.networksecurity.AuthzPolicyHttpRuleToOperation[]; } interface AuthzPolicyHttpRuleToNotOperation { /** * A list of headers to match against in http header. * Structure is documented below. */ headerSet?: outputs.networksecurity.AuthzPolicyHttpRuleToNotOperationHeaderSet; /** * A list of HTTP Hosts to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set. * Limited to 10 matches. * Structure is documented below. */ hosts?: outputs.networksecurity.AuthzPolicyHttpRuleToNotOperationHost[]; /** * A list of HTTP methods to match against. Each entry must be a valid HTTP method name (GET, PUT, POST, HEAD, PATCH, DELETE, OPTIONS). 
It only allows exact match and is always case sensitive. */ methods?: string[]; /** * A list of paths to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set. * Limited to 10 matches. * Note that this path match includes the query parameters. For gRPC services, this should be a fully-qualified name of the form /package.service/method. * Structure is documented below. */ paths?: outputs.networksecurity.AuthzPolicyHttpRuleToNotOperationPath[]; } interface AuthzPolicyHttpRuleToNotOperationHeaderSet { /** * A list of headers to match against in http header. The match can be one of exact, prefix, suffix, or contains (substring match). The match follows AND semantics which means all the headers must match. Matches are always case sensitive unless the ignoreCase is set. Limited to 10 matches. * Structure is documented below. */ headers?: outputs.networksecurity.AuthzPolicyHttpRuleToNotOperationHeaderSetHeader[]; } interface AuthzPolicyHttpRuleToNotOperationHeaderSetHeader { /** * Specifies the name of the header in the request. */ name?: string; /** * Specifies how the header match will be performed. * Structure is documented below. */ value?: outputs.networksecurity.AuthzPolicyHttpRuleToNotOperationHeaderSetHeaderValue; } interface AuthzPolicyHttpRuleToNotOperationHeaderSetHeaderValue { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. 
*/ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleToNotOperationHost { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleToNotOperationPath { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. 
*/ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleToOperation { /** * A list of headers to match against in http header. * Structure is documented below. */ headerSet?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationHeaderSet; /** * A list of HTTP Hosts to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set. * Limited to 10 matches. * Structure is documented below. */ hosts?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationHost[]; /** * Defines the MCP protocol attributes to match on. MCP based match is allowed only when the AuthzPolicy points to an AgentGateway. * Structure is documented below. */ mcp?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationMcp; /** * A list of HTTP methods to match against. Each entry must be a valid HTTP method name (GET, PUT, POST, HEAD, PATCH, DELETE, OPTIONS). It only allows exact match and is always case sensitive. */ methods?: string[]; /** * A list of paths to match against. The match can be one of exact, prefix, suffix, or contains (substring match). Matches are always case sensitive unless the ignoreCase is set. * Limited to 10 matches. * Note that this path match includes the query parameters. For gRPC services, this should be a fully-qualified name of the form /package.service/method. * Structure is documented below. */ paths?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationPath[]; } interface AuthzPolicyHttpRuleToOperationHeaderSet { /** * A list of headers to match against in http header. 
The match can be one of exact, prefix, suffix, or contains (substring match). The match follows AND semantics which means all the headers must match. Matches are always case sensitive unless the ignoreCase is set. Limited to 10 matches. * Structure is documented below. */ headers?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationHeaderSetHeader[]; } interface AuthzPolicyHttpRuleToOperationHeaderSetHeader { /** * Specifies the name of the header in the request. */ name?: string; /** * Specifies how the header match will be performed. * Structure is documented below. */ value?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationHeaderSetHeaderValue; } interface AuthzPolicyHttpRuleToOperationHeaderSetHeaderValue { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleToOperationHost { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. 
* Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyHttpRuleToOperationMcp { /** * If specified, matches on the MCP protocol’s non-access specific methods namely: * initialize/ * completion/ * logging/ * notifications/ * ping * Default value is `SKIP_BASE_PROTOCOL_METHODS`. * Possible values are: `SKIP_BASE_PROTOCOL_METHODS`, `MATCH_BASE_PROTOCOL_METHODS`. */ baseProtocolMethodsOption?: string; /** * Defines a set of MCP methods and associated parameters to match on. It is recommended to use this field to match on tools, prompts and resource accesses while setting the includeBaseProtocolMethods to true to match on all the other MCP protocol methods. * Structure is documented below. */ methods?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationMcpMethod[]; } interface AuthzPolicyHttpRuleToOperationMcpMethod { /** * The MCP method to match against. Allowed values are as follows: * 1) ā€œtoolsā€, ā€œpromptsā€, ā€œresourcesā€ - these will match against all sub methods under the respective methods. * 2) ā€œprompts/listā€, ā€œtools/listā€, ā€œresources/listā€, ā€œresources/templates/listā€ * 3) ā€œprompts/getā€, ā€œtools/callā€, ā€œresources/subscribeā€, ā€œresources/unsubscribeā€, ā€œresources/readā€ * Params cannot be specified for categories 1) and 2). */ name: string; /** * MCP method parameters to match against. 
* Structure is documented below. */ params?: outputs.networksecurity.AuthzPolicyHttpRuleToOperationMcpMethodParam[]; } interface AuthzPolicyHttpRuleToOperationMcpMethodParam { /** * A substring match on the MCP method parameter name. */ contains?: string; /** * An exact match on the MCP method parameter name. */ exact?: string; /** * Specifies that the string match should be case insensitive. */ ignoreCase?: boolean; /** * A prefix match on the MCP method parameter name. */ prefix?: string; /** * A suffix match on the MCP method parameter name. */ suffix?: string; } interface AuthzPolicyHttpRuleToOperationPath { /** * The input string must have the substring specified here. Note: empty contains match is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc.def */ contains?: string; /** * The input string must match exactly the string specified here. * Examples: * * abc only matches the value abc. */ exact?: string; /** * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. For example, the matcher data will match both input string Data and data if set to true. */ ignoreCase?: boolean; /** * The input string must have the prefix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value abc.xyz */ prefix?: string; /** * The input string must have the suffix specified here. Note: empty prefix is not allowed, please use regex instead. * Examples: * * abc matches the value xyz.abc */ suffix?: string; } interface AuthzPolicyTarget { /** * Required when targeting forwarding rules and secure web proxy. Must not be specified when targeting Agent * Gateway. All resources referenced by this policy and extensions must share the same load balancing scheme. * For more information, refer to [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service). 
* Possible values are: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`, `INTERNAL_SELF_MANAGED`. */ loadBalancingScheme?: string; /** * A list of references to the Forwarding Rules or Secure Web Proxy Gateways or Agent Gateways on which this * policy will be applied. */ resources?: string[]; } interface ClientTlsPolicyClientCertificate { /** * The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information. * Structure is documented below. */ certificateProviderInstance?: outputs.networksecurity.ClientTlsPolicyClientCertificateCertificateProviderInstance; /** * gRPC specific configuration to access the gRPC server to obtain the cert and private key. * Structure is documented below. */ grpcEndpoint?: outputs.networksecurity.ClientTlsPolicyClientCertificateGrpcEndpoint; } interface ClientTlsPolicyClientCertificateCertificateProviderInstance { /** * Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "googleCloudPrivateSpiffe" to use Certificate Authority Service certificate provider instance. */ pluginInstance: string; } interface ClientTlsPolicyClientCertificateGrpcEndpoint { /** * The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". */ targetUri: string; } interface ClientTlsPolicyServerValidationCa { /** * The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information. * Structure is documented below. */ certificateProviderInstance?: outputs.networksecurity.ClientTlsPolicyServerValidationCaCertificateProviderInstance; /** * gRPC specific configuration to access the gRPC server to obtain the cert and private key. * Structure is documented below. 
*/ grpcEndpoint?: outputs.networksecurity.ClientTlsPolicyServerValidationCaGrpcEndpoint; } interface ClientTlsPolicyServerValidationCaCertificateProviderInstance { /** * Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "googleCloudPrivateSpiffe" to use Certificate Authority Service certificate provider instance. */ pluginInstance: string; } interface ClientTlsPolicyServerValidationCaGrpcEndpoint { /** * The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". */ targetUri: string; } interface FirewallEndpointEndpointSettings { /** * Indicates whether Jumbo Frames are enabled for the firewall endpoint. */ jumboFramesEnabled?: boolean; } interface InterceptDeploymentGroupConnectedEndpointGroup { /** * (Output) * The connected endpoint group's resource name, for example: * `projects/123456789/locations/global/interceptEndpointGroups/my-eg`. * See https://google.aip.dev/124. */ name: string; } interface InterceptDeploymentGroupLocation { /** * The cloud location of the deployment group, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface InterceptEndpointGroupAssociation { /** * (Output) * The connected deployment group's resource name, for example: * `projects/123456789/locations/global/interceptDeploymentGroups/my-dg`. * See https://google.aip.dev/124. */ name: string; /** * (Output) * The associated network, for example: * projects/123456789/global/networks/my-network. * See https://google.aip.dev/124. */ network: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface InterceptEndpointGroupAssociationLocation { /** * The cloud location of the association, currently restricted to `global`. 
*/ location: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface InterceptEndpointGroupAssociationLocationsDetail { /** * The cloud location of the association, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface InterceptEndpointGroupConnectedDeploymentGroup { /** * (Output) * The list of locations where the deployment group is present. * Structure is documented below. */ locations: outputs.networksecurity.InterceptEndpointGroupConnectedDeploymentGroupLocation[]; /** * (Output) * The connected deployment group's resource name, for example: * `projects/123456789/locations/global/interceptDeploymentGroups/my-dg`. * See https://google.aip.dev/124. */ name: string; } interface InterceptEndpointGroupConnectedDeploymentGroupLocation { /** * The cloud location of the endpoint group, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface MirroringDeploymentGroupConnectedEndpointGroup { /** * (Output) * The connected endpoint group's resource name, for example: * `projects/123456789/locations/global/mirroringEndpointGroups/my-eg`. * See https://google.aip.dev/124. */ name: string; } interface MirroringDeploymentGroupLocation { /** * The cloud location of the deployment group, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. 
* Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface MirroringEndpointGroupAssociation { /** * (Output) * The connected deployment group's resource name, for example: * `projects/123456789/locations/global/mirroringDeploymentGroups/my-dg`. * See https://google.aip.dev/124. */ name: string; /** * (Output) * The associated network, for example: * projects/123456789/global/networks/my-network. * See https://google.aip.dev/124. */ network: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface MirroringEndpointGroupAssociationLocation { /** * The cloud location of the association, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface MirroringEndpointGroupAssociationLocationsDetail { /** * The cloud location of the association, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. * Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface MirroringEndpointGroupConnectedDeploymentGroup { /** * (Output) * The list of locations where the deployment group is present. * Structure is documented below. */ locations: outputs.networksecurity.MirroringEndpointGroupConnectedDeploymentGroupLocation[]; /** * (Output) * The connected deployment group's resource name, for example: * `projects/123456789/locations/global/mirroringDeploymentGroups/my-dg`. * See https://google.aip.dev/124. */ name: string; } interface MirroringEndpointGroupConnectedDeploymentGroupLocation { /** * The cloud location of the endpoint group, currently restricted to `global`. */ location: string; /** * (Output) * The current state of the association in this location. 
* Possible values: * STATE_UNSPECIFIED * ACTIVE * OUT_OF_SYNC */ state: string; } interface SacAttachmentSymantecOptions { /** * Name to be used when creating a location on the customer's behalf in Symantec's Location API. Not to be confused with Google Cloud locations. */ symantecLocationName?: string; /** * Symantec data center identifier that this attachment will connect to. */ symantecSite?: string; } interface SacRealmPairingKey { /** * (Output) * Timestamp in UTC of when this resource is considered expired. It expires 7 days after creation. */ expireTime: string; /** * (Output) * Key value. */ key: string; } interface SacRealmSymantecOptions { /** * (Output) * Symantec site IDs which the user can choose to connect to. */ availableSymantecSites: string[]; /** * API Key used to call Symantec APIs on the user's behalf. Required if using Symantec Cloud SWG. P4SA account needs permissions granted to read this secret. * A secret ID, secret name, or secret URI can be specified, but it will be parsed and stored as a secret URI in the form projects/{projectNumber}/secrets/my-secret. */ secretPath?: string; /** * (Output) * Connection status to Symantec API */ symantecConnectionState: string; } interface SecurityProfileCustomInterceptProfile { /** * The Intercept Endpoint Group to which matching traffic should be intercepted. * Format: projects/{project_id}/locations/global/interceptEndpointGroups/{endpoint_group_id} */ interceptEndpointGroup: string; } interface SecurityProfileCustomMirroringProfile { /** * (Optional, Beta) * The target downstream Mirroring Deployment Groups. * This field is used for Packet Broker mirroring endpoint groups to specify * the deployment groups that the packet should be mirrored to by the broker. * Format: projects/{project_id}/locations/global/mirroringDeploymentGroups/{deployment_group_id} */ mirroringDeploymentGroups?: string[]; /** * The target Mirroring Endpoint Group. 
* When a mirroring rule with this security profile attached matches a packet, * a replica will be mirrored to the location-local target in this group. * Format: projects/{project_id}/locations/global/mirroringEndpointGroups/{endpoint_group_id} */ mirroringEndpointGroup: string; /** * (Output, Beta) * The type of the mirroring endpoint group this profile is attached to. * Possible values: * DIRECT * BROKER */ mirroringEndpointGroupType: string; } interface SecurityProfileThreatPreventionProfile { /** * Defines what action to take for antivirus threats per protocol. * Structure is documented below. */ antivirusOverrides?: outputs.networksecurity.SecurityProfileThreatPreventionProfileAntivirusOverride[]; /** * The configuration for overriding threats actions by severity match. * Structure is documented below. */ severityOverrides?: outputs.networksecurity.SecurityProfileThreatPreventionProfileSeverityOverride[]; /** * The configuration for overriding threats actions by threat id match. * If a threat is matched both by configuration provided in severity overrides * and threat overrides, the threat overrides action is applied. * Structure is documented below. */ threatOverrides?: outputs.networksecurity.SecurityProfileThreatPreventionProfileThreatOverride[]; } interface SecurityProfileThreatPreventionProfileAntivirusOverride { /** * Threat action override. For some threat types, only a subset of actions applies. * Possible values are: `ALERT`, `ALLOW`, `DEFAULT_ACTION`, `DENY`. */ action: string; /** * Required protocol to match. * Possible values are: `SMTP`, `SMB`, `POP3`, `IMAP`, `HTTP2`, `HTTP`, `FTP`. */ protocol: string; } interface SecurityProfileThreatPreventionProfileSeverityOverride { /** * Threat action override. * Possible values are: `ALERT`, `ALLOW`, `DEFAULT_ACTION`, `DENY`. */ action: string; /** * Severity level to match. * Possible values are: `CRITICAL`, `HIGH`, `INFORMATIONAL`, `LOW`, `MEDIUM`. 
*/ severity: string; } interface SecurityProfileThreatPreventionProfileThreatOverride { /** * Threat action. * Possible values are: `ALERT`, `ALLOW`, `DEFAULT_ACTION`, `DENY`. */ action: string; /** * Vendor-specific ID of a threat to override. */ threatId: string; /** * (Output) * Type of threat. */ type: string; } interface SecurityProfileUrlFilteringProfile { /** * The configuration for action to take based on domain name match. * A domain name would be checked for matching filters through the list in order of highest to lowest priority, * and the first filter that a domain name matches with is the one whose actions gets applied. * Structure is documented below. */ urlFilters?: outputs.networksecurity.SecurityProfileUrlFilteringProfileUrlFilter[]; } interface SecurityProfileUrlFilteringProfileUrlFilter { /** * The action to take when the filter is applied. * Possible values are: `ALLOW`, `DENY`. */ filteringAction: string; /** * The priority of the filter within the URL filtering profile. * Must be an integer from 0 and 2147483647, inclusive. Lower integers indicate higher priorities. * The priority of a filter must be unique within a URL filtering profile. */ priority: number; /** * A list of domain matcher strings that a domain name gets compared with to determine if the filter is applicable. * A domain name must match with at least one of the strings in the list for a filter to be applicable. */ urls?: string[]; } interface ServerTlsPolicyMtlsPolicy { /** * Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. * Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate. * Structure is documented below. 
*/ clientValidationCas?: outputs.networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCa[]; /** * When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled. * Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty. * Possible values are: `CLIENT_VALIDATION_MODE_UNSPECIFIED`, `ALLOW_INVALID_OR_MISSING_CLIENT_CERT`, `REJECT_INVALID`. */ clientValidationMode?: string; /** * Reference to the TrustConfig from certificatemanager.googleapis.com namespace. * If specified, the chain validation will be performed against certificates configured in the given TrustConfig. * Allowed only if the policy is to be used with external HTTPS load balancers. */ clientValidationTrustConfig?: string; } interface ServerTlsPolicyMtlsPolicyClientValidationCa { /** * Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. * Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. * Structure is documented below. */ certificateProviderInstance?: outputs.networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance; /** * gRPC specific configuration to access the gRPC server to obtain the cert and private key. * Structure is documented below. */ grpcEndpoint?: outputs.networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint; } interface ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance { /** * Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "googleCloudPrivateSpiffe" to use Certificate Authority Service certificate provider instance. 
*/ pluginInstance: string; } interface ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint { /** * The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". */ targetUri: string; } interface ServerTlsPolicyServerCertificate { /** * Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. * Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. * Structure is documented below. */ certificateProviderInstance?: outputs.networksecurity.ServerTlsPolicyServerCertificateCertificateProviderInstance; /** * gRPC specific configuration to access the gRPC server to obtain the cert and private key. * Structure is documented below. */ grpcEndpoint?: outputs.networksecurity.ServerTlsPolicyServerCertificateGrpcEndpoint; } interface ServerTlsPolicyServerCertificateCertificateProviderInstance { /** * Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "googleCloudPrivateSpiffe" to use Certificate Authority Service certificate provider instance. */ pluginInstance: string; } interface ServerTlsPolicyServerCertificateGrpcEndpoint { /** * The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". */ targetUri: string; } } export declare namespace networkservices { interface AgentGatewayAgentGatewayCard { /** * (Output) * mTLS Endpoint associated with this AgentGateway. */ mtlsEndpoint: string; /** * (Output) * Root Certificates for Agents to validate this AgentGateway. */ rootCertificates: string[]; /** * (Output) * Service Account used by Service Extensions to operate. */ serviceExtensionsServiceAccount: string; } interface AgentGatewayGoogleManaged { /** * Operating Mode of Agent Gateway. * Possible values are: `AGENT_TO_ANYWHERE`, `CLIENT_TO_AGENT`. 
*/ governedAccessPath: string; } interface AgentGatewayNetworkConfig { /** * Optional PSC-Interface network attachment for connectivity to your * private VPCs network. * Structure is documented below. */ egress: outputs.networkservices.AgentGatewayNetworkConfigEgress; } interface AgentGatewayNetworkConfigEgress { /** * The URI of the Network Attachment resource. */ networkAttachment: string; } interface AgentGatewaySelfManaged { /** * A supported Google Cloud networking proxy in the Project and Location. */ resourceUri: string; } interface EdgeCacheKeysetPublicKey { /** * The ID of the public key. The ID must be 1-63 characters long, and comply with RFC1035. * The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* * which means the first character must be a letter, and all following characters must be a dash, underscore, letter or digit. */ id: string; /** * Set to true to have the CDN automatically manage this public key value. */ managed?: boolean; /** * The base64-encoded value of the Ed25519 public key. The base64 encoding can be padded (44 bytes) or unpadded (43 bytes). * Representations or encodings of the public key other than this will be rejected with an error. * **Note**: This property is sensitive and will not be displayed in the plan. */ value?: string; } interface EdgeCacheKeysetValidationSharedKey { /** * The name of the secret version in Secret Manager. * The resource name of the secret version must be in the format `projects/*/secrets/*/versions/*` where the `*` values are replaced by the secrets themselves. * The secrets must be at least 16 bytes large. The recommended secret size depends on the signature algorithm you are using. * * If you are using HMAC-SHA1, we suggest 20-byte secrets. * * If you are using HMAC-SHA256, we suggest 32-byte secrets. * See RFC 2104, Section 3 for more details on these recommendations. 
*/ secretVersion: string; } interface EdgeCacheOriginAwsV4Authentication { /** * The access key ID your origin uses to identify the key. */ accessKeyId: string; /** * The name of the AWS region that your origin is in. */ originRegion: string; /** * The Secret Manager secret version of the secret access key used by your origin. * * This is the resource name of the secret version in the format 'projects/*/secrets/*/versions/*' where the '*' values are replaced by the project, secret, and version you require. */ secretAccessKeyVersion: string; } interface EdgeCacheOriginFlexShielding { /** * Whenever possible, content will be fetched from origin and cached in or * near the specified origin. Best effort. * You must specify exactly one FlexShieldingRegion. * Each value may be one of: `AFRICA_SOUTH1`, `ME_CENTRAL1`. */ flexShieldingRegions?: string; } interface EdgeCacheOriginOriginOverrideAction { /** * The header actions, including adding and removing * headers, for request handled by this origin. * Structure is documented below. */ headerAction?: outputs.networkservices.EdgeCacheOriginOriginOverrideActionHeaderAction; /** * The URL rewrite configuration for request that are * handled by this origin. * Structure is documented below. */ urlRewrite?: outputs.networkservices.EdgeCacheOriginOriginOverrideActionUrlRewrite; } interface EdgeCacheOriginOriginOverrideActionHeaderAction { /** * Describes a header to add. * You may add a maximum of 25 request headers. * Structure is documented below. */ requestHeadersToAdds?: outputs.networkservices.EdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd[]; } interface EdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * Whether to replace all existing headers with the same name. 
* By default, added header values are appended * to the response or request headers with the * same field names. The added values are * separated by commas. * To overwrite existing values, set `replace` to `true`. */ replace?: boolean; } interface EdgeCacheOriginOriginOverrideActionUrlRewrite { /** * Prior to forwarding the request to the selected * origin, the request's host header is replaced with * contents of the hostRewrite. * This value must be between 1 and 255 characters. */ hostRewrite?: string; } interface EdgeCacheOriginOriginRedirect { /** * The set of redirect response codes that the CDN * follows. Values of * [RedirectConditions](https://cloud.google.com/media-cdn/docs/reference/rest/v1/projects.locations.edgeCacheOrigins#redirectconditions) * are accepted. */ redirectConditions?: string[]; } interface EdgeCacheOriginTimeout { /** * The maximum duration to wait for a single origin connection to be established, including DNS lookup, TLS handshake and TCP/QUIC connection establishment. * Defaults to 5 seconds. The timeout must be a value between 1s and 15s. * The connectTimeout capped by the deadline set by the request's maxAttemptsTimeout. The last connection attempt may have a smaller connectTimeout in order to adhere to the overall maxAttemptsTimeout. */ connectTimeout?: string; /** * The maximum time across all connection attempts to the origin, including failover origins, before returning an error to the client. A HTTP 504 will be returned if the timeout is reached before a response is returned. * Defaults to 15 seconds. The timeout must be a value between 1s and 30s. * If a failoverOrigin is specified, the maxAttemptsTimeout of the first configured origin sets the deadline for all connection attempts across all failoverOrigins. */ maxAttemptsTimeout?: string; /** * The maximum duration to wait between reads of a single HTTP connection/stream. * Defaults to 15 seconds. The timeout must be a value between 1s and 30s. 
* The readTimeout is capped by the responseTimeout. All reads of the HTTP connection/stream must be completed by the deadline set by the responseTimeout. * If the response headers have already been written to the connection, the response will be truncated and logged. * * The `awsV4Authentication` block supports: */ readTimeout?: string; /** * The maximum duration to wait for the last byte of a response to arrive when reading from the HTTP connection/stream. * Defaults to 30 seconds. The timeout must be a value between 1s and 120s. * The responseTimeout starts after the connection has been established. * This also applies to HTTP Chunked Transfer Encoding responses, and/or when an open-ended Range request is made to the origin. Origins that take longer to write additional bytes to the response than the configured responseTimeout will result in an error being returned to the client. * If the response headers have already been written to the connection, the response will be truncated and logged. */ responseTimeout?: string; } interface EdgeCacheServiceLogConfig { /** * Specifies whether to enable logging for traffic served by this service. */ enable: boolean; /** * Configures the sampling rate of requests, where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0, and the value of the field must be in [0, 1]. * This field can only be specified if logging is enabled for this service. */ sampleRate?: number; } interface EdgeCacheServiceRouting { /** * The list of hostRules to match against. These rules define which hostnames the EdgeCacheService will match against, and which route configurations apply. * Structure is documented below. */ hostRules: outputs.networkservices.EdgeCacheServiceRoutingHostRule[]; /** * The list of pathMatchers referenced via name by hostRules. PathMatcher is used to match the path portion of the URL when a HostRule matches the URL's host portion. * Structure is documented below. 
*/ pathMatchers: outputs.networkservices.EdgeCacheServiceRoutingPathMatcher[]; } interface EdgeCacheServiceRoutingHostRule { /** * A human-readable description of the hostRule. */ description?: string; /** * The list of host patterns to match. * Host patterns must be valid hostnames. Ports are not allowed. Wildcard hosts are supported in the suffix or prefix form. * matches any string of ([a-z0-9-.]*). It does not match the empty string. * When multiple hosts are specified, hosts are matched in the following priority: * 1. Exact domain names: ``www.foo.com``. * 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. * 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. * 4. Special wildcard ``*`` matching any domain. * Notes: * The wildcard will not match the empty string. e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. The longest wildcards match first. Only a single host in the entire service can match on ``*``. A domain must be unique across all configured hosts within a service. * Hosts are matched against the HTTP Host header, or for HTTP/2 and HTTP/3, the ":authority" header, from the incoming request. * You may specify up to 10 hosts. */ hosts: string[]; /** * The name of the pathMatcher associated with this hostRule. */ pathMatcher: string; } interface EdgeCacheServiceRoutingPathMatcher { /** * A human-readable description of the resource. */ description?: string; /** * The name to which this PathMatcher is referred by the HostRule. */ name: string; /** * The routeRules to match against. routeRules support advanced routing behaviour, and can match on paths, headers and query parameters, as well as status codes and HTTP methods. * Structure is documented below. */ routeRules: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRule[]; } interface EdgeCacheServiceRoutingPathMatcherRouteRule { /** * A human-readable description of the routeRule. 
*/ description?: string; /** * The header actions, including adding & removing headers, for requests that match this route. * Structure is documented below. */ headerAction?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction; /** * The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates * within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. * Structure is documented below. */ matchRules: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule[]; /** * The Origin resource that requests to this route should fetch from when a matching response is not in cache. Origins can be defined as short names ("my-origin") or fully-qualified resource URLs - e.g. "networkservices.googleapis.com/projects/my-project/global/edgecacheorigins/my-origin" * Only one of origin or urlRedirect can be set. */ origin?: string; /** * The priority of this route rule, where 1 is the highest priority. * You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number between 1 and 999 inclusive. * Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers * to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. */ priority: string; /** * In response to a matching path, the routeAction performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected origin. * Structure is documented below. 
*/ routeAction?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction; /** * Allow overriding the set of methods that are allowed for this route. * When not set, Media CDN allows only "GET", "HEAD", and "OPTIONS". * Structure is documented below. */ routeMethods?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteMethods; /** * The URL redirect configuration for requests that match this route. * Structure is documented below. */ urlRedirect?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction { /** * Describes a header to add. * Structure is documented below. */ requestHeaderToAdds?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd[]; /** * A list of header names for headers that need to be removed from the request prior to forwarding the request to the origin. * Structure is documented below. */ requestHeaderToRemoves?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove[]; /** * Headers to add to the response prior to sending it back to the client. * Response headers are only sent to the client, and do not have an effect on the cache serving the response. * Structure is documented below. */ responseHeaderToAdds?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd[]; /** * A list of header names for headers that need to be removed from the response prior to sending it back to the client. * Structure is documented below. */ responseHeaderToRemoves?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove[]; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. 
*/ headerValue: string; /** * Whether to replace all existing headers with the same name. */ replace: boolean; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove { /** * The name of the header to remove. */ headerName: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd { /** * The name of the header to add. */ headerName: string; /** * The value of the header to add. */ headerValue: string; /** * Whether to replace all existing headers with the same name. */ replace: boolean; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove { /** * The name of the response header to remove. * Response headers are only sent to the client, and do not have an effect on the cache serving the response. */ headerName: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule { /** * For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. */ fullPathMatch?: string; /** * Specifies a list of header match criteria, all of which must match corresponding headers in the request. * Structure is documented below. */ headerMatches?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch[]; /** * Specifies that prefixMatch and fullPathMatch matches are case sensitive. */ ignoreCase: boolean; /** * For satisfying the matchRule condition, the path of the request * must match the wildcard pattern specified in pathTemplateMatch * after removing any query parameters and anchor that may be part * of the original URL. * pathTemplateMatch must be between 1 and 255 characters * (inclusive). The pattern specified by pathTemplateMatch may * have at most 5 wildcard operators and at most 5 variable * captures in total. 
*/ pathTemplateMatch?: string; /** * For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. */ prefixMatch?: string; /** * Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. * Structure is documented below. */ queryParameterMatches?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch[]; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch { /** * The value of the header should exactly match contents of exactMatch. */ exactMatch?: string; /** * The header name to match on. */ headerName: string; /** * If set to false (default), the headerMatch is considered a match if the match criteria above are met. * If set to true, the headerMatch is considered a match if the match criteria above are NOT met. */ invertMatch: boolean; /** * The value of the header must start with the contents of prefixMatch. */ prefixMatch?: string; /** * A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. */ presentMatch?: boolean; /** * The value of the header must end with the contents of suffixMatch. */ suffixMatch?: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch { /** * The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. */ exactMatch?: string; /** * The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. */ name: string; /** * Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. 
*/ presentMatch?: boolean; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction { /** * The policy to use for defining caching and signed request behaviour for requests that match this route. * Structure is documented below. */ cdnPolicy?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy; /** * Setting the compression mode to automatic enables dynamic compression for every eligible response. * When dynamic compression is enabled, it is recommended to also set a cache policy to maximize efficiency. * Possible values are: `DISABLED`, `AUTOMATIC`. */ compressionMode?: string; /** * CORSPolicy defines Cross-Origin-Resource-Sharing configuration, including which CORS response headers will be set. * Structure is documented below. */ corsPolicy?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy; /** * The URL rewrite configuration for requests that match this route. * Structure is documented below. */ urlRewrite?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy { /** * Enable signature generation or propagation on this route. * This field may only be specified when signedRequestMode is set to REQUIRE_TOKENS. * Structure is documented below. */ addSignatures?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures; /** * Defines the request parameters that contribute to the cache key. * Structure is documented below. */ cacheKeyPolicy?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy; /** * Cache modes allow users to control the behaviour of the cache, what content it should cache automatically, whether to respect origin headers, or whether to unconditionally cache all responses. * For all cache modes, Cache-Control headers will be passed to the client. 
Use clientTtl to override what is sent to the client. * Possible values are: `CACHE_ALL_STATIC`, `USE_ORIGIN_HEADERS`, `FORCE_CACHE_ALL`, `BYPASS_CACHE`. */ cacheMode: string; /** * Specifies a separate client (e.g. browser client) TTL, separate from the TTL used by the edge caches. Leaving this empty will use the same cache TTL for both the CDN and the client-facing response. * - The TTL must be > 0 and <= 86400s (1 day) * - The clientTtl cannot be larger than the defaultTtl (if set) * - Fractions of a second are not allowed. * Omit this field to use the defaultTtl, or the max-age set by the origin, as the client-facing TTL. * When the cache mode is set to "USE_ORIGIN_HEADERS" or "BYPASS_CACHE", you must omit this field. * A duration in seconds terminated by 's'. Example: "3s". */ clientTtl?: string; /** * Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). * Defaults to 3600s (1 hour). * - The TTL must be >= 0 and <= 31,536,000 seconds (1 year) * - Setting a TTL of "0" means "always revalidate" (equivalent to must-revalidate) * - The value of defaultTTL cannot be set to a value greater than that of maxTTL. * - Fractions of a second are not allowed. * - When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. * Note that infrequently accessed objects may be evicted from the cache before the defined TTL. Objects that expire will be revalidated with the origin. * When the cache mode is set to "USE_ORIGIN_HEADERS" or "BYPASS_CACHE", you must omit this field. * A duration in seconds terminated by 's'. Example: "3s". */ defaultTtl: string; /** * Specifies the maximum allowed TTL for cached content served by this origin. * Defaults to 86400s (1 day). 
* Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTtl seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. * - The TTL must be >= 0 and <= 31,536,000 seconds (1 year) * - Setting a TTL of "0" means "always revalidate" * - The value of maxTtl must be equal to or greater than defaultTtl. * - Fractions of a second are not allowed. * When the cache mode is set to "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", or "BYPASS_CACHE", you must omit this field. * A duration in seconds terminated by 's'. Example: "3s". */ maxTtl: string; /** * Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. * By default, the CDNPolicy will apply the following default TTLs to these status codes: * - HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m * - HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s * - HTTP 405 (Method Not Found), 414 (URI Too Long), 501 (Not Implemented): 60s * These defaults can be overridden in negativeCachingPolicy */ negativeCaching?: boolean; /** * Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. * - Omitting the policy and leaving negativeCaching enabled will use the default TTLs for each status code, defined in negativeCaching. * - TTLs must be >= 0 (where 0 is "always revalidate") and <= 86400s (1 day) * Note that when specifying an explicit negativeCachingPolicy, you should take care to specify a cache TTL for all response codes that you wish to cache. The CDNPolicy will not apply any default negative caching when a policy exists. 
*/ negativeCachingPolicy?: { [key: string]: string; }; /** * The EdgeCacheKeyset containing the set of public keys used to validate signed requests at the edge. */ signedRequestKeyset: string; /** * Limit how far into the future the expiration time of a signed request may be. * When set, a signed request is rejected if its expiration time is later than now + signedRequestMaximumExpirationTtl, where now is the time at which the signed request is first handled by the CDN. * - The TTL must be > 0. * - Fractions of a second are not allowed. * By default, signedRequestMaximumExpirationTtl is not set and the expiration time of a signed request may be arbitrarily far into future. */ signedRequestMaximumExpirationTtl?: string; /** * Whether to enforce signed requests. The default value is DISABLED, which means all content is public, and does not authorize access. * You must also set a signedRequestKeyset to enable signed requests. * When set to REQUIRE_SIGNATURES, all matching requests will have their signature validated. Requests that were not signed with the corresponding private key, or that are otherwise invalid (expired, do not match the signature, IP address, or header) will be rejected with a HTTP 403 and (if enabled) logged. * Possible values are: `DISABLED`, `REQUIRE_SIGNATURES`, `REQUIRE_TOKENS`. */ signedRequestMode: string; /** * Additional options for signed tokens. * signedTokenOptions may only be specified when signedRequestMode is REQUIRE_TOKENS. * Structure is documented below. */ signedTokenOptions?: outputs.networkservices.EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures { /** * The actions to take to add signatures to responses. * Each value may be one of: `GENERATE_COOKIE`, `GENERATE_TOKEN_HLS_COOKIELESS`, `PROPAGATE_TOKEN_HLS_COOKIELESS`. 
*/ actions: string; /** * The parameters to copy from the verified token to the generated token. * Only the following parameters may be copied: * * `PathGlobs` */ copiedParameters?: string[]; /** * The keyset to use for signature generation. * The following are both valid paths to an EdgeCacheKeyset resource: * * `projects/project/locations/global/edgeCacheKeysets/yourKeyset` * * `yourKeyset` * This must be specified when the GENERATE_COOKIE or GENERATE_TOKEN_HLS_COOKIELESS actions are specified. This field may not be specified otherwise. */ keyset?: string; /** * The query parameter in which to put the generated token. * If not specified, defaults to `edge-cache-token`. * If specified, the name must be 1-64 characters long and match the regular expression `a-zA-Z*` which means the first character must be a letter, and all following characters must be a dash, underscore, letter or digit. * This field may only be set when the GENERATE_TOKEN_HLS_COOKIELESS or PROPAGATE_TOKEN_HLS_COOKIELESS actions are specified. */ tokenQueryParameter?: string; /** * The duration the token is valid starting from the moment the token is first generated. * Defaults to `86400s` (1 day). * The TTL must be >= 0 and <= 604,800 seconds (1 week). * This field may only be specified when the GENERATE_COOKIE or GENERATE_TOKEN_HLS_COOKIELESS actions are specified. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ tokenTtl?: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy { /** * If true, requests to different hosts will be cached separately. * Note: this should only be enabled if hosts share the same origin and content. Removing the host from the cache key may inadvertently result in different objects being cached than intended, depending on which route the first user matched. 
*/ excludeHost: boolean; /** * If true, exclude query string parameters from the cache key * If false (the default), include the query string parameters in * the cache key according to includeQueryParameters and * excludeQueryParameters. If neither includeQueryParameters nor * excludeQueryParameters is set, the entire query string will be * included. */ excludeQueryString?: boolean; /** * Names of query string parameters to exclude from cache keys. All other parameters will be included. * Either specify includedQueryParameters or excludedQueryParameters, not both. '&' and '=' will be percent encoded and not treated as delimiters. */ excludedQueryParameters?: string[]; /** * If true, http and https requests will be cached separately. */ includeProtocol: boolean; /** * Names of Cookies to include in cache keys. The cookie name and cookie value of each cookie named will be used as part of the cache key. * Cookie names: * - must be valid RFC 6265 "cookie-name" tokens * - are case sensitive * - cannot start with "Edge-Cache-" (case insensitive) * Note that specifying several cookies, and/or cookies that have a large range of values (e.g., per-user) will dramatically impact the cache hit rate, and may result in a higher eviction rate and reduced performance. * You may specify up to three cookie names. */ includedCookieNames?: string[]; /** * Names of HTTP request headers to include in cache keys. The value of the header field will be used as part of the cache key. * - Header names must be valid HTTP RFC 7230 header field values. * - Header field names are case insensitive * - To include the HTTP method, use ":method" * Note that specifying several headers, and/or headers that have a large range of values (e.g. per-user) will dramatically impact the cache hit rate, and may result in a higher eviction rate and reduced performance. */ includedHeaderNames?: string[]; /** * Names of query string parameters to include in cache keys. All other parameters will be excluded. 
* Either specify includedQueryParameters or excludedQueryParameters, not both. '&' and '=' will be percent encoded and not treated as delimiters. */ includedQueryParameters?: string[]; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions { /** * The allowed signature algorithms to use. * Defaults to using only ED25519. * You may specify up to 3 signature algorithms to use. * Each value may be one of: `ED25519`, `HMAC_SHA_256`, `HMAC_SHA1`. */ allowedSignatureAlgorithms?: string[]; /** * The query parameter in which to find the token. * The name must be 1-64 characters long and match the regular expression `a-zA-Z*` which means the first character must be a letter, and all following characters must be a dash, underscore, letter or digit. * Defaults to `edge-cache-token`. */ tokenQueryParameter?: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. * This translates to the Access-Control-Allow-Credentials response header. */ allowCredentials?: boolean; /** * Specifies the content for the Access-Control-Allow-Headers response header. */ allowHeaders?: string[]; /** * Specifies the content for the Access-Control-Allow-Methods response header. */ allowMethods?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. * This translates to the Access-Control-Allow-Origin response header. */ allowOrigins?: string[]; /** * If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for the Access-Control-Expose-Headers response header. */ exposeHeaders?: string[]; /** * Specifies how long results of a preflight request can be cached by a client in seconds. Note that many browser clients enforce a maximum TTL of 600s (10 minutes). 
* - Setting the value to -1 forces a pre-flight check for all requests (not recommended) * - A maximum TTL of 86400s can be set, but note that (as above) some clients may force pre-flight checks at a more regular interval. * - This translates to the Access-Control-Max-Age header. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ maxAge: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite { /** * Prior to forwarding the request to the selected origin, the request's host header is replaced with contents of hostRewrite. */ hostRewrite?: string; /** * Prior to forwarding the request to the selected origin, the matching portion of the request's path is replaced by pathPrefixRewrite. */ pathPrefixRewrite?: string; /** * Prior to forwarding the request to the selected origin, if the * request matched a pathTemplateMatch, the matching portion of the * request's path is replaced re-written using the pattern specified * by pathTemplateRewrite. * pathTemplateRewrite must be between 1 and 255 characters * (inclusive), must start with a '/', and must only use variables * captured by the route's pathTemplate matchers. * pathTemplateRewrite may only be used when all of a route's * MatchRules specify pathTemplate. * Only one of pathPrefixRewrite and pathTemplateRewrite may be * specified. */ pathTemplateRewrite?: string; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleRouteMethods { /** * The non-empty set of HTTP methods that are allowed for this route. * Any combination of "GET", "HEAD", "OPTIONS", "PUT", "POST", "DELETE", and "PATCH". */ allowedMethods?: string[]; } interface EdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect { /** * The host that will be used in the redirect response instead of the one that was supplied in the request. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. 
If set to false, the URL scheme of the redirected request will remain the same as that of the request. * This can only be set if there is at least one (1) edgeSslCertificate set on the service. */ httpsRedirect: boolean; /** * The path that will be used in the redirect response instead of the one that was supplied in the request. * pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect. * The path value must be between 1 and 1024 characters. */ pathRedirect?: string; /** * The prefix that replaces the prefixMatch specified in the routeRule, retaining the remaining portion of the URL before redirecting the request. * prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect. */ prefixRedirect?: string; /** * The HTTP Status code to use for this RedirectAction. * The supported values are: * - `MOVED_PERMANENTLY_DEFAULT`, which is the default value and corresponds to 301. * - `FOUND`, which corresponds to 302. * - `SEE_OTHER` which corresponds to 303. * - `TEMPORARY_REDIRECT`, which corresponds to 307. in this case, the request method will be retained. * - `PERMANENT_REDIRECT`, which corresponds to 308. in this case, the request method will be retained. * Possible values are: `MOVED_PERMANENTLY_DEFAULT`, `FOUND`, `SEE_OTHER`, `TEMPORARY_REDIRECT`, `PERMANENT_REDIRECT`. */ redirectResponseCode: string; /** * If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. If set to false, the query portion of the original URL is retained. */ stripQuery: boolean; } interface EndpointPolicyEndpointMatcher { /** * The matcher is based on node metadata presented by xDS clients. * Structure is documented below. 
*/ metadataLabelMatcher: outputs.networkservices.EndpointPolicyEndpointMatcherMetadataLabelMatcher; } interface EndpointPolicyEndpointMatcherMetadataLabelMatcher { /** * Specifies how matching should be done. * Possible values are: `MATCH_ANY`, `MATCH_ALL`. */ metadataLabelMatchCriteria: string; /** * The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria * Structure is documented below. */ metadataLabels?: outputs.networkservices.EndpointPolicyEndpointMatcherMetadataLabelMatcherMetadataLabel[]; } interface EndpointPolicyEndpointMatcherMetadataLabelMatcherMetadataLabel { /** * Required. Label name presented as key in xDS Node Metadata. */ labelName: string; /** * Required. Label value presented as value corresponding to the above key, in xDS Node Metadata. */ labelValue: string; } interface EndpointPolicyTrafficPortSelector { /** * List of ports. Can be port numbers or port range (example, [80-90] specifies all ports from 80 to 90, including 80 and 90) or named ports or * to specify all ports. If the list is empty, all ports are selected. */ ports: string[]; } interface GrpcRouteRule { /** * Required. A detailed rule defining how to route traffic. * Structure is documented below. */ action?: outputs.networkservices.GrpcRouteRuleAction; /** * Matches define conditions used for matching the rule against incoming gRPC requests. * Structure is documented below. */ matches?: outputs.networkservices.GrpcRouteRuleMatch[]; } interface GrpcRouteRuleAction { /** * The destination to which traffic should be forwarded. * Structure is documented below. */ destinations?: outputs.networkservices.GrpcRouteRuleActionDestination[]; /** * The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. * Structure is documented below. 
*/ faultInjectionPolicy?: outputs.networkservices.GrpcRouteRuleActionFaultInjectionPolicy; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.networkservices.GrpcRouteRuleActionRetryPolicy; /** * Specifies the timeout for selected route. */ timeout?: string; } interface GrpcRouteRuleActionDestination { /** * The URL of a BackendService to route traffic to. */ serviceName?: string; /** * Specifies the proportion of requests forwarded to the backend referenced by the serviceName field. */ weight?: number; } interface GrpcRouteRuleActionFaultInjectionPolicy { /** * Specification of how client requests are aborted as part of fault injection before being sent to a destination. * Structure is documented below. */ abort?: outputs.networkservices.GrpcRouteRuleActionFaultInjectionPolicyAbort; /** * Specification of how client requests are delayed as part of fault injection before being sent to a destination. * Structure is documented below. */ delay?: outputs.networkservices.GrpcRouteRuleActionFaultInjectionPolicyDelay; } interface GrpcRouteRuleActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. */ httpStatus?: number; /** * The percentage of traffic which will be aborted. */ percentage?: number; } interface GrpcRouteRuleActionFaultInjectionPolicyDelay { /** * Specify a fixed delay before forwarding the request. */ fixedDelay?: string; /** * The percentage of traffic on which delay will be injected. */ percentage?: number; } interface GrpcRouteRuleActionRetryPolicy { /** * Specifies the allowed number of retries. */ numRetries?: number; /** * Specifies one or more conditions when this retry policy applies. * Each value may be one of: `connect-failure`, `refused-stream`, `cancelled`, `deadline-exceeded`, `resource-exhausted`, `unavailable`. */ retryConditions?: string[]; } interface GrpcRouteRuleMatch { /** * Specifies a list of HTTP request headers to match against. 
* Structure is documented below. */ headers?: outputs.networkservices.GrpcRouteRuleMatchHeader[]; /** * A gRPC method to match against. If this field is empty or omitted, will match all methods. * Structure is documented below. */ method?: outputs.networkservices.GrpcRouteRuleMatchMethod; } interface GrpcRouteRuleMatchHeader { /** * Required. The key of the header. */ key: string; /** * The type of match. * Default value is `EXACT`. * Possible values are: `TYPE_UNSPECIFIED`, `EXACT`, `REGULAR_EXPRESSION`. */ type?: string; /** * Required. The value of the header. */ value: string; } interface GrpcRouteRuleMatchMethod { /** * Specifies that matches are case sensitive. The default value is true. */ caseSensitive?: boolean; /** * Required. Name of the method to match against. */ grpcMethod: string; /** * Required. Name of the service to match against. */ grpcService: string; } interface HttpRouteRule { /** * The detailed rule defining how to route matched traffic. * Structure is documented below. */ action?: outputs.networkservices.HttpRouteRuleAction; /** * A list of matches define conditions used for matching the rule against incoming HTTP requests. Each match is independent, i.e. this rule will be matched if ANY one of the matches is satisfied. * If no matches field is specified, this rule will unconditionally match traffic. * If a default rule is desired to be configured, add a rule with no matches specified to the end of the rules list. * Structure is documented below. */ matches?: outputs.networkservices.HttpRouteRuleMatch[]; } interface HttpRouteRuleAction { /** * The specification for allowing client side cross-origin requests. * Structure is documented below. */ corsPolicy?: outputs.networkservices.HttpRouteRuleActionCorsPolicy; /** * The destination to which traffic should be forwarded. * Structure is documented below. 
*/ destinations?: outputs.networkservices.HttpRouteRuleActionDestination[]; /** * The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. * Structure is documented below. */ faultInjectionPolicy?: outputs.networkservices.HttpRouteRuleActionFaultInjectionPolicy; /** * If set, the request is directed as configured by this field. * Structure is documented below. */ redirect?: outputs.networkservices.HttpRouteRuleActionRedirect; /** * The specification for modifying the headers of a matching request prior to delivery of the request to the destination. * Structure is documented below. */ requestHeaderModifier?: outputs.networkservices.HttpRouteRuleActionRequestHeaderModifier; /** * Specifies the policy on how requests intended for the routes destination are shadowed to a separate mirrored destination. * Structure is documented below. */ requestMirrorPolicy?: outputs.networkservices.HttpRouteRuleActionRequestMirrorPolicy; /** * The specification for modifying the headers of a response prior to sending the response back to the client. * Structure is documented below. */ responseHeaderModifier?: outputs.networkservices.HttpRouteRuleActionResponseHeaderModifier; /** * Specifies the retry policy associated with this route. * Structure is documented below. */ retryPolicy?: outputs.networkservices.HttpRouteRuleActionRetryPolicy; /** * Specifies the timeout for selected route. */ timeout?: string; /** * The specification for rewrite URL before forwarding requests to the destination. * Structure is documented below. */ urlRewrite?: outputs.networkservices.HttpRouteRuleActionUrlRewrite; } interface HttpRouteRuleActionCorsPolicy { /** * In response to a preflight request, setting this to true indicates that the actual request can include user credentials. */ allowCredentials?: boolean; /** * Specifies the content for Access-Control-Allow-Headers header. 
*/ allowHeaders?: string[]; /** * Specifies the content for Access-Control-Allow-Methods header. */ allowMethods?: string[]; /** * Specifies the regular expression patterns that match allowed origins. */ allowOriginRegexes?: string[]; /** * Specifies the list of origins that will be allowed to do CORS requests. */ allowOrigins?: string[]; /** * If true, the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. */ disabled?: boolean; /** * Specifies the content for Access-Control-Expose-Headers header. */ exposeHeaders?: string[]; /** * Specifies how long result of a preflight request can be cached in seconds. */ maxAge?: string; } interface HttpRouteRuleActionDestination { /** * The URL of a BackendService to route traffic to. */ serviceName?: string; /** * Specifies the proportion of requests forwarded to the backend referenced by the serviceName field. This is computed as: weight/Sum(weights in this destination list). For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. * If only one serviceName is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. * If weights are specified for any one service name, they need to be specified for all of them. * If weights are unspecified for all services, then, traffic is distributed in equal proportions to all of them. */ weight?: number; } interface HttpRouteRuleActionFaultInjectionPolicy { /** * Specification of how client requests are aborted as part of fault injection before being sent to a destination. * Structure is documented below. */ abort?: outputs.networkservices.HttpRouteRuleActionFaultInjectionPolicyAbort; /** * Specification of how client requests are delayed as part of fault injection before being sent to a destination. * Structure is documented below. 
*/ delay?: outputs.networkservices.HttpRouteRuleActionFaultInjectionPolicyDelay; } interface HttpRouteRuleActionFaultInjectionPolicyAbort { /** * The HTTP status code used to abort the request. */ httpStatus?: number; /** * The percentage of traffic which will be aborted. */ percentage?: number; } interface HttpRouteRuleActionFaultInjectionPolicyDelay { /** * Specify a fixed delay before forwarding the request. */ fixedDelay?: string; /** * The percentage of traffic on which delay will be injected. */ percentage?: number; } interface HttpRouteRuleActionRedirect { /** * The host that will be used in the redirect response instead of the one that was supplied in the request. */ hostRedirect?: string; /** * If set to true, the URL scheme in the redirected request is set to https. */ httpsRedirect?: boolean; /** * The path that will be used in the redirect response instead of the one that was supplied in the request. pathRedirect can not be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect. */ pathRedirect?: string; /** * The port that will be used in the redirected request instead of the one that was supplied in the request. */ portRedirect?: number; /** * Indicates that during redirection, the matched prefix (or path) should be swapped with this value. */ prefixRewrite?: string; /** * The HTTP Status code to use for the redirect. */ responseCode?: string; /** * If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. */ stripQuery?: boolean; } interface HttpRouteRuleActionRequestHeaderModifier { /** * Add the headers with given map where key is the name of the header, value is the value of the header. */ add?: { [key: string]: string; }; /** * Remove headers (matching by header names) specified in the list. 
*/ removes?: string[]; /** * Completely overwrite/replace the headers with given map where key is the name of the header, value is the value of the header. */ set?: { [key: string]: string; }; } interface HttpRouteRuleActionRequestMirrorPolicy { /** * The destination the requests will be mirrored to. * Structure is documented below. */ destination?: outputs.networkservices.HttpRouteRuleActionRequestMirrorPolicyDestination; } interface HttpRouteRuleActionRequestMirrorPolicyDestination { /** * The URL of a BackendService to route traffic to. */ serviceName?: string; /** * Specifies the proportion of requests forwarded to the backend referenced by the serviceName field. This is computed as: weight/Sum(weights in this destination list). For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. * If only one serviceName is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. * If weights are specified for any one service name, they need to be specified for all of them. * If weights are unspecified for all services, then, traffic is distributed in equal proportions to all of them. */ weight?: number; } interface HttpRouteRuleActionResponseHeaderModifier { /** * Add the headers with given map where key is the name of the header, value is the value of the header. */ add?: { [key: string]: string; }; /** * Remove headers (matching by header names) specified in the list. */ removes?: string[]; /** * Completely overwrite/replace the headers with given map where key is the name of the header, value is the value of the header. */ set?: { [key: string]: string; }; } interface HttpRouteRuleActionRetryPolicy { /** * Specifies the allowed number of retries. */ numRetries?: number; /** * Specifies a non-zero timeout per retry attempt. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
*/ perTryTimeout?: string; /** * Specifies one or more conditions when this retry policy applies. */ retryConditions?: string[]; } interface HttpRouteRuleActionUrlRewrite { /** * Prior to forwarding the request to the selected destination, the requests host header is replaced by this value. */ hostRewrite?: string; /** * Prior to forwarding the request to the selected destination, the matching portion of the requests path is replaced by this value. */ pathPrefixRewrite?: string; } interface HttpRouteRuleMatch { /** * The HTTP request path value should exactly match this value. */ fullPathMatch?: string; /** * Specifies a list of HTTP request headers to match against. * Structure is documented below. */ headers?: outputs.networkservices.HttpRouteRuleMatchHeader[]; /** * Specifies if prefixMatch and fullPathMatch matches are case sensitive. The default value is false. */ ignoreCase?: boolean; /** * The HTTP request path value must begin with specified prefixMatch. prefixMatch must begin with a /. */ prefixMatch?: string; /** * Specifies a list of query parameters to match against. * Structure is documented below. */ queryParameters?: outputs.networkservices.HttpRouteRuleMatchQueryParameter[]; /** * The HTTP request path value must satisfy the regular expression specified by regexMatch after removing any query parameters and anchor supplied with the original URL. For regular expression grammar, please see https://github.com/google/re2/wiki/Syntax */ regexMatch?: string; } interface HttpRouteRuleMatchHeader { /** * The value of the header should match exactly the content of exactMatch. */ exactMatch?: string; /** * The name of the HTTP header to match against. */ header?: string; /** * If specified, the match result will be inverted before checking. Default value is set to false. */ invertMatch?: boolean; /** * The value of the header must start with the contents of prefixMatch. */ prefixMatch?: string; /** * A header with headerName must exist. 
The match takes place whether or not the header has a value. */ presentMatch?: boolean; /** * If specified, the rule will match if the request header value is within the range. * Structure is documented below. */ rangeMatch?: outputs.networkservices.HttpRouteRuleMatchHeaderRangeMatch; /** * The value of the header must match the regular expression specified in regexMatch. */ regexMatch?: string; /** * The value of the header must end with the contents of suffixMatch. */ suffixMatch?: string; } interface HttpRouteRuleMatchHeaderRangeMatch { /** * End of the range (exclusive). */ end: number; /** * Start of the range (inclusive). */ start: number; } interface HttpRouteRuleMatchQueryParameter { /** * The value of the query parameter must exactly match the contents of exactMatch. */ exactMatch?: string; /** * Specifies that the QueryParameterMatcher matches if request contains query parameter, irrespective of whether the parameter has a value or not. */ presentMatch?: boolean; /** * The name of the query parameter to match. */ queryParameter?: string; /** * The value of the query parameter must match the regular expression specified by regexMatch.For regular expression grammar, please see https://github.com/google/re2/wiki/Syntax */ regexMatch?: string; } interface LbEdgeExtensionExtensionChain { /** * A set of extensions to execute for the matching request. * At least one extension is required. Up to 3 extensions can be defined for each extension chain for * LbTrafficExtension resource. LbRouteExtension chains are limited to 1 extension per extension chain. * Structure is documented below. */ extensions: outputs.networkservices.LbEdgeExtensionExtensionChainExtension[]; /** * Conditions under which this chain is invoked for a request. * Structure is documented below. */ matchCondition: outputs.networkservices.LbEdgeExtensionExtensionChainMatchCondition; /** * The name for this extension chain. The name is logged as part of the HTTP request logs. 
* The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, * and can have a maximum length of 63 characters. Additionally, the first character must be a letter * and the last character must be a letter or a number. */ name: string; } interface LbEdgeExtensionExtensionChainExtension { /** * Determines how the proxy behaves if the call to the extension fails or times out. * When set to TRUE, request or response processing continues without error. * Any subsequent extensions in the extension chain are also executed. * When set to FALSE: * If response headers have not been delivered to the downstream client, * a generic 500 error is returned to the client. The error response can be tailored by * configuring a custom error response in the load balancer. */ failOpen?: boolean; /** * List of the HTTP headers to forward to the extension (from the client or backend). * If omitted, all headers are sent. Each element is a string indicating the header name. */ forwardHeaders?: string[]; /** * The name for this extension. The name is logged as part of the HTTP request logs. * The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, * and can have a maximum length of 63 characters. Additionally, the first character must be a letter * and the last a letter or a number. */ name: string; /** * The reference to the service that runs the extension. * * To configure a callout extension, service must be a fully-qualified reference to a backend service. * * To configure a plugin extension, service must be a reference to a WasmPlugin resource. */ service: string; /** * A set of events during request or response processing for which this extension is called. * This field is required for the LbEdgeExtension resource and only supports the value `REQUEST_HEADERS`. 
*/ supportedEvents?: string[]; } interface LbEdgeExtensionExtensionChainMatchCondition { /** * A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. */ celExpression: string; } interface LbRouteExtensionExtensionChain { /** * A set of extensions to execute for the matching request. * At least one extension is required. Up to 3 extensions can be defined for each extension chain for * LbTrafficExtension resource. LbRouteExtension chains are limited to 1 extension per extension chain. * Further documentation can be found at https://cloud.google.com/service-extensions/docs/reference/rest/v1/ExtensionChain#Extension * Structure is documented below. */ extensions: outputs.networkservices.LbRouteExtensionExtensionChainExtension[]; /** * Conditions under which this chain is invoked for a request. * Structure is documented below. */ matchCondition: outputs.networkservices.LbRouteExtensionExtensionChainMatchCondition; /** * The name for this extension chain. The name is logged as part of the HTTP request logs. * The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, * and can have a maximum length of 63 characters. Additionally, the first character must be a letter * and the last character must be a letter or a number. */ name: string; } interface LbRouteExtensionExtensionChainExtension { /** * The :authority header in the gRPC request sent from Envoy to the extension service. */ authority?: string; /** * Determines how the proxy behaves if the call to the extension fails or times out. * When set to TRUE, request or response processing continues without error. * Any subsequent extensions in the extension chain are also executed. * When set to FALSE: * If response headers have not been delivered to the downstream client, * a generic 500 error is returned to the client. The error response can be tailored by * configuring a custom error response in the load balancer. 
*/ failOpen?: boolean; /** * List of the HTTP headers to forward to the extension (from the client or backend). * If omitted, all headers are sent. Each element is a string indicating the header name. */ forwardHeaders?: string[]; /** * The metadata provided here is included as part of the `metadataContext` (of type `google.protobuf.Struct`) * in the `ProcessingRequest` message sent to the extension server. * The metadata is available under the namespace `com.google.lb_route_extension...`. * The following variables are supported in the metadata: `{forwarding_rule_id}` - substituted with the forwarding rule's fully qualified resource name. * This field must not be set for plugin extensions. Setting it results in a validation error. */ metadata?: { [key: string]: string; }; /** * The name for this extension. The name is logged as part of the HTTP request logs. * The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, * and can have a maximum length of 63 characters. Additionally, the first character must be a letter * and the last a letter or a number. */ name: string; /** * When set to `TRUE`, enables `observabilityMode` on the `extProc` filter. * This makes `extProc` calls asynchronous. Envoy doesn't check for the response from `extProc` calls. * For more information about the filter, see: https://www.envoyproxy.io/docs/envoy/v1.32.3/api-v3/extensions/filters/http/ext_proc/v3/ext_proc.proto * This field is helpful when you want to try out the extension in async log-only mode. * Supported by regional `LbTrafficExtension` and `LbRouteExtension` resources. * Only `STREAMED` (default) body processing mode is supported. */ observabilityMode?: boolean; /** * Configures the send mode for request body processing. * The field can only be set if `supportedEvents` includes `REQUEST_BODY`. * If `supportedEvents` includes `REQUEST_BODY`, but `requestBodySendMode` is unset, the default value `STREAMED` is used. 
* When this field is set to `FULL_DUPLEX_STREAMED`, `supportedEvents` must include both `REQUEST_BODY` and `REQUEST_TRAILERS`. * This field can be set only when the `service` field of the extension points to a `BackendService`. * Only `FULL_DUPLEX_STREAMED` mode is supported for `LbRouteExtension` resources. * Possible values are: `BODY_SEND_MODE_UNSPECIFIED`, `BODY_SEND_MODE_STREAMED`, `BODY_SEND_MODE_FULL_DUPLEX_STREAMED`. */ requestBodySendMode?: string; /** * The reference to the service that runs the extension. * * To configure a callout extension, service must be a fully-qualified reference to a backend service. * * To configure a plugin extension, service must be a reference to a WasmPlugin resource. */ service: string; /** * A set of events during request or response processing for which this extension is called. * This field is optional for the LbRouteExtension resource. If unspecified, `REQUEST_HEADERS` event is assumed as supported. * Possible values: `REQUEST_HEADERS`, `REQUEST_BODY`, `REQUEST_TRAILERS`. */ supportedEvents?: string[]; /** * Specifies the timeout for each individual message on the stream. The timeout must be between 10-1000 milliseconds. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ timeout?: string; } interface LbRouteExtensionExtensionChainMatchCondition { /** * A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. */ celExpression: string; } interface LbTrafficExtensionExtensionChain { /** * A set of extensions to execute for the matching request. * At least one extension is required. Up to 3 extensions can be defined for each extension chain for * LbTrafficExtension resource. LbRouteExtension chains are limited to 1 extension per extension chain. * Further documentation to be found at https://cloud.google.com/service-extensions/docs/reference/rest/v1/ExtensionChain#Extension * Structure is documented below. 
*/ extensions: outputs.networkservices.LbTrafficExtensionExtensionChainExtension[]; /** * Conditions under which this chain is invoked for a request. * Structure is documented below. */ matchCondition: outputs.networkservices.LbTrafficExtensionExtensionChainMatchCondition; /** * The name for this extension chain. The name is logged as part of the HTTP request logs. * The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, * and can have a maximum length of 63 characters. Additionally, the first character must be a letter * and the last a letter or a number. */ name: string; } interface LbTrafficExtensionExtensionChainExtension { /** * The :authority header in the gRPC request sent from Envoy to the extension service. */ authority?: string; /** * Determines how the proxy behaves if the call to the extension fails or times out. * When set to TRUE, request or response processing continues without error. * Any subsequent extensions in the extension chain are also executed. * When set to FALSE: * If response headers have not been delivered to the downstream client, * a generic 500 error is returned to the client. The error response can be tailored by * configuring a custom error response in the load balancer. */ failOpen?: boolean; /** * List of the HTTP headers to forward to the extension (from the client or backend). * If omitted, all headers are sent. Each element is a string indicating the header name. */ forwardHeaders?: string[]; /** * Metadata associated with the extension. This field is used to pass metadata to the extension service. * You can set up key value pairs for metadata as you like and need. * f.e. {"key": "value", "key2": "value2"}. */ metadata?: { [key: string]: string; }; /** * The name for this extension. The name is logged as part of the HTTP request logs. * The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, * and can have a maximum length of 63 characters. 
Additionally, the first character must be a letter * and the last a letter or a number. */ name: string; /** * The reference to the service that runs the extension. * * To configure a callout extension, service must be a fully-qualified reference to a backend service. * * To configure a plugin extension, service must be a reference to a WasmPlugin resource. */ service: string; /** * A set of events during request or response processing for which this extension is called. * This field is required for the LbTrafficExtension resource. It's not relevant for the LbRouteExtension * resource. Possible values:`EVENT_TYPE_UNSPECIFIED`, `REQUEST_HEADERS`, `REQUEST_BODY`, `RESPONSE_HEADERS`, * `RESPONSE_BODY`, `RESPONSE_BODY` and `RESPONSE_BODY`. */ supportedEvents?: string[]; /** * Specifies the timeout for each individual message on the stream. The timeout must be between 10-1000 milliseconds. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ timeout?: string; } interface LbTrafficExtensionExtensionChainMatchCondition { /** * A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. */ celExpression: string; } interface MulticastConsumerAssociationState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastDomainActivationState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastDomainActivationTrafficSpec { /** * Aggregated egress Packet-Per-Second for all multicast groups in the domain * in this zone. */ aggrEgressPps: string; /** * Aggregated ingress Packet-Per-Second for all multicast groups in the domain * in this zone. 
Default to (aggregated_egress_pps / * max_per_group_subscribers) * 2. */ aggrIngressPps: string; /** * Average packet size (Default to 512 bytes). */ avgPacketSize: number; /** * Maximum ingress Packet-Per-Second for a single multicast group in this * zone. Default to aggregatedIngressPps / 2. */ maxPerGroupIngressPps: string; /** * Maximum number of subscribers for a single multicast group in this zone. * Default to max(50, aggregatedEgressPps / aggregated_ingress_pps). */ maxPerGroupSubscribers: string; } interface MulticastDomainConnectionConfig { /** * The VPC connection type. * Possible values: * NCC * SAME_VPC */ connectionType: string; /** * The resource name of the * [NCC](https://cloud.google.com/network-connectivity-center) hub. * Use the following format: * `projects/{project}/locations/global/hubs/{hub}`. */ nccHub?: string; } interface MulticastDomainGroupState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastDomainState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastDomainUllMulticastDomain { /** * The preconfigured Ultra-Low-Latency domain name. */ preconfiguredUllDomain?: string; } interface MulticastGroupConsumerActivationLogConfig { /** * Whether to enable logging or not. */ enabled?: boolean; } interface MulticastGroupConsumerActivationState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastGroupProducerActivationState { /** * (Output) * The state of the multicast resource. 
* Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastGroupRangeActivationLogConfig { /** * Whether to enable logging or not. */ enabled?: boolean; } interface MulticastGroupRangeActivationState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastGroupRangeLogConfig { /** * Whether to enable logging or not. */ enabled?: boolean; } interface MulticastGroupRangeState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface MulticastProducerAssociationState { /** * (Output) * The state of the multicast resource. * Possible values: * CREATING * ACTIVE * DELETING * DELETE_FAILED * UPDATING * UPDATE_FAILED * INACTIVE */ state: string; } interface ServiceLbPoliciesAutoCapacityDrain { /** * Optional. If set to 'True', an unhealthy MIG/NEG will be set as drained. - An MIG/NEG is considered unhealthy if less than 25% of the instances/endpoints in the MIG/NEG are healthy. - This option will never result in draining more than 50% of the configured IGs/NEGs for the Backend Service. */ enable?: boolean; } interface ServiceLbPoliciesFailoverConfig { /** * Optional. The percentage threshold that a load balancer will begin to send traffic to failover backends. If the percentage of endpoints in a MIG/NEG is smaller than this value, traffic would be sent to failover backends if possible. This field should be set to a value between 1 and 99. The default value is 50 for Global external HTTP(S) load balancer (classic) and Proxyless service mesh, and 70 for others. */ failoverHealthThreshold: number; } interface ServiceLbPoliciesIsolationConfig { /** * The isolation granularity of the load balancer. 
* Possible values are: `ISOLATION_GRANULARITY_UNSPECIFIED`, `REGION`. */ isolationGranularity?: string; /** * The isolation mode of the load balancer. * Default value is `NEAREST`. * Possible values are: `ISOLATION_MODE_UNSPECIFIED`, `NEAREST`, `STRICT`. */ isolationMode?: string; } interface TcpRouteRule { /** * A detailed rule defining how to route traffic. * Structure is documented below. */ action: outputs.networkservices.TcpRouteRuleAction; /** * RouteMatch defines the predicate used to match requests to a given action. Multiple match types are "OR"ed for evaluation. * If no routeMatch field is specified, this rule will unconditionally match traffic. * Structure is documented below. */ matches?: outputs.networkservices.TcpRouteRuleMatch[]; } interface TcpRouteRuleAction { /** * The destination services to which traffic should be forwarded. At least one destination service is required. * Structure is documented below. */ destinations?: outputs.networkservices.TcpRouteRuleActionDestination[]; /** * Specifies the idle timeout for the selected route. The idle timeout is defined as the period in which there are no bytes sent or received on either the upstream or downstream connection. If not set, the default idle timeout is 30 seconds. If set to 0s, the timeout will be disabled. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ idleTimeout?: string; /** * If true, Router will use the destination IP and port of the original connection as the destination of the request. */ originalDestination?: boolean; } interface TcpRouteRuleActionDestination { /** * The URL of a BackendService to route traffic to. */ serviceName?: string; /** * Specifies the proportion of requests forwarded to the backend referenced by the serviceName field. This is computed as: weight/Sum(weights in this destination list). 
For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. * If only one serviceName is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. * If weights are specified for any one service name, they need to be specified for all of them. * If weights are unspecified for all services, then, traffic is distributed in equal proportions to all of them. */ weight?: number; } interface TcpRouteRuleMatch { /** * Must be specified in the CIDR range format. A CIDR range consists of an IP Address and a prefix length to construct the subnet mask. * By default, the prefix length is 32 (i.e. matches a single IP address). Only IPV4 addresses are supported. Examples: "10.0.0.1" - matches against this exact IP address. "10.0.0.0/8" - matches against any IP address within the 10.0.0.0 subnet and 255.255.255.0 mask. "0.0.0.0/0" - matches against any IP address'. */ address: string; /** * Specifies the destination port to match against. */ port: string; } interface TlsRouteRule { /** * Required. A detailed rule defining how to route traffic. * Structure is documented below. */ action: outputs.networkservices.TlsRouteRuleAction; /** * Matches define the predicate used to match requests to a given action. * Structure is documented below. */ matches: outputs.networkservices.TlsRouteRuleMatch[]; } interface TlsRouteRuleAction { /** * The destination to which traffic should be forwarded. * Structure is documented below. */ destinations?: outputs.networkservices.TlsRouteRuleActionDestination[]; } interface TlsRouteRuleActionDestination { /** * The URL of a BackendService to route traffic to. */ serviceName?: string; /** * Specifies the proportion of requests forwarded to the backend referenced by the serviceName field. */ weight?: number; } interface TlsRouteRuleMatch { /** * ALPN (Application-Layer Protocol Negotiation) to match against. Examples: "http/1.1", "h2". 
At least one of sniHost and alpn is required. Up to 5 alpns across all matches can be set. */ alpns?: string[]; /** * SNI (server name indicator) to match against. SNI will be matched against all wildcard domains, i.e. www.example.com will be first matched against www.example.com, then *.example.com, then *.com. * Partial wildcards are not supported, and values like *w.example.com are invalid. At least one of sniHost and alpn is required. Up to 5 sni hosts across all matches can be set. */ sniHosts?: string[]; } interface WasmPluginLogConfig { /** * Optional. Specifies whether to enable logging for activity by this plugin. */ enable?: boolean; /** * Non-empty default. Specificies the lowest level of the plugin logs that are exported to Cloud Logging. This setting relates to the logs generated by using logging statements in your Wasm code. * This field is can be set only if logging is enabled for the plugin. * If the field is not provided when logging is enabled, it is set to INFO by default. * Possible values are: `LOG_LEVEL_UNSPECIFIED`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `CRITICAL`. */ minLogLevel: string; /** * Non-empty default. Configures the sampling rate of activity logs, where 1.0 means all logged activity is reported and 0.0 means no activity is reported. * A floating point value between 0.0 and 1.0 indicates that a percentage of log messages is stored. * The default value when logging is enabled is 1.0. The value of the field must be between 0 and 1 (inclusive). * This field can be specified only if logging is enabled for this plugin. */ sampleRate: number; } interface WasmPluginUsedBy { /** * Identifier. Name of the WasmPlugin resource. */ name?: string; } interface WasmPluginVersion { /** * (Output) * Output only. The timestamp when the resource was created. */ createTime: string; /** * Optional. A human-readable description of the resource. */ description?: string; /** * (Output) * Output only. 
The resolved digest for the image specified in the image field. The digest is resolved during the creation of WasmPluginVersion resource. * This field holds the digest value, regardless of whether a tag or digest was originally specified in the image field. */ imageDigest: string; /** * Optional. URI of the container image containing the plugin, stored in the Artifact Registry. When a new WasmPluginVersion resource is created, the digest of the container image is saved in the imageDigest field. * When downloading an image, the digest value is used instead of an image tag. */ imageUri?: string; /** * Optional. Set of labels associated with the WasmPlugin resource. */ labels?: { [key: string]: string; }; /** * A base64-encoded string containing the configuration for the plugin. The configuration is provided to the plugin at runtime through the ON_CONFIGURE callback. * When a new WasmPluginVersion resource is created, the digest of the contents is saved in the pluginConfigDigest field. * Conflicts with pluginConfigUri. */ pluginConfigData?: string; /** * (Output) * Output only. This field holds the digest (usually checksum) value for the plugin configuration. * The value is calculated based on the contents of pluginConfigData or the container image defined by the pluginConfigUri field. */ pluginConfigDigest: string; /** * URI of the plugin configuration stored in the Artifact Registry. The configuration is provided to the plugin at runtime through the ON_CONFIGURE callback. * The container image must contain only a single file with the name plugin.config. * When a new WasmPluginVersion resource is created, the digest of the container image is saved in the pluginConfigDigest field. * Conflicts with pluginConfigData. */ pluginConfigUri?: string; /** * (Output) * Output only. The timestamp when the resource was updated. */ updateTime: string; /** * The identifier for this object. Format specified above. 
*/ versionName: string; } } export declare namespace notebooks { interface EnvironmentContainerImage { /** * The path to the container image repository. * For example: gcr.io/{project_id}/{imageName} */ repository: string; /** * The tag of the container image. If not specified, this defaults to the latest tag. */ tag?: string; } interface EnvironmentVmImage { /** * Use this VM image family to find the image; the newest image in this family will be used. */ imageFamily?: string; /** * Use VM image name to find the image. */ imageName?: string; /** * The name of the Google Cloud project that this VM image belongs to. * Format: projects/{project_id} */ project: string; } interface InstanceAcceleratorConfig { /** * Count of cores of this accelerator. */ coreCount: number; /** * Type of this accelerator. * Possible values are: `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `NVIDIA_TESLA_A100`, `TPU_V2`, `TPU_V3`. */ type: string; } interface InstanceContainerImage { /** * The path to the container image repository. * For example: gcr.io/{project_id}/{imageName} */ repository: string; /** * The tag of the container image. If not specified, this defaults to the latest tag. */ tag?: string; } interface InstanceIamBindingCondition { description?: string; expression: string; title: string; } interface InstanceIamMemberCondition { description?: string; expression: string; title: string; } interface InstanceReservationAffinity { /** * The type of Compute Reservation. * Possible values are: `NO_RESERVATION`, `ANY_RESERVATION`, `SPECIFIC_RESERVATION`. */ consumeReservationType: string; /** * Corresponds to the label key of reservation resource. */ key?: string; /** * Corresponds to the label values of reservation resource. 
*/ values?: string[]; } interface InstanceShieldedInstanceConfig { /** * Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the * boot integrity of the instance. The attestation is performed against the integrity policy baseline. * This baseline is initially derived from the implicitly trusted boot image when the instance is created. * Enabled by default. */ enableIntegrityMonitoring?: boolean; /** * Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs * authentic software by verifying the digital signature of all boot components, and halting the boot process * if signature verification fails. * Disabled by default. */ enableSecureBoot?: boolean; /** * Defines whether the instance has the vTPM enabled. * Enabled by default. */ enableVtpm?: boolean; } interface InstanceVmImage { /** * Use this VM image family to find the image; the newest image in this family will be used. */ imageFamily?: string; /** * Use VM image name to find the image. */ imageName?: string; /** * The name of the Google Cloud project that this VM image belongs to. * Format: projects/{project_id} */ project: string; } interface RuntimeAccessConfig { /** * The type of access mode this instance. For valid values, see * `https://cloud.google.com/vertex-ai/docs/workbench/reference/ * rest/v1/projects.locations.runtimes#RuntimeAccessType`. */ accessType?: string; /** * (Output) * The proxy endpoint that is used to access the runtime. */ proxyUri: string; /** * The owner of this runtime after creation. Format: `alias@example.com`. * Currently supports one owner only. 
*/ runtimeOwner?: string; } interface RuntimeIamBindingCondition { description?: string; expression: string; title: string; } interface RuntimeIamMemberCondition { description?: string; expression: string; title: string; } interface RuntimeMetric { /** * (Output) * Contains runtime daemon metrics, such as OS and kernels and * sessions stats. */ systemMetrics: { [key: string]: string; }; } interface RuntimeSoftwareConfig { /** * Specify a custom Cloud Storage path where the GPU driver is stored. * If not specified, we'll automatically choose from official GPU drivers. */ customGpuDriverPath?: string; /** * Verifies core internal services are running. Default: True. */ enableHealthMonitoring?: boolean; /** * Runtime will automatically shutdown after idle_shutdown_time. * Default: True */ idleShutdown?: boolean; /** * Time in minutes to wait before shutting down runtime. * Default: 180 minutes */ idleShutdownTimeout?: number; /** * Install Nvidia Driver automatically. */ installGpuDriver?: boolean; /** * Use a list of container images to use as Kernels in the notebook instance. * Structure is documented below. */ kernels?: outputs.notebooks.RuntimeSoftwareConfigKernel[]; /** * Cron expression in UTC timezone for schedule instance auto upgrade. * Please follow the [cron format](https://en.wikipedia.org/wiki/Cron). */ notebookUpgradeSchedule?: string; /** * Path to a Bash script that automatically runs after a notebook instance * fully boots up. The path must be a URL or * Cloud Storage path (gs://path-to-file/file-name). */ postStartupScript?: string; /** * Behavior for the post startup script. * Possible values are: `POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED`, `RUN_EVERY_START`, `DOWNLOAD_AND_RUN_EVERY_START`. */ postStartupScriptBehavior?: string; /** * (Output) * Bool indicating whether a newer image is available in an image family. */ upgradeable: boolean; } interface RuntimeSoftwareConfigKernel { /** * The path to the container image repository. 
* For example: gcr.io/{project_id}/{imageName} */ repository: string; /** * The tag of the container image. If not specified, this defaults to the latest tag. */ tag?: string; } interface RuntimeVirtualMachine { /** * (Output) * The unique identifier of the Managed Compute Engine instance. */ instanceId: string; /** * (Output) * The user-friendly name of the Managed Compute Engine instance. */ instanceName: string; /** * Virtual Machine configuration settings. * Structure is documented below. */ virtualMachineConfig?: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfig; } interface RuntimeVirtualMachineVirtualMachineConfig { /** * The Compute Engine accelerator configuration for this runtime. * Structure is documented below. */ acceleratorConfig?: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig; /** * Use a list of container images to start the notebook instance. * Structure is documented below. */ containerImages: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfigContainerImage[]; /** * Data disk option configuration settings. * Structure is documented below. */ dataDisk: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDisk; /** * Encryption settings for virtual machine data disk. * Structure is documented below. */ encryptionConfig?: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfigEncryptionConfig; /** * (Output) * The Compute Engine guest attributes. (see [Project and instance * guest attributes](https://cloud.google.com/compute/docs/ * storing-retrieving-metadata#guest_attributes)). */ guestAttributes: { [key: string]: string; }; /** * If true, runtime will only have internal IP addresses. By default, * runtimes are not restricted to internal IP addresses, and will * have ephemeral external IP addresses assigned to each vm. 
This * `internalIpOnly` restriction can only be enabled for subnetwork * enabled networks, and all dependencies must be configured to be * accessible without external IP addresses. */ internalIpOnly?: boolean; /** * The labels to associate with this runtime. Label **keys** must * contain 1 to 63 characters, and must conform to [RFC 1035] * (https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be * empty, but, if present, must contain 1 to 63 characters, and must * conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No * more than 32 labels can be associated with a cluster. */ labels: { [key: string]: string; }; /** * The Compute Engine machine type used for runtimes. */ machineType: string; /** * The Compute Engine metadata entries to add to virtual machine. * (see [Project and instance metadata](https://cloud.google.com * /compute/docs/storing-retrieving-metadata#project_and_instance * _metadata)). */ metadata: { [key: string]: string; }; /** * The Compute Engine network to be used for machine communications. * Cannot be specified with subnetwork. If neither `network` nor * `subnet` is specified, the "default" network of the project is * used, if it exists. A full URL or partial URI. Examples: * * `https://www.googleapis.com/compute/v1/projects/[projectId]/ * regions/global/default` * * `projects/[projectId]/regions/global/default` * Runtimes are managed resources inside Google Infrastructure. * Runtimes support the following network configurations: * * Google Managed Network (Network & subnet are empty) * * Consumer Project VPC (network & subnet are required). Requires * configuring Private Service Access. * * Shared VPC (network & subnet are required). Requires * configuring Private Service Access. */ network?: string; /** * The type of vNIC to be used on this interface. This may be gVNIC * or VirtioNet. * Possible values are: `UNSPECIFIED_NIC_TYPE`, `VIRTIO_NET`, `GVNIC`. */ nicType?: string; /** * Reserved IP Range name is used for VPC Peering. 
The * subnetwork allocation will use the range *name* if it's assigned. */ reservedIpRange?: string; /** * Shielded VM Instance configuration settings. * Structure is documented below. */ shieldedInstanceConfig?: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig; /** * The Compute Engine subnetwork to be used for machine * communications. Cannot be specified with network. A full URL or * partial URI are valid. Examples: * * `https://www.googleapis.com/compute/v1/projects/[projectId]/ * regions/us-east1/subnetworks/sub0` * * `projects/[projectId]/regions/us-east1/subnetworks/sub0` */ subnet?: string; /** * The Compute Engine tags to add to runtime (see [Tagging instances] * (https://cloud.google.com/compute/docs/ * label-or-tag-resources#tags)). */ tags: string[]; /** * (Output) * The zone where the virtual machine is located. */ zone: string; } interface RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig { /** * Count of cores of this accelerator. */ coreCount?: number; /** * Accelerator model. For valid values, see * `https://cloud.google.com/vertex-ai/docs/workbench/reference/ * rest/v1/projects.locations.runtimes#AcceleratorType` */ type?: string; } interface RuntimeVirtualMachineVirtualMachineConfigContainerImage { /** * The path to the container image repository. * For example: gcr.io/{project_id}/{imageName} */ repository: string; /** * The tag of the container image. If not specified, this defaults to the latest tag. */ tag?: string; } interface RuntimeVirtualMachineVirtualMachineConfigDataDisk { /** * (Output) * Optional. Specifies whether the disk will be auto-deleted * when the instance is deleted (but not when the disk is * detached from the instance). */ autoDelete: boolean; /** * (Output) * Optional. Indicates that this is a boot disk. The virtual * machine will use the first partition of the disk for its * root filesystem. */ boot: boolean; /** * (Output) * Optional. 
Specifies a unique device name of your choice * that is reflected into the /dev/disk/by-id/google-* tree * of a Linux operating system running within the instance. * This name can be used to reference the device for mounting, * resizing, and so on, from within the instance. * If not specified, the server chooses a default device name * to apply to this disk, in the form persistent-disk-x, where * x is a number assigned by Google Compute Engine. This field * is only applicable for persistent disks. */ deviceName: string; /** * (Output) * Indicates a list of features to enable on the guest operating * system. Applicable only for bootable images. To see a list of * available features, read `https://cloud.google.com/compute/docs/ * images/create-delete-deprecate-private-images#guest-os-features` * options. `` */ guestOsFeatures: string[]; /** * (Output) * Output only. A zero-based index to this disk, where 0 is * reserved for the boot disk. If you have many disks attached * to an instance, each disk would have a unique index number. */ index: number; /** * Input only. Specifies the parameters for a new disk that will * be created alongside the new instance. Use initialization * parameters to create boot disks or local SSDs attached to the * new instance. This property is mutually exclusive with the * source property; you can only define one or the other, but not * both. * Structure is documented below. */ initializeParams?: outputs.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams; /** * "Specifies the disk interface to use for attaching this disk, * which is either SCSI or NVME. The default is SCSI. Persistent * disks must always use SCSI and the request will fail if you attempt * to attach a persistent disk in any other format than SCSI. Local SSDs * can use either NVME or SCSI. For performance characteristics of SCSI * over NVMe, see Local SSD performance. Valid values: * NVME * SCSI". 
*/ interface?: string; /** * (Output) * Type of the resource. Always compute#attachedDisk for attached * disks. */ kind: string; /** * (Output) * Output only. Any valid publicly visible licenses. */ licenses: string[]; /** * The mode in which to attach this disk, either READ_WRITE * or READ_ONLY. If not specified, the default is to attach * the disk in READ_WRITE mode. */ mode?: string; /** * Specifies a valid partial or full URL to an existing * Persistent Disk resource. */ source?: string; /** * Specifies the type of the disk, either SCRATCH or PERSISTENT. * If not specified, the default is PERSISTENT. */ type?: string; } interface RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams { /** * Provide this property when creating the disk. */ description?: string; /** * Specifies the disk name. If not specified, the default is * to use the name of the instance. If the disk with the * instance name exists already in the given zone/region, a * new name will be automatically generated. */ diskName?: string; /** * Specifies the size of the disk in base-2 GB. If not * specified, the disk will be the same size as the image * (usually 10GB). If specified, the size must be equal to * or larger than 10GB. Default 100 GB. */ diskSizeGb?: number; /** * The type of the boot disk attached to this runtime, * defaults to standard persistent disk. For valid values, * see `https://cloud.google.com/vertex-ai/docs/workbench/ * reference/rest/v1/projects.locations.runtimes#disktype` */ diskType?: string; /** * Labels to apply to this disk. These can be later modified * by the disks.setLabels method. This field is only * applicable for persistent disks. */ labels: { [key: string]: string; }; } interface RuntimeVirtualMachineVirtualMachineConfigEncryptionConfig { /** * The Cloud KMS resource identifier of the customer-managed * encryption key used to protect a resource, such as a disks. 
* It has the following format: * `projects/{PROJECT_ID}/locations/{REGION}/keyRings/ * {KEY_RING_NAME}/cryptoKeys/{KEY_NAME}` */ kmsKey?: string; } interface RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig { /** * Defines whether the instance has integrity monitoring enabled. * Enables monitoring and attestation of the boot integrity of * the instance. The attestation is performed against the * integrity policy baseline. This baseline is initially derived * from the implicitly trusted boot image when the instance is * created. Enabled by default. */ enableIntegrityMonitoring?: boolean; /** * Defines whether the instance has Secure Boot enabled.Secure * Boot helps ensure that the system only runs authentic software * by verifying the digital signature of all boot components, and * halting the boot process if signature verification fails. * Disabled by default. */ enableSecureBoot?: boolean; /** * Defines whether the instance has the vTPM enabled. Enabled by * default. */ enableVtpm?: boolean; } } export declare namespace oracledatabase { interface AutonomousDatabaseProperties { /** * (Output) * The amount of storage currently being used for user and system data, in * terabytes. */ actualUsedDataStorageSizeTb: number; /** * (Output) * The amount of storage currently allocated for the database tables and * billed for, rounded up in terabytes. */ allocatedStorageSizeTb: number; /** * (Output) * Oracle APEX Application Development. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseApex * Structure is documented below. */ apexDetails: outputs.oracledatabase.AutonomousDatabasePropertiesApexDetail[]; /** * (Output) * This field indicates the status of Data Guard and Access control for the * Autonomous Database. The field's value is null if Data Guard is disabled * or Access Control is disabled. 
The field's value is TRUE if both Data Guard * and Access Control are enabled, and the Autonomous Database is using * primary IP access control list (ACL) for standby. The field's value is * FALSE if both Data Guard and Access Control are enabled, and the Autonomous * Database is using a different IP access control list (ACL) for standby * compared to primary. */ arePrimaryAllowlistedIpsUsed: boolean; /** * (Output) * The Autonomous Container Database OCID. */ autonomousContainerDatabaseId: string; /** * (Output) * The list of available Oracle Database upgrade versions for an Autonomous * Database. */ availableUpgradeVersions: string[]; /** * The retention period for the Autonomous Database. This field is specified * in days, can range from 1 day to 60 days, and has a default value of * 60 days. */ backupRetentionPeriodDays: number; /** * The character set for the Autonomous Database. The default is AL32UTF8. */ characterSet?: string; /** * The number of compute servers for the Autonomous Database. */ computeCount: number; /** * (Output) * The connection string used to connect to the Autonomous Database. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionStrings * Structure is documented below. */ connectionStrings: outputs.oracledatabase.AutonomousDatabasePropertiesConnectionString[]; /** * (Output) * The URLs for accessing Oracle Application Express (APEX) and SQL Developer * Web with a browser from a Compute instance. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionUrls * Structure is documented below. */ connectionUrls: outputs.oracledatabase.AutonomousDatabasePropertiesConnectionUrl[]; /** * The number of CPU cores to be made available to the database. */ cpuCoreCount: number; /** * The list of customer contacts. * Structure is documented below. 
*/ customerContacts?: outputs.oracledatabase.AutonomousDatabasePropertiesCustomerContact[]; /** * (Output) * The current state of the Data Safe registration for the * Autonomous Database. * Possible values: * DATA_SAFE_STATE_UNSPECIFIED * REGISTERING * REGISTERED * DEREGISTERING * NOT_REGISTERED * FAILED */ dataSafeState: string; /** * The size of the data stored in the database, in gigabytes. */ dataStorageSizeGb: number; /** * The size of the data stored in the database, in terabytes. */ dataStorageSizeTb: number; /** * (Output) * The current state of database management for the Autonomous Database. * Possible values: * DATABASE_MANAGEMENT_STATE_UNSPECIFIED * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ databaseManagementState: string; /** * The edition of the Autonomous Databases. * Possible values: * DATABASE_EDITION_UNSPECIFIED * STANDARD_EDITION * ENTERPRISE_EDITION */ dbEdition?: string; /** * The Oracle Database version for the Autonomous Database. */ dbVersion?: string; /** * Possible values: * DB_WORKLOAD_UNSPECIFIED * OLTP * DW * AJD * APEX */ dbWorkload: string; /** * (Output) * This field indicates the number of seconds of data loss during a Data * Guard failover. */ failedDataRecoveryDuration: string; /** * This field indicates if auto scaling is enabled for the Autonomous Database * CPU core count. */ isAutoScalingEnabled?: boolean; /** * (Output) * This field indicates whether the Autonomous Database has local (in-region) * Data Guard enabled. */ isLocalDataGuardEnabled: boolean; /** * This field indicates if auto scaling is enabled for the Autonomous Database * storage. */ isStorageAutoScalingEnabled: boolean; /** * The license type used for the Autonomous Database. * Possible values: * LICENSE_TYPE_UNSPECIFIED * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseType: string; /** * (Output) * The details of the current lifecycle state of the Autonomous Database. 
*/ lifecycleDetails: string; /** * (Output) * This field indicates the maximum data loss limit for an Autonomous * Database, in seconds. */ localAdgAutoFailoverMaxDataLossLimit: number; /** * (Output) * This field indicates the local disaster recovery (DR) type of an * Autonomous Database. * Possible values: * LOCAL_DISASTER_RECOVERY_TYPE_UNSPECIFIED * ADG * BACKUP_BASED */ localDisasterRecoveryType: string; /** * (Output) * Autonomous Data Guard standby database details. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseStandbySummary * Structure is documented below. */ localStandbyDbs: outputs.oracledatabase.AutonomousDatabasePropertiesLocalStandbyDb[]; /** * (Output) * The date and time when maintenance will begin. */ maintenanceBeginTime: string; /** * (Output) * The date and time when maintenance will end. */ maintenanceEndTime: string; /** * The maintenance schedule of the Autonomous Database. * Possible values: * MAINTENANCE_SCHEDULE_TYPE_UNSPECIFIED * EARLY * REGULAR */ maintenanceScheduleType: string; /** * (Output) * The amount of memory enabled per ECPU, in gigabytes. */ memoryPerOracleComputeUnitGbs: number; /** * (Output) * The memory assigned to in-memory tables in an Autonomous Database. */ memoryTableGbs: number; /** * This field specifies if the Autonomous Database requires mTLS connections. */ mtlsConnectionRequired?: boolean; /** * The national character set for the Autonomous Database. The default is * AL16UTF16. */ nCharacterSet?: string; /** * (Output) * The long term backup schedule of the Autonomous Database. */ nextLongTermBackupTime: string; /** * (Output) * The Oracle Cloud Infrastructure link for the Autonomous Database. */ ociUrl: string; /** * (Output) * OCID of the Autonomous Database. * https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle */ ocid: string; /** * (Output) * This field indicates the current mode of the Autonomous Database. 
* Possible values: * OPEN_MODE_UNSPECIFIED * READ_ONLY * READ_WRITE */ openMode: string; /** * Possible values: * OPERATIONS_INSIGHTS_STATE_UNSPECIFIED * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ operationsInsightsState: string; /** * (Output) * The list of OCIDs of standby databases located in Autonomous Data Guard * remote regions that are associated with the source database. */ peerDbIds: string[]; /** * (Output) * The permission level of the Autonomous Database. * Possible values: * PERMISSION_LEVEL_UNSPECIFIED * RESTRICTED * UNRESTRICTED */ permissionLevel: string; /** * (Output) * The private endpoint for the Autonomous Database. */ privateEndpoint: string; /** * The private endpoint IP address for the Autonomous Database. */ privateEndpointIp: string; /** * The private endpoint label for the Autonomous Database. */ privateEndpointLabel: string; /** * (Output) * The refresh mode of the cloned Autonomous Database. * Possible values: * REFRESHABLE_MODE_UNSPECIFIED * AUTOMATIC * MANUAL */ refreshableMode: string; /** * (Output) * The refresh State of the clone. * Possible values: * REFRESHABLE_STATE_UNSPECIFIED * REFRESHING * NOT_REFRESHING */ refreshableState: string; /** * (Output) * The Data Guard role of the Autonomous Database. * Possible values: * ROLE_UNSPECIFIED * PRIMARY * STANDBY * DISABLED_STANDBY * BACKUP_COPY * SNAPSHOT_STANDBY */ role: string; /** * (Output) * The list and details of the scheduled operations of the Autonomous * Database. * Structure is documented below. */ scheduledOperationDetails: outputs.oracledatabase.AutonomousDatabasePropertiesScheduledOperationDetail[]; /** * The ID of the Oracle Cloud Infrastructure vault secret. */ secretId?: string; /** * (Output) * The SQL Web Developer URL for the Autonomous Database. 
*/ sqlWebDeveloperUrl: string; /** * (Output) * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * STOPPING * STOPPED * STARTING * TERMINATING * TERMINATED * UNAVAILABLE * RESTORE_IN_PROGRESS * RESTORE_FAILED * BACKUP_IN_PROGRESS * SCALE_IN_PROGRESS * AVAILABLE_NEEDS_ATTENTION * UPDATING * MAINTENANCE_IN_PROGRESS * RESTARTING * RECREATING * ROLE_CHANGE_IN_PROGRESS * UPGRADING * INACCESSIBLE * STANDBY */ state: string; /** * (Output) * The list of available regions that can be used to create a clone for the * Autonomous Database. */ supportedCloneRegions: string[]; /** * (Output) * The storage space used by automatic backups of Autonomous Database, in * gigabytes. */ totalAutoBackupStorageSizeGbs: number; /** * (Output) * The storage space used by Autonomous Database, in gigabytes. */ usedDataStorageSizeTbs: number; /** * The ID of the Oracle Cloud Infrastructure vault. */ vaultId?: string; } interface AutonomousDatabasePropertiesApexDetail { /** * The Oracle APEX Application Development version. */ apexVersion: string; /** * The Oracle REST Data Services (ORDS) version. */ ordsVersion: string; } interface AutonomousDatabasePropertiesConnectionString { /** * A list of all connection strings that can be used to connect to the * Autonomous Database. */ allConnectionStrings: outputs.oracledatabase.AutonomousDatabasePropertiesConnectionStringAllConnectionString[]; /** * The database service provides the least level of resources to each SQL * statement, but supports the most number of concurrent SQL statements. */ dedicated: string; /** * The database service provides the highest level of resources to each SQL * statement. */ high: string; /** * The database service provides the least level of resources to each SQL * statement. */ low: string; /** * The database service provides a lower level of resources to each SQL * statement. 
*/ medium: string; /** * A list of connection string profiles to allow clients to group, filter, and * select values based on the structured metadata. */ profiles: outputs.oracledatabase.AutonomousDatabasePropertiesConnectionStringProfile[]; } interface AutonomousDatabasePropertiesConnectionStringAllConnectionString { /** * The database service provides the highest level of resources to each SQL * statement. */ high: string; /** * The database service provides the least level of resources to each SQL * statement. */ low: string; /** * The database service provides a lower level of resources to each SQL * statement. */ medium: string; } interface AutonomousDatabasePropertiesConnectionStringProfile { /** * The current consumer group being used by the connection. * Possible values: * CONSUMER_GROUP_UNSPECIFIED * HIGH * MEDIUM * LOW * TP * TPURGENT */ consumerGroup: string; /** * The display name for the Autonomous Database. The name does not have to * be unique within your project. */ displayName: string; /** * The host name format being currently used in connection string. * Possible values: * HOST_FORMAT_UNSPECIFIED * FQDN * IP */ hostFormat: string; /** * This field indicates if the connection string is regional and is only * applicable for cross-region Data Guard. */ isRegional: boolean; /** * The protocol being used by the connection. * Possible values: * PROTOCOL_UNSPECIFIED * TCP * TCPS */ protocol: string; /** * The current session mode of the connection. * Possible values: * SESSION_MODE_UNSPECIFIED * DIRECT * INDIRECT */ sessionMode: string; /** * The syntax of the connection string. * Possible values: * SYNTAX_FORMAT_UNSPECIFIED * LONG * EZCONNECT * EZCONNECTPLUS */ syntaxFormat: string; /** * This field indicates the TLS authentication type of the connection. * Possible values: * TLS_AUTHENTICATION_UNSPECIFIED * SERVER * MUTUAL */ tlsAuthentication: string; /** * The value of the connection string. 
*/ value: string; } interface AutonomousDatabasePropertiesConnectionUrl { /** * Oracle Application Express (APEX) URL. */ apexUri: string; /** * The URL of the Database Transforms for the Autonomous Database. */ databaseTransformsUri: string; /** * The URL of the Graph Studio for the Autonomous Database. */ graphStudioUri: string; /** * The URL of the Oracle Machine Learning (OML) Notebook for the Autonomous * Database. */ machineLearningNotebookUri: string; /** * The URL of Machine Learning user management the Autonomous Database. */ machineLearningUserManagementUri: string; /** * The URL of the MongoDB API for the Autonomous Database. */ mongoDbUri: string; /** * The Oracle REST Data Services (ORDS) URL of the Web Access for the * Autonomous Database. */ ordsUri: string; /** * The URL of the Oracle SQL Developer Web for the Autonomous Database. */ sqlDevWebUri: string; } interface AutonomousDatabasePropertiesCustomerContact { /** * The email address used by Oracle to send notifications regarding databases * and infrastructure. * * The `apexDetails` block contains: */ email: string; } interface AutonomousDatabasePropertiesLocalStandbyDb { /** * The date and time the Autonomous Data Guard role was switched for the * standby Autonomous Database. */ dataGuardRoleChangedTime: string; /** * The date and time the Disaster Recovery role was switched for the standby * Autonomous Database. */ disasterRecoveryRoleChangedTime: string; /** * The amount of time, in seconds, that the data of the standby database lags * in comparison to the data of the primary database. */ lagTimeDuration: string; /** * The additional details about the current lifecycle state of the * Autonomous Database. 
*/ lifecycleDetails: string; /** * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * STOPPING * STOPPED * STARTING * TERMINATING * TERMINATED * UNAVAILABLE * RESTORE_IN_PROGRESS * RESTORE_FAILED * BACKUP_IN_PROGRESS * SCALE_IN_PROGRESS * AVAILABLE_NEEDS_ATTENTION * UPDATING * MAINTENANCE_IN_PROGRESS * RESTARTING * RECREATING * ROLE_CHANGE_IN_PROGRESS * UPGRADING * INACCESSIBLE * STANDBY */ state: string; } interface AutonomousDatabasePropertiesScheduledOperationDetail { /** * Possible values: * DAY_OF_WEEK_UNSPECIFIED * MONDAY * TUESDAY * WEDNESDAY * THURSDAY * FRIDAY * SATURDAY * SUNDAY */ dayOfWeek: string; /** * Represents a time of day. The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and 'google.protobuf.Timestamp'. */ startTimes: outputs.oracledatabase.AutonomousDatabasePropertiesScheduledOperationDetailStartTime[]; /** * Represents a time of day. The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and 'google.protobuf.Timestamp'. */ stopTimes: outputs.oracledatabase.AutonomousDatabasePropertiesScheduledOperationDetailStopTime[]; } interface AutonomousDatabasePropertiesScheduledOperationDetailStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose * to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may * allow the value 60 if it allows leap-seconds. */ seconds: number; } interface AutonomousDatabasePropertiesScheduledOperationDetailStopTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose * to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may * allow the value 60 if it allows leap-seconds. */ seconds: number; } interface AutonomousDatabaseSourceConfig { /** * This field specifies if the replication of automatic backups is enabled when creating a Data Guard. */ automaticBackupsReplicationEnabled?: boolean; /** * The name of the primary Autonomous Database that is used to create a Peer Autonomous Database from a source. */ autonomousDatabase?: string; } interface CloudExadataInfrastructureProperties { /** * (Output) * The requested number of additional storage servers activated for the * Exadata Infrastructure. */ activatedStorageCount: number; /** * (Output) * The requested number of additional storage servers for the Exadata * Infrastructure. */ additionalStorageCount: number; /** * (Output) * The available storage can be allocated to the Exadata Infrastructure * resource, in gigabytes (GB). */ availableStorageSizeGb: number; /** * The number of compute servers for the Exadata Infrastructure. */ computeCount?: number; /** * (Output) * The number of enabled CPU cores. */ cpuCount: number; /** * The list of customer contacts. * Structure is documented below. */ customerContacts?: outputs.oracledatabase.CloudExadataInfrastructurePropertiesCustomerContact[]; /** * (Output) * Size, in terabytes, of the DATA disk group. */ dataStorageSizeTb: number; /** * (Output) * The local node storage allocated in GBs. */ dbNodeStorageSizeGb: number; /** * (Output) * The software version of the database servers (dom0) in the Exadata * Infrastructure. */ dbServerVersion: string; /** * Maintenance window as defined by Oracle. 
* https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/MaintenanceWindow * Structure is documented below. */ maintenanceWindow: outputs.oracledatabase.CloudExadataInfrastructurePropertiesMaintenanceWindow; /** * (Output) * The total number of CPU cores available. */ maxCpuCount: number; /** * (Output) * The total available DATA disk group size. */ maxDataStorageTb: number; /** * (Output) * The total local node storage available in GBs. */ maxDbNodeStorageSizeGb: number; /** * (Output) * The total memory available in GBs. */ maxMemoryGb: number; /** * (Output) * The memory allocated in GBs. */ memorySizeGb: number; /** * (Output) * The monthly software version of the database servers (dom0) * in the Exadata Infrastructure. Example: 20.1.15 */ monthlyDbServerVersion: string; /** * (Output) * The monthly software version of the storage servers (cells) * in the Exadata Infrastructure. Example: 20.1.15 */ monthlyStorageServerVersion: string; /** * (Output) * The OCID of the next maintenance run. */ nextMaintenanceRunId: string; /** * (Output) * The time when the next maintenance run will occur. */ nextMaintenanceRunTime: string; /** * (Output) * The time when the next security maintenance run will occur. */ nextSecurityMaintenanceRunTime: string; /** * (Output) * Deep link to the OCI console to view this resource. */ ociUrl: string; /** * (Output) * OCID of created infra. * https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle */ ocid: string; /** * The shape of the Exadata Infrastructure. The shape determines the * amount of CPU, storage, and memory resources allocated to the instance. */ shape: string; /** * (Output) * The current lifecycle state of the Exadata Infrastructure. * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ state: string; /** * The number of Cloud Exadata storage servers for the Exadata Infrastructure. 
*/ storageCount?: number; /** * (Output) * The software version of the storage servers (cells) in the Exadata * Infrastructure. */ storageServerVersion: string; /** * The total storage allocated to the Exadata Infrastructure * resource, in gigabytes (GB). */ totalStorageSizeGb: number; } interface CloudExadataInfrastructurePropertiesCustomerContact { /** * The email address used by Oracle to send notifications regarding databases * and infrastructure. */ email: string; } interface CloudExadataInfrastructurePropertiesMaintenanceWindow { /** * Determines the amount of time the system will wait before the start of each * database server patching operation. Custom action timeout is in minutes and * valid value is between 15 to 120 (inclusive). */ customActionTimeoutMins: number; /** * Days during the week when maintenance should be performed. */ daysOfWeeks: string[]; /** * The window of hours during the day when maintenance should be performed. * The window is a 4 hour slot. Valid values are: * 0 - represents time slot 0:00 - 3:59 UTC * 4 - represents time slot 4:00 - 7:59 UTC * 8 - represents time slot 8:00 - 11:59 UTC * 12 - represents time slot 12:00 - 15:59 UTC * 16 - represents time slot 16:00 - 19:59 UTC * 20 - represents time slot 20:00 - 23:59 UTC */ hoursOfDays: number[]; /** * If true, enables the configuration of a custom action timeout (waiting * period) between database server patching operations. */ isCustomActionTimeoutEnabled: boolean; /** * Lead time window allows user to set a lead time to prepare for a down time. * The lead time is in weeks and valid value is between 1 to 4. */ leadTimeWeek: number; /** * Months during the year when maintenance should be performed. */ months: string[]; /** * Cloud CloudExadataInfrastructure node patching method, either "ROLLING" * or "NONROLLING". Default value is ROLLING. 
* Possible values: * PATCHING_MODE_UNSPECIFIED * ROLLING * NON_ROLLING */ patchingMode: string; /** * The maintenance window scheduling preference. * Possible values: * MAINTENANCE_WINDOW_PREFERENCE_UNSPECIFIED * CUSTOM_PREFERENCE * NO_PREFERENCE */ preference: string; /** * Weeks during the month when maintenance should be performed. Weeks start on * the 1st, 8th, 15th, and 22nd days of the month, and have a duration of 7 * days. Weeks start and end based on calendar dates, not days of the week. */ weeksOfMonths: number[]; } interface CloudVmClusterProperties { /** * OCI Cluster name. */ clusterName: string; /** * (Output) * Compartment ID of cluster. */ compartmentId: string; /** * Number of enabled CPU cores. */ cpuCoreCount: number; /** * The data disk group size to be allocated in TBs. */ dataStorageSizeTb: number; /** * Local storage per VM */ dbNodeStorageSizeGb: number; /** * OCID of database servers. */ dbServerOcids: string[]; /** * Data collection options for diagnostics. * Structure is documented below. */ diagnosticsDataCollectionOptions?: outputs.oracledatabase.CloudVmClusterPropertiesDiagnosticsDataCollectionOptions; /** * The type of redundancy. * Possible values: * DISK_REDUNDANCY_UNSPECIFIED * HIGH * NORMAL */ diskRedundancy: string; /** * (Output) * DNS listener IP. */ dnsListenerIp: string; /** * (Output) * Parent DNS domain where SCAN DNS and hosts names are qualified. * ex: ocispdelegated.ocisp10jvnet.oraclevcn.com */ domain: string; /** * Grid Infrastructure Version. */ giVersion?: string; /** * (Output) * host name without domain. * format: "-" with some suffix. * ex: sp2-yi0xq where "sp2" is the hostname_prefix. */ hostname: string; /** * Prefix for VM cluster host names. */ hostnamePrefix?: string; /** * License type of VM Cluster. * Possible values: * LICENSE_TYPE_UNSPECIFIED * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseType: string; /** * Use local backup. */ localBackupEnabled?: boolean; /** * Memory allocated in GBs. 
*/ memorySizeGb: number; /** * Number of database servers. */ nodeCount: number; /** * (Output) * Deep link to the OCI console to view this resource. */ ociUrl: string; /** * (Output) * Oracle Cloud Infrastructure ID of VM Cluster. */ ocid: string; /** * OCPU count per VM. Minimum is 0.1. */ ocpuCount: number; /** * (Output) * SCAN DNS name. * ex: sp2-yi0xq-scan.ocispdelegated.ocisp10jvnet.oraclevcn.com */ scanDns: string; /** * (Output) * OCID of scan DNS record. */ scanDnsRecordId: string; /** * (Output) * OCIDs of scan IPs. */ scanIpIds: string[]; /** * (Output) * SCAN listener port - TCP */ scanListenerPortTcp: number; /** * (Output) * SCAN listener port - TLS */ scanListenerPortTcpSsl: number; /** * (Output) * Shape of VM Cluster. */ shape: string; /** * Use exadata sparse snapshots. */ sparseDiskgroupEnabled: boolean; /** * SSH public keys to be stored with cluster. */ sshPublicKeys?: string[]; /** * (Output) * State of the cluster. * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ state: string; /** * (Output) * The storage allocation for the disk group, in gigabytes (GB). */ storageSizeGb: number; /** * (Output) * Operating system version of the image. */ systemVersion: string; /** * Represents a time zone from the * [IANA Time Zone Database](https://www.iana.org/time-zones). * Structure is documented below. 
*/ timeZone: outputs.oracledatabase.CloudVmClusterPropertiesTimeZone; } interface CloudVmClusterPropertiesDiagnosticsDataCollectionOptions { /** * Indicates whether diagnostic collection is enabled for the VM cluster */ diagnosticsEventsEnabled?: boolean; /** * Indicates whether health monitoring is enabled for the VM cluster */ healthMonitoringEnabled?: boolean; /** * Indicates whether incident logs and trace collection are enabled for the VM * cluster */ incidentLogsEnabled?: boolean; } interface CloudVmClusterPropertiesTimeZone { /** * IANA Time Zone Database time zone, e.g. "America/New_York". */ id: string; /** * IANA Time Zone Database version number, e.g. "2019a". */ version?: string; } interface DbSystemProperties { /** * The number of CPU cores to enable for the DbSystem. */ computeCount: number; /** * The compute model of the DbSystem. * Possible values: * ECPU * OCPU */ computeModel: string; /** * Data collection options for DbSystem. * Structure is documented below. */ dataCollectionOptions?: outputs.oracledatabase.DbSystemPropertiesDataCollectionOptions; /** * The data storage size in GB that is currently available to DbSystems. */ dataStorageSizeGb: number; /** * The database edition of the DbSystem. * Possible values: * STANDARD_EDITION * ENTERPRISE_EDITION * ENTERPRISE_EDITION_HIGH_PERFORMANCE */ databaseEdition: string; /** * Details of the Database Home resource. * Structure is documented below. */ dbHome?: outputs.oracledatabase.DbSystemPropertiesDbHome; /** * Details of the DbSystem Options. * Structure is documented below. */ dbSystemOptions?: outputs.oracledatabase.DbSystemPropertiesDbSystemOptions; /** * The host domain name of the DbSystem. */ domain: string; /** * (Output) * The hostname of the DbSystem. */ hostname: string; /** * Prefix for DB System host names. */ hostnamePrefix?: string; /** * The initial data storage size in GB. */ initialDataStorageSizeGb: number; /** * The license model of the DbSystem. 
* Possible values: * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseModel: string; /** * (Output) * State of the DbSystem. * Possible values: * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MIGRATED * MAINTENANCE_IN_PROGRESS * NEEDS_ATTENTION * UPGRADING */ lifecycleState: string; /** * The memory size in GB. */ memorySizeGb: number; /** * The number of nodes in the DbSystem. */ nodeCount: number; /** * (Output) * OCID of the DbSystem. */ ocid: string; /** * The private IP address of the DbSystem. */ privateIp: string; /** * The reco/redo storage size in GB. */ recoStorageSizeGb: number; /** * Shape of DB System. */ shape: string; /** * SSH public keys to be stored with the DbSystem. */ sshPublicKeys: string[]; /** * Represents a time zone from the * [IANA Time Zone Database](https://www.iana.org/time-zones). * Structure is documented below. */ timeZone: outputs.oracledatabase.DbSystemPropertiesTimeZone; } interface DbSystemPropertiesDataCollectionOptions { /** * Indicates whether to enable data collection for diagnostics. */ isDiagnosticsEventsEnabled?: boolean; /** * Indicates whether to enable incident logs and trace collection. */ isIncidentLogsEnabled?: boolean; } interface DbSystemPropertiesDbHome { /** * Details of the Database resource. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/Database/ * Structure is documented below. */ database: outputs.oracledatabase.DbSystemPropertiesDbHomeDatabase; /** * A valid Oracle Database version. For a list of supported versions, use the * ListDbVersions operation. */ dbVersion: string; /** * The display name for the Database Home. The name does not have to * be unique within your project. */ displayName?: string; /** * Whether unified auditing is enabled for the Database Home. */ isUnifiedAuditingEnabled: boolean; } interface DbSystemPropertiesDbHomeDatabase { /** * The password for the default ADMIN user. 
*/ adminPassword: string; /** * The character set for the database. The default is AL32UTF8. */ characterSet: string; /** * (Output) * The date and time that the Database was created. */ createTime: string; /** * The database ID of the Database. */ databaseId: string; /** * The name of the DbHome resource associated with the Database. */ dbHomeName: string; /** * The database name. The name must begin with an alphabetic character and can * contain a maximum of eight alphanumeric characters. Special characters are * not permitted. */ dbName?: string; /** * The DB_UNIQUE_NAME of the Oracle Database being backed up. */ dbUniqueName: string; /** * The GCP Oracle zone where the Database is created. */ gcpOracleZone: string; /** * (Output) * Identifier. The name of the Database resource in the following format: * projects/{project}/locations/{region}/databases/{database} */ name: string; /** * The national character set for the database. The default is AL16UTF16. */ ncharacterSet: string; /** * (Output) * HTTPS link to OCI resources exposed to Customer via UI Interface. */ ociUrl: string; /** * (Output) * The Status of Operations Insights for this Database. * Possible values: * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ opsInsightsStatus: string; /** * The properties of a Database. * Structure is documented below. */ properties?: outputs.oracledatabase.DbSystemPropertiesDbHomeDatabaseProperties; /** * The TDE wallet password for the database. */ tdeWalletPassword?: string; } interface DbSystemPropertiesDbHomeDatabaseProperties { /** * The configuration of the Database Management service. * Structure is documented below. */ databaseManagementConfig?: outputs.oracledatabase.DbSystemPropertiesDbHomeDatabasePropertiesDatabaseManagementConfig; /** * Backup Options for the Database. * Structure is documented below. 
*/ dbBackupConfig?: outputs.oracledatabase.DbSystemPropertiesDbHomeDatabasePropertiesDbBackupConfig; /** * The Oracle Database version. */ dbVersion: string; /** * (Output) * State of the Database. * Possible values: * PROVISIONING * AVAILABLE * UPDATING * BACKUP_IN_PROGRESS * UPGRADING * CONVERTING * TERMINATING * TERMINATED * RESTORE_FAILED * FAILED */ state: string; } interface DbSystemPropertiesDbHomeDatabasePropertiesDatabaseManagementConfig { /** * (Output) * The status of the Database Management service. * Possible values: * ENABLING * ENABLED * DISABLING * DISABLED * UPDATING * FAILED_ENABLING * FAILED_DISABLING * FAILED_UPDATING */ managementState: string; /** * (Output) * The Database Management type. * Possible values: * BASIC * ADVANCED */ managementType: string; } interface DbSystemPropertiesDbHomeDatabasePropertiesDbBackupConfig { /** * If set to true, enables automatic backups on the database. */ autoBackupEnabled?: boolean; /** * Possible values: * MONDAY * TUESDAY * WEDNESDAY * THURSDAY * FRIDAY * SATURDAY * SUNDAY */ autoFullBackupDay?: string; /** * The window in which the full backup should be performed on the database. * If no value is provided, the default is anytime. * Possible values: * SLOT_ONE * SLOT_TWO * SLOT_THREE * SLOT_FOUR * SLOT_FIVE * SLOT_SIX * SLOT_SEVEN * SLOT_EIGHT * SLOT_NINE * SLOT_TEN * SLOT_ELEVEN * SLOT_TWELVE */ autoFullBackupWindow: string; /** * The window in which the incremental backup should be performed on the * database. If no value is provided, the default is anytime except the auto * full backup day. * Possible values: * SLOT_ONE * SLOT_TWO * SLOT_THREE * SLOT_FOUR * SLOT_FIVE * SLOT_SIX * SLOT_SEVEN * SLOT_EIGHT * SLOT_NINE * SLOT_TEN * SLOT_ELEVEN * SLOT_TWELVE */ autoIncrementalBackupWindow: string; /** * This defines when the backups will be deleted after Database termination. 
* Possible values: * DELETE_IMMEDIATELY * DELETE_AFTER_RETENTION_PERIOD */ backupDeletionPolicy: string; /** * Details of the database backup destinations. * Structure is documented below. */ backupDestinationDetails?: outputs.oracledatabase.DbSystemPropertiesDbHomeDatabasePropertiesDbBackupConfigBackupDestinationDetail[]; /** * The number of days an automatic backup is retained before being * automatically deleted. This value determines the earliest point in time to * which a database can be restored. Min: 1, Max: 60. */ retentionPeriodDays: number; } interface DbSystemPropertiesDbHomeDatabasePropertiesDbBackupConfigBackupDestinationDetail { /** * The type of the database backup destination. * Possible values: * NFS * RECOVERY_APPLIANCE * OBJECT_STORE * LOCAL * DBRS */ type: string; } interface DbSystemPropertiesDbSystemOptions { /** * The storage option used in DB system. * Possible values: * ASM * LVM */ storageManagement: string; } interface DbSystemPropertiesTimeZone { /** * IANA Time Zone Database time zone. For example "America/New_York". */ id: string; } interface ExadbVmClusterProperties { /** * The number of additional ECPUs per node for an Exadata VM cluster on * exascale infrastructure. */ additionalEcpuCountPerNode: number; /** * The cluster name for Exascale vm cluster. The cluster name must begin with * an alphabetic character and may contain hyphens(-) but can not contain * underscores(_). It should be not more than 11 characters and is not case * sensitive. * OCI Cluster name. */ clusterName: string; /** * Data collection options for diagnostics. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/DataCollectionOptions * Structure is documented below. */ dataCollectionOptions: outputs.oracledatabase.ExadbVmClusterPropertiesDataCollectionOptions; /** * The number of ECPUs enabled per node for an exadata vm cluster on * exascale infrastructure. 
*/ enabledEcpuCountPerNode: number; /** * The name of ExascaleDbStorageVault associated with the ExadbVmCluster. * It can refer to an existing ExascaleDbStorageVault. Or a new one can be * created during the ExadbVmCluster creation (requires * storageVaultProperties to be set). * Format: * projects/{project}/locations/{location}/exascaleDbStorageVaults/{exascale_db_storage_vault} */ exascaleDbStorageVault: string; /** * (Output) * The Oracle Grid Infrastructure (GI) software version. */ giVersion: string; /** * Grid Infrastructure Version. */ gridImageId: string; /** * (Output) * The hostname of the ExadbVmCluster. */ hostname: string; /** * Prefix for VM cluster host names. */ hostnamePrefix: string; /** * The license type of the ExadbVmCluster. * Possible values: * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseModel: string; /** * (Output) * State of the cluster. * Possible values: * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ lifecycleState: string; /** * (Output) * Memory per VM (GB) (Read-only): Shows the amount of memory allocated to * each VM. Memory is calculated based on 2.75 GB per Total ECPUs. */ memorySizeGb: number; /** * The number of nodes/VMs in the ExadbVmCluster. */ nodeCount: number; /** * (Output) * Deep link to the OCI console to view this resource. */ ociUri: string; /** * SCAN listener port - TCP */ scanListenerPortTcp: number; /** * The shape attribute of the VM cluster. The type of Exascale storage used * for Exadata VM cluster. The default is SMART_STORAGE which supports Oracle * Database 23ai and later * Possible values: * SMART_STORAGE * BLOCK_STORAGE */ shapeAttribute: string; /** * The SSH public keys for the ExadbVmCluster. */ sshPublicKeys: string[]; /** * Represents a time zone from the * [IANA Time Zone Database](https://www.iana.org/time-zones). * Structure is documented below. 
*/ timeZone?: outputs.oracledatabase.ExadbVmClusterPropertiesTimeZone; /** * The storage allocation for the exadbvmcluster, in gigabytes (GB). * Structure is documented below. */ vmFileSystemStorage: outputs.oracledatabase.ExadbVmClusterPropertiesVmFileSystemStorage; } interface ExadbVmClusterPropertiesDataCollectionOptions { /** * Indicates whether to enable data collection for diagnostics. */ isDiagnosticsEventsEnabled: boolean; /** * Indicates whether to enable health monitoring. */ isHealthMonitoringEnabled: boolean; /** * Indicates whether to enable incident logs and trace collection. */ isIncidentLogsEnabled: boolean; } interface ExadbVmClusterPropertiesTimeZone { /** * IANA Time Zone Database time zone. For example "America/New_York". */ id?: string; /** * IANA Time Zone Database version number. For example "2019a". */ version?: string; } interface ExadbVmClusterPropertiesVmFileSystemStorage { /** * The storage allocation for the exadbvmcluster per node, in gigabytes (GB). * This field is used to calculate the total storage allocation for the * exadbvmcluster. */ sizeInGbsPerNode: number; } interface ExascaleDbStorageVaultProperties { /** * The size of additional flash cache in percentage of high capacity * database storage. */ additionalFlashCachePercent: number; /** * (Output) * The shape attributes of the VM clusters attached to the * ExascaleDbStorageVault. */ attachedShapeAttributes: string[]; /** * (Output) * The shape attributes available for the VM clusters to be attached to the * ExascaleDbStorageVault. */ availableShapeAttributes: string[]; /** * The storage details of the ExascaleDbStorageVault. * Structure is documented below. */ exascaleDbStorageDetails: outputs.oracledatabase.ExascaleDbStorageVaultPropertiesExascaleDbStorageDetails; /** * (Output) * Deep link to the OCI console to view this resource. */ ociUri: string; /** * (Output) * The OCID for the ExascaleDbStorageVault. 
*/ ocid: string; /** * (Output) * The state of the ExascaleDbStorageVault. * Possible values: * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED */ state: string; /** * Represents a time zone from the * [IANA Time Zone Database](https://www.iana.org/time-zones). * Structure is documented below. */ timeZone: outputs.oracledatabase.ExascaleDbStorageVaultPropertiesTimeZone; /** * (Output) * The number of VM clusters associated with the ExascaleDbStorageVault. */ vmClusterCount: number; /** * (Output) * The list of VM cluster OCIDs associated with the ExascaleDbStorageVault. */ vmClusterIds: string[]; } interface ExascaleDbStorageVaultPropertiesExascaleDbStorageDetails { /** * (Output) * The available storage capacity for the ExascaleDbStorageVault, in gigabytes * (GB). */ availableSizeGbs: number; /** * The total storage allocation for the ExascaleDbStorageVault, in gigabytes * (GB). */ totalSizeGbs: number; } interface ExascaleDbStorageVaultPropertiesTimeZone { /** * IANA Time Zone Database time zone. For example "America/New_York". */ id: string; /** * IANA Time Zone Database version number. For example "2019a". */ version: string; } interface GetAutonomousDatabaseProperty { /** * The amount of storage currently being used for user and system data, in * terabytes. */ actualUsedDataStorageSizeTb: number; /** * The amount of storage currently allocated for the database tables and * billed for, rounded up in terabytes. */ allocatedStorageSizeTb: number; /** * Oracle APEX Application Development. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseApex */ apexDetails: outputs.oracledatabase.GetAutonomousDatabasePropertyApexDetail[]; /** * This field indicates the status of Data Guard and Access control for the * Autonomous Database. The field's value is null if Data Guard is disabled * or Access Control is disabled. 
The field's value is TRUE if both Data Guard * and Access Control are enabled, and the Autonomous Database is using * primary IP access control list (ACL) for standby. The field's value is * FALSE if both Data Guard and Access Control are enabled, and the Autonomous * Database is using a different IP access control list (ACL) for standby * compared to primary. */ arePrimaryAllowlistedIpsUsed: boolean; /** * The Autonomous Container Database OCID. */ autonomousContainerDatabaseId: string; /** * The list of available Oracle Database upgrade versions for an Autonomous * Database. */ availableUpgradeVersions: string[]; /** * The retention period for the Autonomous Database. This field is specified * in days, can range from 1 day to 60 days, and has a default value of * 60 days. */ backupRetentionPeriodDays: number; /** * The character set for the Autonomous Database. The default is AL32UTF8. */ characterSet: string; /** * The number of compute servers for the Autonomous Database. */ computeCount: number; /** * The connection string used to connect to the Autonomous Database. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionStrings */ connectionStrings: outputs.oracledatabase.GetAutonomousDatabasePropertyConnectionString[]; /** * The URLs for accessing Oracle Application Express (APEX) and SQL Developer * Web with a browser from a Compute instance. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionUrls */ connectionUrls: outputs.oracledatabase.GetAutonomousDatabasePropertyConnectionUrl[]; /** * The number of CPU cores to be made available to the database. */ cpuCoreCount: number; /** * The list of customer contacts. */ customerContacts: outputs.oracledatabase.GetAutonomousDatabasePropertyCustomerContact[]; /** * The current state of the Data Safe registration for the * Autonomous Database. 
* Possible values: * DATA_SAFE_STATE_UNSPECIFIED * REGISTERING * REGISTERED * DEREGISTERING * NOT_REGISTERED * FAILED */ dataSafeState: string; /** * The size of the data stored in the database, in gigabytes. */ dataStorageSizeGb: number; /** * The size of the data stored in the database, in terabytes. */ dataStorageSizeTb: number; /** * The current state of database management for the Autonomous Database. * Possible values: * DATABASE_MANAGEMENT_STATE_UNSPECIFIED * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ databaseManagementState: string; /** * The edition of the Autonomous Databases. * Possible values: * DATABASE_EDITION_UNSPECIFIED * STANDARD_EDITION * ENTERPRISE_EDITION */ dbEdition: string; /** * The Oracle Database version for the Autonomous Database. */ dbVersion: string; /** * Possible values: * DB_WORKLOAD_UNSPECIFIED * OLTP * DW * AJD * APEX */ dbWorkload: string; /** * This field indicates the number of seconds of data loss during a Data * Guard failover. */ failedDataRecoveryDuration: string; /** * This field indicates if auto scaling is enabled for the Autonomous Database * CPU core count. */ isAutoScalingEnabled: boolean; /** * This field indicates whether the Autonomous Database has local (in-region) * Data Guard enabled. */ isLocalDataGuardEnabled: boolean; /** * This field indicates if auto scaling is enabled for the Autonomous Database * storage. */ isStorageAutoScalingEnabled: boolean; /** * The license type used for the Autonomous Database. * Possible values: * LICENSE_TYPE_UNSPECIFIED * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseType: string; /** * The details of the current lifestyle state of the Autonomous Database. */ lifecycleDetails: string; /** * This field indicates the maximum data loss limit for an Autonomous * Database, in seconds. */ localAdgAutoFailoverMaxDataLossLimit: number; /** * This field indicates the local disaster recovery (DR) type of an * Autonomous Database. 
* Possible values: * LOCAL_DISASTER_RECOVERY_TYPE_UNSPECIFIED * ADG * BACKUP_BASED */ localDisasterRecoveryType: string; /** * Autonomous Data Guard standby database details. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseStandbySummary */ localStandbyDbs: outputs.oracledatabase.GetAutonomousDatabasePropertyLocalStandbyDb[]; /** * The date and time when maintenance will begin. */ maintenanceBeginTime: string; /** * The date and time when maintenance will end. */ maintenanceEndTime: string; /** * The maintenance schedule of the Autonomous Database. * Possible values: * MAINTENANCE_SCHEDULE_TYPE_UNSPECIFIED * EARLY * REGULAR */ maintenanceScheduleType: string; /** * The amount of memory enabled per ECPU, in gigabytes. */ memoryPerOracleComputeUnitGbs: number; /** * The memory assigned to in-memory tables in an Autonomous Database. */ memoryTableGbs: number; /** * This field specifies if the Autonomous Database requires mTLS connections. */ mtlsConnectionRequired: boolean; /** * The national character set for the Autonomous Database. The default is * AL16UTF16. */ nCharacterSet: string; /** * The long term backup schedule of the Autonomous Database. */ nextLongTermBackupTime: string; /** * The Oracle Cloud Infrastructure link for the Autonomous Database. */ ociUrl: string; /** * OCID of the Autonomous Database. * https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle */ ocid: string; /** * This field indicates the current mode of the Autonomous Database. * Possible values: * OPEN_MODE_UNSPECIFIED * READ_ONLY * READ_WRITE */ openMode: string; /** * Possible values: * OPERATIONS_INSIGHTS_STATE_UNSPECIFIED * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ operationsInsightsState: string; /** * The list of OCIDs of standby databases located in Autonomous Data Guard * remote regions that are associated with the source database. 
*/ peerDbIds: string[]; /** * The permission level of the Autonomous Database. * Possible values: * PERMISSION_LEVEL_UNSPECIFIED * RESTRICTED * UNRESTRICTED */ permissionLevel: string; /** * The private endpoint for the Autonomous Database. */ privateEndpoint: string; /** * The private endpoint IP address for the Autonomous Database. */ privateEndpointIp: string; /** * The private endpoint label for the Autonomous Database. */ privateEndpointLabel: string; /** * The refresh mode of the cloned Autonomous Database. * Possible values: * REFRESHABLE_MODE_UNSPECIFIED * AUTOMATIC * MANUAL */ refreshableMode: string; /** * The refresh State of the clone. * Possible values: * REFRESHABLE_STATE_UNSPECIFIED * REFRESHING * NOT_REFRESHING */ refreshableState: string; /** * The Data Guard role of the Autonomous Database. * Possible values: * ROLE_UNSPECIFIED * PRIMARY * STANDBY * DISABLED_STANDBY * BACKUP_COPY * SNAPSHOT_STANDBY */ role: string; /** * The list and details of the scheduled operations of the Autonomous * Database. */ scheduledOperationDetails: outputs.oracledatabase.GetAutonomousDatabasePropertyScheduledOperationDetail[]; /** * The ID of the Oracle Cloud Infrastructure vault secret. */ secretId: string; /** * The SQL Web Developer URL for the Autonomous Database. */ sqlWebDeveloperUrl: string; /** * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * STOPPING * STOPPED * STARTING * TERMINATING * TERMINATED * UNAVAILABLE * RESTORE_IN_PROGRESS * RESTORE_FAILED * BACKUP_IN_PROGRESS * SCALE_IN_PROGRESS * AVAILABLE_NEEDS_ATTENTION * UPDATING * MAINTENANCE_IN_PROGRESS * RESTARTING * RECREATING * ROLE_CHANGE_IN_PROGRESS * UPGRADING * INACCESSIBLE * STANDBY */ state: string; /** * The list of available regions that can be used to create a clone for the * Autonomous Database. */ supportedCloneRegions: string[]; /** * The storage space used by automatic backups of Autonomous Database, in * gigabytes. 
*/ totalAutoBackupStorageSizeGbs: number; /** * The storage space used by Autonomous Database, in terabytes. * NOTE(review): the generated doc said "gigabytes", but the field's "Tbs" suffix indicates terabytes — confirm against the upstream API. */ usedDataStorageSizeTbs: number; /** * The ID of the Oracle Cloud Infrastructure vault. */ vaultId: string; } interface GetAutonomousDatabasePropertyApexDetail { /** * The Oracle APEX Application Development version. */ apexVersion: string; /** * The Oracle REST Data Services (ORDS) version. */ ordsVersion: string; } interface GetAutonomousDatabasePropertyConnectionString { /** * A list of all connection strings that can be used to connect to the * Autonomous Database. */ allConnectionStrings: outputs.oracledatabase.GetAutonomousDatabasePropertyConnectionStringAllConnectionString[]; /** * The database service provides the least level of resources to each SQL * statement, but supports the most number of concurrent SQL statements. */ dedicated: string; /** * The database service provides the highest level of resources to each SQL * statement. */ high: string; /** * The database service provides the least level of resources to each SQL * statement. */ low: string; /** * The database service provides a lower level of resources to each SQL * statement. */ medium: string; /** * A list of connection string profiles to allow clients to group, filter, and * select values based on the structured metadata. */ profiles: outputs.oracledatabase.GetAutonomousDatabasePropertyConnectionStringProfile[]; } interface GetAutonomousDatabasePropertyConnectionStringAllConnectionString { /** * The database service provides the highest level of resources to each SQL * statement. */ high: string; /** * The database service provides the least level of resources to each SQL * statement. */ low: string; /** * The database service provides a lower level of resources to each SQL * statement. */ medium: string; } interface GetAutonomousDatabasePropertyConnectionStringProfile { /** * The current consumer group being used by the connection. 
* Possible values: * CONSUMER_GROUP_UNSPECIFIED * HIGH * MEDIUM * LOW * TP * TPURGENT */ consumerGroup: string; /** * The display name for the database connection. */ displayName: string; /** * The host name format being currently used in connection string. * Possible values: * HOST_FORMAT_UNSPECIFIED * FQDN * IP */ hostFormat: string; /** * This field indicates if the connection string is regional and is only * applicable for cross-region Data Guard. */ isRegional: boolean; /** * The protocol being used by the connection. * Possible values: * PROTOCOL_UNSPECIFIED * TCP * TCPS */ protocol: string; /** * The current session mode of the connection. * Possible values: * SESSION_MODE_UNSPECIFIED * DIRECT * INDIRECT */ sessionMode: string; /** * The syntax of the connection string. * Possible values: * SYNTAX_FORMAT_UNSPECIFIED * LONG * EZCONNECT * EZCONNECTPLUS */ syntaxFormat: string; /** * This field indicates the TLS authentication type of the connection. * Possible values: * TLS_AUTHENTICATION_UNSPECIFIED * SERVER * MUTUAL */ tlsAuthentication: string; /** * The value of the connection string. */ value: string; } interface GetAutonomousDatabasePropertyConnectionUrl { /** * Oracle Application Express (APEX) URL. */ apexUri: string; /** * The URL of the Database Transforms for the Autonomous Database. */ databaseTransformsUri: string; /** * The URL of the Graph Studio for the Autonomous Database. */ graphStudioUri: string; /** * The URL of the Oracle Machine Learning (OML) Notebook for the Autonomous * Database. */ machineLearningNotebookUri: string; /** * The URL of Machine Learning user management for the Autonomous Database. */ machineLearningUserManagementUri: string; /** * The URL of the MongoDB API for the Autonomous Database. */ mongoDbUri: string; /** * The Oracle REST Data Services (ORDS) URL of the Web Access for the * Autonomous Database. */ ordsUri: string; /** * The URL of the Oracle SQL Developer Web for the Autonomous Database. 
*/ sqlDevWebUri: string; } interface GetAutonomousDatabasePropertyCustomerContact { /** * The email address used by Oracle to send notifications regarding databases * and infrastructure. */ email: string; } interface GetAutonomousDatabasePropertyLocalStandbyDb { /** * The date and time the Autonomous Data Guard role was switched for the * standby Autonomous Database. */ dataGuardRoleChangedTime: string; /** * The date and time the Disaster Recovery role was switched for the standby * Autonomous Database. */ disasterRecoveryRoleChangedTime: string; /** * The amount of time, in seconds, that the data of the standby database lags * in comparison to the data of the primary database. */ lagTimeDuration: string; /** * The additional details about the current lifecycle state of the * Autonomous Database. */ lifecycleDetails: string; /** * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * STOPPING * STOPPED * STARTING * TERMINATING * TERMINATED * UNAVAILABLE * RESTORE_IN_PROGRESS * RESTORE_FAILED * BACKUP_IN_PROGRESS * SCALE_IN_PROGRESS * AVAILABLE_NEEDS_ATTENTION * UPDATING * MAINTENANCE_IN_PROGRESS * RESTARTING * RECREATING * ROLE_CHANGE_IN_PROGRESS * UPGRADING * INACCESSIBLE * STANDBY */ state: string; } interface GetAutonomousDatabasePropertyScheduledOperationDetail { /** * Possible values: * DAY_OF_WEEK_UNSPECIFIED * MONDAY * TUESDAY * WEDNESDAY * THURSDAY * FRIDAY * SATURDAY * SUNDAY */ dayOfWeek: string; /** * Represents a time of day. The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and 'google.protobuf.Timestamp'. */ startTimes: outputs.oracledatabase.GetAutonomousDatabasePropertyScheduledOperationDetailStartTime[]; /** * Represents a time of day. The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and 'google.protobuf.Timestamp'. 
*/ stopTimes: outputs.oracledatabase.GetAutonomousDatabasePropertyScheduledOperationDetailStopTime[]; } interface GetAutonomousDatabasePropertyScheduledOperationDetailStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose * to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may * allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetAutonomousDatabasePropertyScheduledOperationDetailStopTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose * to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may * allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetAutonomousDatabaseSourceConfig { /** * This field specifies if the replication of automatic backups is enabled when creating a Data Guard. */ automaticBackupsReplicationEnabled: boolean; /** * The name of the primary Autonomous Database that is used to create a Peer Autonomous Database from a source. */ autonomousDatabase: string; } interface GetAutonomousDatabasesAutonomousDatabase { /** * The password for the default ADMIN user. */ adminPassword: string; /** * The ID of the Autonomous Database to create. This value is restricted * to (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63 * characters in length. The value must start with a letter and end with * a letter or a number. 
*/ autonomousDatabaseId: string; /** * The subnet CIDR range for the Autonomous Database. */ cidr: string; /** * The date and time that the Autonomous Database was created. */ createTime: string; /** * The name of the Autonomous Database. The database name must be unique in * the project. The name must begin with a letter and can * contain a maximum of 30 alphanumeric characters. */ database: string; /** * Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail. */ deletionProtection: boolean; /** * List of supported GCP region to clone the Autonomous Database for disaster recovery. */ disasterRecoverySupportedLocations: string[]; /** * The display name for the Autonomous Database. The name does not have to * be unique within your project. */ displayName: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * The ID of the subscription entitlement associated with the Autonomous * Database. */ entitlementId: string; /** * The labels or tags associated with the Autonomous Database. * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effective_labels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * The location of the resource. * * - - - */ location: string; /** * Identifier. The name of the Autonomous Database resource in the following format: * projects/{project}/locations/{region}/autonomousDatabases/{autonomous_database} */ name: string; /** * The name of the VPC network used by the Autonomous Database. * Format: projects/{project}/global/networks/{network} */ network: string; /** * The name of the OdbNetwork associated with the Autonomous Database. 
* Format: * projects/{project}/locations/{location}/odbNetworks/{odb_network} * It is optional but if specified, this should match the parent ODBNetwork of * the odbSubnet and backup_odb_subnet. */ odbNetwork: string; /** * The name of the OdbSubnet associated with the Autonomous Database for * IP allocation. Format: * projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} */ odbSubnet: string; /** * The peer Autonomous Database names of the given Autonomous Database. */ peerAutonomousDatabases: string[]; /** * The project to which the resource belongs. If it * is not provided, the provider project is used. */ project: string; /** * The properties of an Autonomous Database. */ properties: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabaseProperty[]; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * The source Autonomous Database configuration for the standby Autonomous Database. */ sourceConfigs: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabaseSourceConfig[]; } interface GetAutonomousDatabasesAutonomousDatabaseProperty { /** * The amount of storage currently being used for user and system data, in * terabytes. */ actualUsedDataStorageSizeTb: number; /** * The amount of storage currently allocated for the database tables and * billed for, rounded up in terabytes. */ allocatedStorageSizeTb: number; /** * Oracle APEX Application Development. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseApex */ apexDetails: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyApexDetail[]; /** * This field indicates the status of Data Guard and Access control for the * Autonomous Database. The field's value is null if Data Guard is disabled * or Access Control is disabled. 
The field's value is TRUE if both Data Guard * and Access Control are enabled, and the Autonomous Database is using * primary IP access control list (ACL) for standby. The field's value is * FALSE if both Data Guard and Access Control are enabled, and the Autonomous * Database is using a different IP access control list (ACL) for standby * compared to primary. */ arePrimaryAllowlistedIpsUsed: boolean; /** * The Autonomous Container Database OCID. */ autonomousContainerDatabaseId: string; /** * The list of available Oracle Database upgrade versions for an Autonomous * Database. */ availableUpgradeVersions: string[]; /** * The retention period for the Autonomous Database. This field is specified * in days, can range from 1 day to 60 days, and has a default value of * 60 days. */ backupRetentionPeriodDays: number; /** * The character set for the Autonomous Database. The default is AL32UTF8. */ characterSet: string; /** * The number of compute servers for the Autonomous Database. */ computeCount: number; /** * The connection string used to connect to the Autonomous Database. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionStrings */ connectionStrings: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyConnectionString[]; /** * The URLs for accessing Oracle Application Express (APEX) and SQL Developer * Web with a browser from a Compute instance. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseConnectionUrls */ connectionUrls: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyConnectionUrl[]; /** * The number of CPU cores to be made available to the database. */ cpuCoreCount: number; /** * The list of customer contacts. */ customerContacts: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyCustomerContact[]; /** * The current state of the Data Safe registration for the * Autonomous Database. 
* Possible values: * DATA_SAFE_STATE_UNSPECIFIED * REGISTERING * REGISTERED * DEREGISTERING * NOT_REGISTERED * FAILED */ dataSafeState: string; /** * The size of the data stored in the database, in gigabytes. */ dataStorageSizeGb: number; /** * The size of the data stored in the database, in terabytes. */ dataStorageSizeTb: number; /** * The current state of database management for the Autonomous Database. * Possible values: * DATABASE_MANAGEMENT_STATE_UNSPECIFIED * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ databaseManagementState: string; /** * The edition of the Autonomous Databases. * Possible values: * DATABASE_EDITION_UNSPECIFIED * STANDARD_EDITION * ENTERPRISE_EDITION */ dbEdition: string; /** * The Oracle Database version for the Autonomous Database. */ dbVersion: string; /** * Possible values: * DB_WORKLOAD_UNSPECIFIED * OLTP * DW * AJD * APEX */ dbWorkload: string; /** * This field indicates the number of seconds of data loss during a Data * Guard failover. */ failedDataRecoveryDuration: string; /** * This field indicates if auto scaling is enabled for the Autonomous Database * CPU core count. */ isAutoScalingEnabled: boolean; /** * This field indicates whether the Autonomous Database has local (in-region) * Data Guard enabled. */ isLocalDataGuardEnabled: boolean; /** * This field indicates if auto scaling is enabled for the Autonomous Database * storage. */ isStorageAutoScalingEnabled: boolean; /** * The license type used for the Autonomous Database. * Possible values: * LICENSE_TYPE_UNSPECIFIED * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseType: string; /** * The details of the current lifecycle state of the Autonomous Database. */ lifecycleDetails: string; /** * This field indicates the maximum data loss limit for an Autonomous * Database, in seconds. */ localAdgAutoFailoverMaxDataLossLimit: number; /** * This field indicates the local disaster recovery (DR) type of an * Autonomous Database. 
* Possible values: * LOCAL_DISASTER_RECOVERY_TYPE_UNSPECIFIED * ADG * BACKUP_BASED */ localDisasterRecoveryType: string; /** * Autonomous Data Guard standby database details. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/AutonomousDatabaseStandbySummary */ localStandbyDbs: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyLocalStandbyDb[]; /** * The date and time when maintenance will begin. */ maintenanceBeginTime: string; /** * The date and time when maintenance will end. */ maintenanceEndTime: string; /** * The maintenance schedule of the Autonomous Database. * Possible values: * MAINTENANCE_SCHEDULE_TYPE_UNSPECIFIED * EARLY * REGULAR */ maintenanceScheduleType: string; /** * The amount of memory enabled per ECPU, in gigabytes. */ memoryPerOracleComputeUnitGbs: number; /** * The memory assigned to in-memory tables in an Autonomous Database. */ memoryTableGbs: number; /** * This field specifies if the Autonomous Database requires mTLS connections. */ mtlsConnectionRequired: boolean; /** * The national character set for the Autonomous Database. The default is * AL16UTF16. */ nCharacterSet: string; /** * The long term backup schedule of the Autonomous Database. */ nextLongTermBackupTime: string; /** * The Oracle Cloud Infrastructure link for the Autonomous Database. */ ociUrl: string; /** * OCID of the Autonomous Database. * https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle */ ocid: string; /** * This field indicates the current mode of the Autonomous Database. * Possible values: * OPEN_MODE_UNSPECIFIED * READ_ONLY * READ_WRITE */ openMode: string; /** * Possible values: * OPERATIONS_INSIGHTS_STATE_UNSPECIFIED * ENABLING * ENABLED * DISABLING * NOT_ENABLED * FAILED_ENABLING * FAILED_DISABLING */ operationsInsightsState: string; /** * The list of OCIDs of standby databases located in Autonomous Data Guard * remote regions that are associated with the source database. 
*/ peerDbIds: string[]; /** * The permission level of the Autonomous Database. * Possible values: * PERMISSION_LEVEL_UNSPECIFIED * RESTRICTED * UNRESTRICTED */ permissionLevel: string; /** * The private endpoint for the Autonomous Database. */ privateEndpoint: string; /** * The private endpoint IP address for the Autonomous Database. */ privateEndpointIp: string; /** * The private endpoint label for the Autonomous Database. */ privateEndpointLabel: string; /** * The refresh mode of the cloned Autonomous Database. * Possible values: * REFRESHABLE_MODE_UNSPECIFIED * AUTOMATIC * MANUAL */ refreshableMode: string; /** * The refresh State of the clone. * Possible values: * REFRESHABLE_STATE_UNSPECIFIED * REFRESHING * NOT_REFRESHING */ refreshableState: string; /** * The Data Guard role of the Autonomous Database. * Possible values: * ROLE_UNSPECIFIED * PRIMARY * STANDBY * DISABLED_STANDBY * BACKUP_COPY * SNAPSHOT_STANDBY */ role: string; /** * The list and details of the scheduled operations of the Autonomous * Database. */ scheduledOperationDetails: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyScheduledOperationDetail[]; /** * The ID of the Oracle Cloud Infrastructure vault secret. */ secretId: string; /** * The SQL Web Developer URL for the Autonomous Database. */ sqlWebDeveloperUrl: string; /** * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * STOPPING * STOPPED * STARTING * TERMINATING * TERMINATED * UNAVAILABLE * RESTORE_IN_PROGRESS * RESTORE_FAILED * BACKUP_IN_PROGRESS * SCALE_IN_PROGRESS * AVAILABLE_NEEDS_ATTENTION * UPDATING * MAINTENANCE_IN_PROGRESS * RESTARTING * RECREATING * ROLE_CHANGE_IN_PROGRESS * UPGRADING * INACCESSIBLE * STANDBY */ state: string; /** * The list of available regions that can be used to create a clone for the * Autonomous Database. */ supportedCloneRegions: string[]; /** * The storage space used by automatic backups of Autonomous Database, in * gigabytes. 
*/ totalAutoBackupStorageSizeGbs: number; /** * The storage space used by Autonomous Database, in terabytes. * NOTE(review): the generated doc said "gigabytes", but the field's "Tbs" suffix indicates terabytes — confirm against the upstream API. */ usedDataStorageSizeTbs: number; /** * The ID of the Oracle Cloud Infrastructure vault. */ vaultId: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyApexDetail { /** * The Oracle APEX Application Development version. */ apexVersion: string; /** * The Oracle REST Data Services (ORDS) version. */ ordsVersion: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyConnectionString { /** * A list of all connection strings that can be used to connect to the * Autonomous Database. */ allConnectionStrings: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyConnectionStringAllConnectionString[]; /** * The database service provides the least level of resources to each SQL * statement, but supports the most number of concurrent SQL statements. */ dedicated: string; /** * The database service provides the highest level of resources to each SQL * statement. */ high: string; /** * The database service provides the least level of resources to each SQL * statement. */ low: string; /** * The database service provides a lower level of resources to each SQL * statement. */ medium: string; /** * A list of connection string profiles to allow clients to group, filter, and * select values based on the structured metadata. */ profiles: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyConnectionStringProfile[]; } interface GetAutonomousDatabasesAutonomousDatabasePropertyConnectionStringAllConnectionString { /** * The database service provides the highest level of resources to each SQL * statement. */ high: string; /** * The database service provides the least level of resources to each SQL * statement. */ low: string; /** * The database service provides a lower level of resources to each SQL * statement. 
*/ medium: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyConnectionStringProfile { /** * The current consumer group being used by the connection. * Possible values: * CONSUMER_GROUP_UNSPECIFIED * HIGH * MEDIUM * LOW * TP * TPURGENT */ consumerGroup: string; /** * The display name for the database connection. */ displayName: string; /** * The host name format being currently used in connection string. * Possible values: * HOST_FORMAT_UNSPECIFIED * FQDN * IP */ hostFormat: string; /** * This field indicates if the connection string is regional and is only * applicable for cross-region Data Guard. */ isRegional: boolean; /** * The protocol being used by the connection. * Possible values: * PROTOCOL_UNSPECIFIED * TCP * TCPS */ protocol: string; /** * The current session mode of the connection. * Possible values: * SESSION_MODE_UNSPECIFIED * DIRECT * INDIRECT */ sessionMode: string; /** * The syntax of the connection string. * Possible values: * SYNTAX_FORMAT_UNSPECIFIED * LONG * EZCONNECT * EZCONNECTPLUS */ syntaxFormat: string; /** * This field indicates the TLS authentication type of the connection. * Possible values: * TLS_AUTHENTICATION_UNSPECIFIED * SERVER * MUTUAL */ tlsAuthentication: string; /** * The value of the connection string. */ value: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyConnectionUrl { /** * Oracle Application Express (APEX) URL. */ apexUri: string; /** * The URL of the Database Transforms for the Autonomous Database. */ databaseTransformsUri: string; /** * The URL of the Graph Studio for the Autonomous Database. */ graphStudioUri: string; /** * The URL of the Oracle Machine Learning (OML) Notebook for the Autonomous * Database. */ machineLearningNotebookUri: string; /** * The URL of Machine Learning user management for the Autonomous Database. */ machineLearningUserManagementUri: string; /** * The URL of the MongoDB API for the Autonomous Database. 
*/ mongoDbUri: string; /** * The Oracle REST Data Services (ORDS) URL of the Web Access for the * Autonomous Database. */ ordsUri: string; /** * The URL of the Oracle SQL Developer Web for the Autonomous Database. */ sqlDevWebUri: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyCustomerContact { /** * The email address used by Oracle to send notifications regarding databases * and infrastructure. */ email: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyLocalStandbyDb { /** * The date and time the Autonomous Data Guard role was switched for the * standby Autonomous Database. */ dataGuardRoleChangedTime: string; /** * The date and time the Disaster Recovery role was switched for the standby * Autonomous Database. */ disasterRecoveryRoleChangedTime: string; /** * The amount of time, in seconds, that the data of the standby database lags * in comparison to the data of the primary database. */ lagTimeDuration: string; /** * The additional details about the current lifecycle state of the * Autonomous Database. */ lifecycleDetails: string; /** * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * STOPPING * STOPPED * STARTING * TERMINATING * TERMINATED * UNAVAILABLE * RESTORE_IN_PROGRESS * RESTORE_FAILED * BACKUP_IN_PROGRESS * SCALE_IN_PROGRESS * AVAILABLE_NEEDS_ATTENTION * UPDATING * MAINTENANCE_IN_PROGRESS * RESTARTING * RECREATING * ROLE_CHANGE_IN_PROGRESS * UPGRADING * INACCESSIBLE * STANDBY */ state: string; } interface GetAutonomousDatabasesAutonomousDatabasePropertyScheduledOperationDetail { /** * Possible values: * DAY_OF_WEEK_UNSPECIFIED * MONDAY * TUESDAY * WEDNESDAY * THURSDAY * FRIDAY * SATURDAY * SUNDAY */ dayOfWeek: string; /** * Represents a time of day. The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and 'google.protobuf.Timestamp'. 
*/ startTimes: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyScheduledOperationDetailStartTime[]; /** * Represents a time of day. The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and 'google.protobuf.Timestamp'. */ stopTimes: outputs.oracledatabase.GetAutonomousDatabasesAutonomousDatabasePropertyScheduledOperationDetailStopTime[]; } interface GetAutonomousDatabasesAutonomousDatabasePropertyScheduledOperationDetailStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose * to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may * allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetAutonomousDatabasesAutonomousDatabasePropertyScheduledOperationDetailStopTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose * to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may * allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetAutonomousDatabasesAutonomousDatabaseSourceConfig { /** * This field specifies if the replication of automatic backups is enabled when creating a Data Guard. */ automaticBackupsReplicationEnabled: boolean; /** * The name of the primary Autonomous Database that is used to create a Peer Autonomous Database from a source. 
*/ autonomousDatabase: string; } interface GetCloudExadataInfrastructureProperty { /** * The requested number of additional storage servers activated for the * Exadata Infrastructure. */ activatedStorageCount: number; /** * The requested number of additional storage servers for the Exadata * Infrastructure. */ additionalStorageCount: number; /** * The available storage can be allocated to the Exadata Infrastructure * resource, in gigabytes (GB). */ availableStorageSizeGb: number; /** * The number of compute servers for the Exadata Infrastructure. */ computeCount: number; /** * The number of enabled CPU cores. */ cpuCount: number; /** * The list of customer contacts. */ customerContacts: outputs.oracledatabase.GetCloudExadataInfrastructurePropertyCustomerContact[]; /** * Size, in terabytes, of the DATA disk group. */ dataStorageSizeTb: number; /** * The local node storage allocated in GBs. */ dbNodeStorageSizeGb: number; /** * The software version of the database servers (dom0) in the Exadata * Infrastructure. */ dbServerVersion: string; /** * Maintenance window as defined by Oracle. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/MaintenanceWindow */ maintenanceWindows: outputs.oracledatabase.GetCloudExadataInfrastructurePropertyMaintenanceWindow[]; /** * The total number of CPU cores available. */ maxCpuCount: number; /** * The total available DATA disk group size. */ maxDataStorageTb: number; /** * The total local node storage available in GBs. */ maxDbNodeStorageSizeGb: number; /** * The total memory available in GBs. */ maxMemoryGb: number; /** * The memory allocated in GBs. */ memorySizeGb: number; /** * The monthly software version of the database servers (dom0) * in the Exadata Infrastructure. Example: 20.1.15 */ monthlyDbServerVersion: string; /** * The monthly software version of the storage servers (cells) * in the Exadata Infrastructure. 
Example: 20.1.15 */ monthlyStorageServerVersion: string; /** * The OCID of the next maintenance run. */ nextMaintenanceRunId: string; /** * The time when the next maintenance run will occur. */ nextMaintenanceRunTime: string; /** * The time when the next security maintenance run will occur. */ nextSecurityMaintenanceRunTime: string; /** * Deep link to the OCI console to view this resource. */ ociUrl: string; /** * OCID of created infra. * https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle */ ocid: string; /** * The shape of the Exadata Infrastructure. The shape determines the * amount of CPU, storage, and memory resources allocated to the instance. */ shape: string; /** * The current lifecycle state of the Exadata Infrastructure. * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ state: string; /** * The number of Cloud Exadata storage servers for the Exadata Infrastructure. */ storageCount: number; /** * The software version of the storage servers (cells) in the Exadata * Infrastructure. */ storageServerVersion: string; /** * The total storage allocated to the Exadata Infrastructure * resource, in gigabytes (GB). */ totalStorageSizeGb: number; } interface GetCloudExadataInfrastructurePropertyCustomerContact { /** * The email address used by Oracle to send notifications regarding databases * and infrastructure. */ email: string; } interface GetCloudExadataInfrastructurePropertyMaintenanceWindow { /** * Determines the amount of time the system will wait before the start of each * database server patching operation. Custom action timeout is in minutes and * valid value is between 15 to 120 (inclusive). */ customActionTimeoutMins: number; /** * Days during the week when maintenance should be performed. */ daysOfWeeks: string[]; /** * The window of hours during the day when maintenance should be performed. * The window is a 4 hour slot. 
Valid values are: * 0 - represents time slot 0:00 - 3:59 UTC * 4 - represents time slot 4:00 - 7:59 UTC * 8 - represents time slot 8:00 - 11:59 UTC * 12 - represents time slot 12:00 - 15:59 UTC * 16 - represents time slot 16:00 - 19:59 UTC * 20 - represents time slot 20:00 - 23:59 UTC */ hoursOfDays: number[]; /** * If true, enables the configuration of a custom action timeout (waiting * period) between database server patching operations. */ isCustomActionTimeoutEnabled: boolean; /** * Lead time window allows user to set a lead time to prepare for a down time. * The lead time is in weeks and valid value is between 1 to 4. */ leadTimeWeek: number; /** * Months during the year when maintenance should be performed. */ months: string[]; /** * Cloud CloudExadataInfrastructure node patching method, either "ROLLING" * or "NONROLLING". Default value is ROLLING. * Possible values: * PATCHING_MODE_UNSPECIFIED * ROLLING * NON_ROLLING */ patchingMode: string; /** * The maintenance window scheduling preference. * Possible values: * MAINTENANCE_WINDOW_PREFERENCE_UNSPECIFIED * CUSTOM_PREFERENCE * NO_PREFERENCE */ preference: string; /** * Weeks during the month when maintenance should be performed. Weeks start on * the 1st, 8th, 15th, and 22nd days of the month, and have a duration of 7 * days. Weeks start and end based on calendar dates, not days of the week. */ weeksOfMonths: number[]; } interface GetCloudExadataInfrastructuresCloudExadataInfrastructure { /** * The ID of the Exadata Infrastructure to create. This value is restricted * to (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63 * characters in length. The value must start with a letter and end with * a letter or a number. */ cloudExadataInfrastructureId: string; /** * The date and time that the Exadata Infrastructure was created. */ createTime: string; /** * Whether or not to allow Terraform to destroy the instance. 
Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail. */ deletionProtection: boolean; /** * User friendly name for this resource. */ displayName: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * Entitlement ID of the private offer against which this infrastructure * resource is provisioned. */ entitlementId: string; /** * GCP location where Oracle Exadata is hosted. */ gcpOracleZone: string; /** * Labels or tags associated with the resource. * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effective_labels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * The location of the resource. * * - - - */ location: string; /** * Identifier. The name of the Exadata Infrastructure resource with the following format: * projects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure} */ name: string; /** * The project to which the resource belongs. If it * is not provided, the provider project is used. */ project: string; /** * Various properties of Exadata Infrastructure. */ properties: outputs.oracledatabase.GetCloudExadataInfrastructuresCloudExadataInfrastructureProperty[]; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; } interface GetCloudExadataInfrastructuresCloudExadataInfrastructureProperty { /** * The requested number of additional storage servers activated for the * Exadata Infrastructure. */ activatedStorageCount: number; /** * The requested number of additional storage servers for the Exadata * Infrastructure. 
*/ additionalStorageCount: number; /** * The available storage can be allocated to the Exadata Infrastructure * resource, in gigabytes (GB). */ availableStorageSizeGb: number; /** * The number of compute servers for the Exadata Infrastructure. */ computeCount: number; /** * The number of enabled CPU cores. */ cpuCount: number; /** * The list of customer contacts. */ customerContacts: outputs.oracledatabase.GetCloudExadataInfrastructuresCloudExadataInfrastructurePropertyCustomerContact[]; /** * Size, in terabytes, of the DATA disk group. */ dataStorageSizeTb: number; /** * The local node storage allocated in GBs. */ dbNodeStorageSizeGb: number; /** * The software version of the database servers (dom0) in the Exadata * Infrastructure. */ dbServerVersion: string; /** * Maintenance window as defined by Oracle. * https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/MaintenanceWindow */ maintenanceWindows: outputs.oracledatabase.GetCloudExadataInfrastructuresCloudExadataInfrastructurePropertyMaintenanceWindow[]; /** * The total number of CPU cores available. */ maxCpuCount: number; /** * The total available DATA disk group size. */ maxDataStorageTb: number; /** * The total local node storage available in GBs. */ maxDbNodeStorageSizeGb: number; /** * The total memory available in GBs. */ maxMemoryGb: number; /** * The memory allocated in GBs. */ memorySizeGb: number; /** * The monthly software version of the database servers (dom0) * in the Exadata Infrastructure. Example: 20.1.15 */ monthlyDbServerVersion: string; /** * The monthly software version of the storage servers (cells) * in the Exadata Infrastructure. Example: 20.1.15 */ monthlyStorageServerVersion: string; /** * The OCID of the next maintenance run. */ nextMaintenanceRunId: string; /** * The time when the next maintenance run will occur. */ nextMaintenanceRunTime: string; /** * The time when the next security maintenance run will occur. 
*/ nextSecurityMaintenanceRunTime: string; /** * Deep link to the OCI console to view this resource. */ ociUrl: string; /** * OCID of created infra. * https://docs.oracle.com/en-us/iaas/Content/General/Concepts/identifiers.htm#Oracle */ ocid: string; /** * The shape of the Exadata Infrastructure. The shape determines the * amount of CPU, storage, and memory resources allocated to the instance. */ shape: string; /** * The current lifecycle state of the Exadata Infrastructure. * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ state: string; /** * The number of Cloud Exadata storage servers for the Exadata Infrastructure. */ storageCount: number; /** * The software version of the storage servers (cells) in the Exadata * Infrastructure. */ storageServerVersion: string; /** * The total storage allocated to the Exadata Infrastructure * resource, in gigabytes (GB). */ totalStorageSizeGb: number; } interface GetCloudExadataInfrastructuresCloudExadataInfrastructurePropertyCustomerContact { /** * The email address used by Oracle to send notifications regarding databases * and infrastructure. */ email: string; } interface GetCloudExadataInfrastructuresCloudExadataInfrastructurePropertyMaintenanceWindow { /** * Determines the amount of time the system will wait before the start of each * database server patching operation. Custom action timeout is in minutes and * valid value is between 15 to 120 (inclusive). */ customActionTimeoutMins: number; /** * Days during the week when maintenance should be performed. */ daysOfWeeks: string[]; /** * The window of hours during the day when maintenance should be performed. * The window is a 4 hour slot. 
Valid values are: * 0 - represents time slot 0:00 - 3:59 UTC * 4 - represents time slot 4:00 - 7:59 UTC * 8 - represents time slot 8:00 - 11:59 UTC * 12 - represents time slot 12:00 - 15:59 UTC * 16 - represents time slot 16:00 - 19:59 UTC * 20 - represents time slot 20:00 - 23:59 UTC */ hoursOfDays: number[]; /** * If true, enables the configuration of a custom action timeout (waiting * period) between database server patching operations. */ isCustomActionTimeoutEnabled: boolean; /** * Lead time window allows user to set a lead time to prepare for a down time. * The lead time is in weeks and valid value is between 1 to 4. */ leadTimeWeek: number; /** * Months during the year when maintenance should be performed. */ months: string[]; /** * Cloud CloudExadataInfrastructure node patching method, either "ROLLING" * or "NONROLLING". Default value is ROLLING. * Possible values: * PATCHING_MODE_UNSPECIFIED * ROLLING * NON_ROLLING */ patchingMode: string; /** * The maintenance window scheduling preference. * Possible values: * MAINTENANCE_WINDOW_PREFERENCE_UNSPECIFIED * CUSTOM_PREFERENCE * NO_PREFERENCE */ preference: string; /** * Weeks during the month when maintenance should be performed. Weeks start on * the 1st, 8th, 15th, and 22nd days of the month, and have a duration of 7 * days. Weeks start and end based on calendar dates, not days of the week. */ weeksOfMonths: number[]; } interface GetCloudVmClusterProperty { /** * OCI Cluster name. */ clusterName: string; /** * Compartment ID of cluster. */ compartmentId: string; /** * Number of enabled CPU cores. */ cpuCoreCount: number; /** * The data disk group size to be allocated in TBs. */ dataStorageSizeTb: number; /** * Local storage per VM */ dbNodeStorageSizeGb: number; /** * OCID of database servers. */ dbServerOcids: string[]; /** * Data collection options for diagnostics. 
*/ diagnosticsDataCollectionOptions: outputs.oracledatabase.GetCloudVmClusterPropertyDiagnosticsDataCollectionOption[]; /** * The type of redundancy. * Possible values: * DISK_REDUNDANCY_UNSPECIFIED * HIGH * NORMAL */ diskRedundancy: string; /** * DNS listener IP. */ dnsListenerIp: string; /** * Parent DNS domain where SCAN DNS and hosts names are qualified. * ex: ocispdelegated.ocisp10jvnet.oraclevcn.com */ domain: string; /** * Grid Infrastructure Version. */ giVersion: string; /** * host name without domain. * format: "-" with some suffix. * ex: sp2-yi0xq where "sp2" is the hostname_prefix. */ hostname: string; /** * Prefix for VM cluster host names. */ hostnamePrefix: string; /** * License type of VM Cluster. * Possible values: * LICENSE_TYPE_UNSPECIFIED * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseType: string; /** * Use local backup. */ localBackupEnabled: boolean; /** * Memory allocated in GBs. */ memorySizeGb: number; /** * Number of database servers. */ nodeCount: number; /** * Deep link to the OCI console to view this resource. */ ociUrl: string; /** * Oracle Cloud Infrastructure ID of VM Cluster. */ ocid: string; /** * OCPU count per VM. Minimum is 0.1. */ ocpuCount: number; /** * SCAN DNS name. * ex: sp2-yi0xq-scan.ocispdelegated.ocisp10jvnet.oraclevcn.com */ scanDns: string; /** * OCID of scan DNS record. */ scanDnsRecordId: string; /** * OCIDs of scan IPs. */ scanIpIds: string[]; /** * SCAN listener port - TCP */ scanListenerPortTcp: number; /** * SCAN listener port - TLS */ scanListenerPortTcpSsl: number; /** * Shape of VM Cluster. */ shape: string; /** * Use exadata sparse snapshots. */ sparseDiskgroupEnabled: boolean; /** * SSH public keys to be stored with cluster. */ sshPublicKeys: string[]; /** * State of the cluster. 
* Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ state: string; /** * The storage allocation for the disk group, in gigabytes (GB). */ storageSizeGb: number; /** * Operating system version of the image. */ systemVersion: string; /** * Represents a time zone from the * [IANA Time Zone Database](https://www.iana.org/time-zones). */ timeZones: outputs.oracledatabase.GetCloudVmClusterPropertyTimeZone[]; } interface GetCloudVmClusterPropertyDiagnosticsDataCollectionOption { /** * Indicates whether diagnostic collection is enabled for the VM cluster */ diagnosticsEventsEnabled: boolean; /** * Indicates whether health monitoring is enabled for the VM cluster */ healthMonitoringEnabled: boolean; /** * Indicates whether incident logs and trace collection are enabled for the VM * cluster */ incidentLogsEnabled: boolean; } interface GetCloudVmClusterPropertyTimeZone { /** * IANA Time Zone Database time zone, e.g. "America/New_York". */ id: string; /** * IANA Time Zone Database version number, e.g. "2019a". */ version: string; } interface GetCloudVmClustersCloudVmCluster { /** * The name of the backup OdbSubnet associated with the VM Cluster. * Format: * projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} */ backupOdbSubnet: string; /** * CIDR range of the backup subnet. */ backupSubnetCidr: string; /** * Network settings. CIDR to use for cluster IP allocation. */ cidr: string; /** * The ID of the VM Cluster to create. This value is restricted * to (^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$) and must be a maximum of 63 * characters in length. The value must start with a letter and end with * a letter or a number. */ cloudVmClusterId: string; /** * The date and time that the VM cluster was created. */ createTime: string; /** * Whether Terraform will be prevented from destroying the cluster. 
Deleting this cluster via terraform destroy or terraform apply will only succeed if this field is false in the Terraform state. */ deletionProtection: boolean; /** * User friendly name for this resource. */ displayName: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * The name of the Exadata Infrastructure resource on which VM cluster * resource is created, in the following format: * projects/{project}/locations/{region}/cloudExadataInfrastructures/{cloud_exadata_infrastructure} */ exadataInfrastructure: string; /** * GCP location where Oracle Exadata is hosted. It is same as GCP Oracle zone * of Exadata infrastructure. */ gcpOracleZone: string; /** * Labels or tags associated with the VM Cluster. * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field 'effective_labels' for all of the labels present on the resource. */ labels: { [key: string]: string; }; /** * The location of the resource. * * - - - */ location: string; /** * Identifier. The name of the VM Cluster resource with the format: * projects/{project}/locations/{region}/cloudVmClusters/{cloud_vm_cluster} */ name: string; /** * The name of the VPC network. * Format: projects/{project}/global/networks/{network} */ network: string; /** * The name of the OdbNetwork associated with the VM Cluster. * Format: * projects/{project}/locations/{location}/odbNetworks/{odb_network} * It is optional but if specified, this should match the parent ODBNetwork of * the odbSubnet and backup_odb_subnet. */ odbNetwork: string; /** * The name of the OdbSubnet associated with the VM Cluster for * IP allocation. Format: * projects/{project}/locations/{location}/odbNetworks/{odb_network}/odbSubnets/{odb_subnet} */ odbSubnet: string; /** * The project to which the resource belongs. 
If it * is not provided, the provider project is used. */ project: string; /** * Various properties and settings associated with Exadata VM cluster. */ properties: outputs.oracledatabase.GetCloudVmClustersCloudVmClusterProperty[]; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; } interface GetCloudVmClustersCloudVmClusterProperty { /** * OCI Cluster name. */ clusterName: string; /** * Compartment ID of cluster. */ compartmentId: string; /** * Number of enabled CPU cores. */ cpuCoreCount: number; /** * The data disk group size to be allocated in TBs. */ dataStorageSizeTb: number; /** * Local storage per VM */ dbNodeStorageSizeGb: number; /** * OCID of database servers. */ dbServerOcids: string[]; /** * Data collection options for diagnostics. */ diagnosticsDataCollectionOptions: outputs.oracledatabase.GetCloudVmClustersCloudVmClusterPropertyDiagnosticsDataCollectionOption[]; /** * The type of redundancy. * Possible values: * DISK_REDUNDANCY_UNSPECIFIED * HIGH * NORMAL */ diskRedundancy: string; /** * DNS listener IP. */ dnsListenerIp: string; /** * Parent DNS domain where SCAN DNS and hosts names are qualified. * ex: ocispdelegated.ocisp10jvnet.oraclevcn.com */ domain: string; /** * Grid Infrastructure Version. */ giVersion: string; /** * host name without domain. * format: "-" with some suffix. * ex: sp2-yi0xq where "sp2" is the hostname_prefix. */ hostname: string; /** * Prefix for VM cluster host names. */ hostnamePrefix: string; /** * License type of VM Cluster. * Possible values: * LICENSE_TYPE_UNSPECIFIED * LICENSE_INCLUDED * BRING_YOUR_OWN_LICENSE */ licenseType: string; /** * Use local backup. */ localBackupEnabled: boolean; /** * Memory allocated in GBs. */ memorySizeGb: number; /** * Number of database servers. */ nodeCount: number; /** * Deep link to the OCI console to view this resource. 
*/ ociUrl: string; /** * Oracle Cloud Infrastructure ID of VM Cluster. */ ocid: string; /** * OCPU count per VM. Minimum is 0.1. */ ocpuCount: number; /** * SCAN DNS name. * ex: sp2-yi0xq-scan.ocispdelegated.ocisp10jvnet.oraclevcn.com */ scanDns: string; /** * OCID of scan DNS record. */ scanDnsRecordId: string; /** * OCIDs of scan IPs. */ scanIpIds: string[]; /** * SCAN listener port - TCP */ scanListenerPortTcp: number; /** * SCAN listener port - TLS */ scanListenerPortTcpSsl: number; /** * Shape of VM Cluster. */ shape: string; /** * Use exadata sparse snapshots. */ sparseDiskgroupEnabled: boolean; /** * SSH public keys to be stored with cluster. */ sshPublicKeys: string[]; /** * State of the cluster. * Possible values: * STATE_UNSPECIFIED * PROVISIONING * AVAILABLE * UPDATING * TERMINATING * TERMINATED * FAILED * MAINTENANCE_IN_PROGRESS */ state: string; /** * The storage allocation for the disk group, in gigabytes (GB). */ storageSizeGb: number; /** * Operating system version of the image. */ systemVersion: string; /** * Represents a time zone from the * [IANA Time Zone Database](https://www.iana.org/time-zones). */ timeZones: outputs.oracledatabase.GetCloudVmClustersCloudVmClusterPropertyTimeZone[]; } interface GetCloudVmClustersCloudVmClusterPropertyDiagnosticsDataCollectionOption { /** * Indicates whether diagnostic collection is enabled for the VM cluster */ diagnosticsEventsEnabled: boolean; /** * Indicates whether health monitoring is enabled for the VM cluster */ healthMonitoringEnabled: boolean; /** * Indicates whether incident logs and trace collection are enabled for the VM * cluster */ incidentLogsEnabled: boolean; } interface GetCloudVmClustersCloudVmClusterPropertyTimeZone { /** * IANA Time Zone Database time zone, e.g. "America/New_York". */ id: string; /** * IANA Time Zone Database version number, e.g. "2019a". 
*/ version: string; } interface GetDbNodesDbNode { /** * The dbnode name */ name: string; properties: outputs.oracledatabase.GetDbNodesDbNodeProperty[]; } interface GetDbNodesDbNodeProperty { /** * Output only */ dbNodeStorageSizeGb: number; /** * Output only */ dbServerOcid: string; /** * Output only */ hostname: string; /** * Output only */ memorySizeGb: number; /** * Output only */ ocid: string; /** * Output only */ ocpuCount: number; /** * Output only */ state: string; /** * Output only */ totalCpuCoreCount: number; } interface GetDbServersDbServer { /** * The Display name */ displayName: string; properties: outputs.oracledatabase.GetDbServersDbServerProperty[]; } interface GetDbServersDbServerProperty { /** * Output only */ dbNodeIds: string[]; /** * Output only */ dbNodeStorageSizeGb: number; /** * Output only */ maxDbNodeStorageSizeGb: number; /** * Output only */ maxMemorySizeGb: number; /** * Output only */ maxOcpuCount: number; /** * Output only */ memorySizeGb: number; /** * Output only */ ocid: string; /** * Output only */ ocpuCount: number; /** * Output only */ state: string; /** * Output only */ vmCount: number; } } export declare namespace organizations { interface AccessApprovalSettingsEnrolledService { /** * The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * cloudkms.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * pubsub.googleapis.com * storage.googleapis.com */ cloudProduct: string; /** * The enrollment level of the service. * Default value is `BLOCK_ALL`. * Possible values are: `BLOCK_ALL`. 
*/ enrollmentLevel?: string; } interface GetFoldersFolder { /** * The timestamp of when the folder was created */ createTime: string; /** * The timestamp of when the folder was requested to be deleted (if applicable) */ deleteTime: string; /** * The display name of the folder */ displayName: string; /** * Entity tag identifier of the folder */ etag: string; /** * The id of the folder */ name: string; /** * The parent id of the folder */ parent: string; /** * The lifecycle state of the folder */ state: string; /** * The timestamp of when the folder was last modified */ updateTime: string; } interface GetIAMPolicyAuditConfig { /** * A nested block that defines the operations you'd like to log. */ auditLogConfigs: outputs.organizations.GetIAMPolicyAuditConfigAuditLogConfig[]; /** * Defines a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. */ service: string; } interface GetIAMPolicyAuditConfigAuditLogConfig { /** * Specifies the identities that are exempt from these types of logging operations. Follows the same format of the `members` array for `binding`. */ exemptedMembers?: string[]; /** * Defines the logging level. `DATA_READ`, `DATA_WRITE` and `ADMIN_READ` capture different types of events. See [the audit configuration documentation](https://docs.cloud.google.com/resource-manager/reference/rest/Shared.Types/AuditConfig) for more details. */ logType: string; } interface GetIAMPolicyBinding { /** * An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding. Structure is documented below. */ condition?: outputs.organizations.GetIAMPolicyBindingCondition; /** * An array of identities that will be granted the privilege in the `role`. 
For more details on format and restrictions see https://cloud.google.com/billing/reference/rest/v1/Policy#Binding * Each entry can have one of the following values: * * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. Some resources **don't** support this identity. * * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. Some resources **don't** support this identity. * * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com. * * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. * * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. * * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. */ members: string[]; /** * The role/permission that will be granted to the members. * See the [IAM Roles](https://cloud.google.com/compute/docs/access/iam) documentation for a complete list of roles. * Note that custom roles must be of the format `[projects|organizations]/{parent-name}/roles/{role-name}`. */ role: string; } interface GetIAMPolicyBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface GetIamCustomRolesRole { /** * The current deleted state of the role. */ deleted: boolean; /** * A human-readable description for the role. 
*/ description: string; /** * an identifier for the resource with the format `organizations/{{org_id}}/roles/{{role_id}}`. */ id: string; /** * The name of the role in the format `organizations/{{org_id}}/roles/{{role_id}}`. Like `id`, this field can be used as a reference in other resources such as IAM role bindings. */ name: string; /** * The names of the permissions this role grants when bound in an IAM policy. */ permissions: string[]; /** * The camel case role id used for this role. */ roleId: string; /** * The current launch stage of the role. List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage). */ stage: string; /** * A human-readable title for the role. */ title: string; } interface GetSOrganization { /** * The Google for Work customer ID of the Organization. */ directoryCustomerId: string; /** * A human-readable string that refers to the Organization in the Google Cloud console. The string will be set to the primary domain (for example, `"google.com"`) of the G Suite customer that owns the organization. */ displayName: string; /** * The Organization's current lifecycle state. */ lifecycleState: string; /** * The resource name of the Organization in the form `organizations/{organization_id}`. */ name: string; /** * The Organization ID. */ orgId: string; } interface IAMBindingCondition { description?: string; expression: string; title: string; } interface IAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. 
*/ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface IamAuditConfigAuditLogConfig { /** * Identities that do not cause logging for this type of permission. * Each entry can have one of the following values: * * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. * * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. * * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. * * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. */ exemptedMembers?: string[]; /** * Permission type for which logging is to be configured. Must be one of `DATA_READ`, `DATA_WRITE`, or `ADMIN_READ`. */ logType: string; } interface PolicyBooleanPolicy { /** * If true, then the Policy is enforced. If false, then any configuration is acceptable. */ enforced: boolean; } interface PolicyListPolicy { /** * or `deny` - (Optional) One or the other must be set. */ allow?: outputs.organizations.PolicyListPolicyAllow; /** * One or the other must be set. */ deny?: outputs.organizations.PolicyListPolicyDeny; /** * If set to true, the values from the effective Policy of the parent resource * are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. * * The `allow` or `deny` blocks support: */ inheritFromParent?: boolean; /** * The Google Cloud Console will try to default to a configuration that matches the value specified in this field. */ suggestedValue: string; } interface PolicyListPolicyAllow { /** * The policy allows or denies all values. 
*/ all?: boolean; /** * The policy can define specific values that are allowed or denied. */ values?: string[]; } interface PolicyListPolicyDeny { /** * The policy allows or denies all values. */ all?: boolean; /** * The policy can define specific values that are allowed or denied. */ values?: string[]; } interface PolicyRestorePolicy { /** * May only be set to true. If set, then the default Policy is restored. */ default: boolean; } } export declare namespace orgpolicy { interface PolicyDryRunSpec { /** * (Output) * An opaque tag indicating the current version of the policy, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policy to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. */ etag: string; /** * Determines the inheritance behavior for this policy. If `inheritFromParent` is true, policy rules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this policy becomes the new root for evaluation. This field can be set only for policies which configure list constraints. */ inheritFromParent?: boolean; /** * Ignores policies set above this resource and restores the `constraintDefault` enforcement behavior of the specific constraint at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inheritFromParent` must be set to false. */ reset?: boolean; /** * In policies for boolean constraints, the following requirements apply: - There must be one and only one policy rule where condition is unset. - Boolean policy rules with conditions must set `enforced` to the opposite of the policy rule without a condition. 
- During policy evaluation, policy rules with conditions that are true for a target resource take precedence. * Structure is documented below. */ rules?: outputs.orgpolicy.PolicyDryRunSpecRule[]; /** * (Output) * Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. */ updateTime: string; } interface PolicyDryRunSpecRule { /** * Setting this to `"TRUE"` means that all values are allowed. This field can be set only in Policies for list constraints. */ allowAll?: string; /** * A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr' must include from 1 to 10 subexpressions, joined by the "||" or "&&" operators. Each subexpression must be of the form "resource.matchTag('/tag_key_short_name, 'tag_value_short_name')". or "resource.matchTagId('tagKeys/key_id', 'tagValues/value_id')". where keyName and valueName are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: "resource.matchTag('123456789/environment, 'prod')". or "resource.matchTagId('tagKeys/123', 'tagValues/456')". * Structure is documented below. */ condition?: outputs.orgpolicy.PolicyDryRunSpecRuleCondition; /** * Setting this to `"TRUE"` means that all values are denied. This field can be set only in Policies for list constraints. */ denyAll?: string; /** * If `"TRUE"`, then the `Policy` is enforced. If `"FALSE"`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints. */ enforce?: string; /** * Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. 
For example: { \"allowedLocations\" : [\"us-east1\", \"us-west1\"], \"allowAll\" : true } */ parameters?: string; /** * List of values to be used for this policy rule. This field can be set only in policies for list constraints. * Structure is documented below. */ values?: outputs.orgpolicy.PolicyDryRunSpecRuleValues; } interface PolicyDryRunSpecRuleCondition { /** * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface PolicyDryRunSpecRuleValues { /** * List of values allowed at this resource. */ allowedValues?: string[]; /** * List of values denied at this resource. */ deniedValues?: string[]; } interface PolicySpec { /** * (Output) * An opaque tag indicating the current version of the `Policy`, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the `Policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current `Policy` to use when executing a read-modify-write loop. When the `Policy` is returned from a `GetEffectivePolicy` request, the `etag` will be unset. */ etag: string; /** * Determines the inheritance behavior for this `Policy`. If `inheritFromParent` is true, PolicyRules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this Policy becomes the new root for evaluation. 
This field can be set only for Policies which configure list constraints. */ inheritFromParent?: boolean; /** * Ignores policies set above this resource and restores the `constraintDefault` enforcement behavior of the specific `Constraint` at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inheritFromParent` must be set to false. */ reset?: boolean; /** * In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set `enforced` to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence. * Structure is documented below. */ rules?: outputs.orgpolicy.PolicySpecRule[]; /** * (Output) * Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that `Policy`. */ updateTime: string; } interface PolicySpecRule { /** * Setting this to `"TRUE"` means that all values are allowed. This field can be set only in Policies for list constraints. */ allowAll?: string; /** * A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr` must include from 1 to 10 subexpressions, joined by the "||" or "&&" operators. Each subexpression must be of the form "resource.matchTag('/tag_key_short_name', 'tag_value_short_name')". or "resource.matchTagId('tagKeys/key_id', 'tagValues/value_id')". where keyName and valueName are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: "resource.matchTag('123456789/environment', 'prod')". or "resource.matchTagId('tagKeys/123', 'tagValues/456')". * Structure is documented below. 
*/ condition?: outputs.orgpolicy.PolicySpecRuleCondition; /** * Setting this to `"TRUE"` means that all values are denied. This field can be set only in Policies for list constraints. */ denyAll?: string; /** * If `"TRUE"`, then the `Policy` is enforced. If `"FALSE"`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints. */ enforce?: string; /** * Optional. Required for Managed Constraints if parameters defined in constraints. Pass parameter values when policy enforcement is enabled. Ensure that parameter value types match those defined in the constraint definition. For example: { \"allowedLocations\" : [\"us-east1\", \"us-west1\"], \"allowAll\" : true } */ parameters?: string; /** * List of values to be used for this policy rule. This field can be set only in policies for list constraints. * Structure is documented below. */ values?: outputs.orgpolicy.PolicySpecRuleValues; } interface PolicySpecRuleCondition { /** * Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface PolicySpecRuleValues { /** * List of values allowed at this resource. */ allowedValues?: string[]; /** * List of values denied at this resource. */ deniedValues?: string[]; } } export declare namespace osconfig { interface GuestPoliciesAssignment { /** * Targets instances matching at least one of these label sets. This allows an assignment to target disparate groups, * for example "env=prod or env=staging". 
* Structure is documented below. */ groupLabels?: outputs.osconfig.GuestPoliciesAssignmentGroupLabel[]; /** * Targets VM instances whose name starts with one of these prefixes. * Like labels, this is another way to group VM instances when targeting configs, * for example prefix="prod-". * Only supported for project-level policies. */ instanceNamePrefixes?: string[]; /** * Targets any of the instances specified. Instances are specified by their URI in the form * zones/[ZONE]/instances/[INSTANCE_NAME]. * Instance targeting is uncommon and is supported to facilitate the management of changes * by the instance or to target specific VM instances for development and testing. * Only supported for project-level policies and must reference instances within this project. */ instances?: string[]; /** * Targets VM instances matching at least one of the following OS types. * VM instances must match all supplied criteria for a given OsType to be included. * Structure is documented below. */ osTypes?: outputs.osconfig.GuestPoliciesAssignmentOsType[]; /** * Targets instances in any of these zones. Leave empty to target instances in any zone. * Zonal targeting is uncommon and is supported to facilitate the management of changes by zone. */ zones?: string[]; } interface GuestPoliciesAssignmentGroupLabel { /** * Google Compute Engine instance labels that must be present for an instance to be included in this assignment group. */ labels: { [key: string]: string; }; } interface GuestPoliciesAssignmentOsType { /** * Targets VM instances with OS Inventory enabled and having the following OS architecture. */ osArchitecture?: string; /** * Targets VM instances with OS Inventory enabled and having the following OS short name, for example "debian" or "windows". */ osShortName?: string; /** * Targets VM instances with OS Inventory enabled and having the following OS version. 
*/ osVersion?: string; } interface GuestPoliciesPackage { /** * The desiredState the agent should maintain for this package. The default is to ensure the package is installed. * Possible values are: `INSTALLED`, `UPDATED`, `REMOVED`. */ desiredState?: string; /** * Type of package manager that can be used to install this package. If a system does not have the package manager, * the package is not installed or removed no error message is returned. By default, or if you specify ANY, * the agent attempts to install and remove this package using the default package manager. * This is useful when creating a policy that applies to different types of systems. * The default behavior is ANY. * Default value is `ANY`. * Possible values are: `ANY`, `APT`, `YUM`, `ZYPPER`, `GOO`. */ manager?: string; /** * The name of the package. A package is uniquely identified for conflict validation * by checking the package name and the manager(s) that the package targets. */ name: string; } interface GuestPoliciesPackageRepository { /** * An Apt Repository. * Structure is documented below. */ apt?: outputs.osconfig.GuestPoliciesPackageRepositoryApt; /** * A Goo Repository. * Structure is documented below. */ goo?: outputs.osconfig.GuestPoliciesPackageRepositoryGoo; /** * A Yum Repository. * Structure is documented below. */ yum?: outputs.osconfig.GuestPoliciesPackageRepositoryYum; /** * A Zypper Repository. * Structure is documented below. */ zypper?: outputs.osconfig.GuestPoliciesPackageRepositoryZypper; } interface GuestPoliciesPackageRepositoryApt { /** * Type of archive files in this repository. The default behavior is DEB. * Default value is `DEB`. * Possible values are: `DEB`, `DEB_SRC`. */ archiveType?: string; /** * List of components for this repository. Must contain at least one item. */ components: string[]; /** * Distribution of this repository. */ distribution: string; /** * URI of the key file for this repository. 
The agent maintains a keyring at * /etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg containing all the keys in any applied guest policy. */ gpgKey?: string; /** * URI for this repository. */ uri: string; } interface GuestPoliciesPackageRepositoryGoo { /** * The name of the repository. */ name: string; /** * The url of the repository. */ url: string; } interface GuestPoliciesPackageRepositoryYum { /** * The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * A one word, unique name for this repository. This is the repo id in the Yum config file and also the displayName * if displayName is omitted. This id is also used as the unique identifier when checking for guest policy conflicts. */ id: string; } interface GuestPoliciesPackageRepositoryZypper { /** * The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * A one word, unique name for this repository. This is the repo id in the zypper config file and also the displayName * if displayName is omitted. This id is also used as the unique identifier when checking for guest policy conflicts. */ id: string; } interface GuestPoliciesRecipe { /** * Resources available to be used in the steps in the recipe. * Structure is documented below. */ artifacts?: outputs.osconfig.GuestPoliciesRecipeArtifact[]; /** * Default is INSTALLED. The desired state the agent should maintain for this recipe. * INSTALLED: The software recipe is installed on the instance but won't be updated to new versions. * INSTALLED_KEEP_UPDATED: The software recipe is installed on the instance. The recipe is updated to a higher version, * if a higher version of the recipe is assigned to this instance. 
* REMOVE: Remove is unsupported for software recipes and attempts to create or update a recipe to the REMOVE state is rejected. * Default value is `INSTALLED`. * Possible values are: `INSTALLED`, `UPDATED`, `REMOVED`. */ desiredState?: string; /** * Actions to be taken for installing this recipe. On failure it stops executing steps and does not attempt another installation. * Any steps taken (including partially completed steps) are not rolled back. * Structure is documented below. */ installSteps?: outputs.osconfig.GuestPoliciesRecipeInstallStep[]; /** * Unique identifier for the recipe. Only one recipe with a given name is installed on an instance. * Names are also used to identify resources which helps to determine whether guest policies have conflicts. * This means that requests to create multiple recipes with the same name and version are rejected since they * could potentially have conflicting assignments. */ name: string; /** * Actions to be taken for updating this recipe. On failure it stops executing steps and does not attempt another update for this recipe. * Any steps taken (including partially completed steps) are not rolled back. * Structure is documented below. */ updateSteps?: outputs.osconfig.GuestPoliciesRecipeUpdateStep[]; /** * The version of this software recipe. Version can be up to 4 period separated numbers (e.g. 12.34.56.78). */ version?: string; } interface GuestPoliciesRecipeArtifact { /** * Defaults to false. When false, recipes are subject to validations based on the artifact type: * Remote: A checksum must be specified, and only protocols with transport-layer security are permitted. * GCS: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Google Cloud Storage artifact. * Structure is documented below. */ gcs?: outputs.osconfig.GuestPoliciesRecipeArtifactGcs; /** * Id of the artifact, which the installation and update steps of this recipe can reference. * Artifacts in a recipe cannot have the same id. 
*/ id: string; /** * A generic remote artifact. * Structure is documented below. */ remote?: outputs.osconfig.GuestPoliciesRecipeArtifactRemote; } interface GuestPoliciesRecipeArtifactGcs { /** * Bucket of the Google Cloud Storage object. Given an example URL: https://storage.googleapis.com/my-bucket/foo/bar#1234567 * this value would be my-bucket. */ bucket?: string; /** * Must be provided if allowInsecure is false. Generation number of the Google Cloud Storage object. * https://storage.googleapis.com/my-bucket/foo/bar#1234567 this value would be 1234567. */ generation?: number; /** * Name of the Google Cloud Storage object. Given an example URL: https://storage.googleapis.com/my-bucket/foo/bar#1234567 * this value would be foo/bar. */ object?: string; } interface GuestPoliciesRecipeArtifactRemote { /** * Must be provided if allowInsecure is false. SHA256 checksum in hex format, to compare to the checksum of the artifact. * If the checksum is not empty and it doesn't match the artifact then the recipe installation fails before running any * of the steps. */ checkSum?: string; /** * URI from which to fetch the object. It should contain both the protocol and path following the format {protocol}://{location}. */ uri?: string; } interface GuestPoliciesRecipeInstallStep { /** * Extracts an archive into the specified directory. * Structure is documented below. */ archiveExtraction?: outputs.osconfig.GuestPoliciesRecipeInstallStepArchiveExtraction; /** * Installs a deb file via dpkg. * Structure is documented below. */ dpkgInstallation?: outputs.osconfig.GuestPoliciesRecipeInstallStepDpkgInstallation; /** * Copies a file onto the instance. * Structure is documented below. */ fileCopy?: outputs.osconfig.GuestPoliciesRecipeInstallStepFileCopy; /** * Executes an artifact or local file. * Structure is documented below. */ fileExec?: outputs.osconfig.GuestPoliciesRecipeInstallStepFileExec; /** * Installs an MSI file. * Structure is documented below. 
*/ msiInstallation?: outputs.osconfig.GuestPoliciesRecipeInstallStepMsiInstallation; /** * Installs an rpm file via the rpm utility. * Structure is documented below. */ rpmInstallation?: outputs.osconfig.GuestPoliciesRecipeInstallStepRpmInstallation; /** * Runs commands in a shell. * Structure is documented below. */ scriptRun?: outputs.osconfig.GuestPoliciesRecipeInstallStepScriptRun; } interface GuestPoliciesRecipeInstallStepArchiveExtraction { /** * The id of the relevant artifact in the recipe. */ artifactId: string; /** * Directory to extract archive to. Defaults to / on Linux or C:\ on Windows. */ destination: string; /** * The type of the archive to extract. * Possible values are: `TAR`, `TAR_GZIP`, `TAR_BZIP`, `TAR_LZMA`, `TAR_XZ`, `ZIP`. */ type: string; } interface GuestPoliciesRecipeInstallStepDpkgInstallation { /** * The id of the relevant artifact in the recipe. */ artifactId: string; } interface GuestPoliciesRecipeInstallStepFileCopy { /** * The id of the relevant artifact in the recipe. */ artifactId: string; /** * The absolute path on the instance to put the file. */ destination: string; /** * Whether to allow this step to overwrite existing files.If this is false and the file already exists the file * is not overwritten and the step is considered a success. Defaults to false. */ overwrite?: boolean; /** * Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users * for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit * number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one * bit corresponds to the execute permission. Default behavior is 755. 
* Below are some examples of permissions and their associated values: * read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4 */ permissions?: string; } interface GuestPoliciesRecipeInstallStepFileExec { /** * A list of possible return values that the program can return to indicate a success. Defaults to [0]. */ allowedExitCodes: string; /** * Arguments to be passed to the provided executable. */ args?: string[]; /** * The id of the relevant artifact in the recipe. */ artifactId?: string; /** * The absolute path of the file on the local filesystem. */ localPath?: string; } interface GuestPoliciesRecipeInstallStepMsiInstallation { /** * Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] */ allowedExitCodes: number[]; /** * The id of the relevant artifact in the recipe. */ artifactId: string; /** * The flags to use when installing the MSI. Defaults to the install flag. */ flags: string[]; } interface GuestPoliciesRecipeInstallStepRpmInstallation { /** * The id of the relevant artifact in the recipe. */ artifactId: string; } interface GuestPoliciesRecipeInstallStepScriptRun { /** * Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] */ allowedExitCodes: number[]; /** * The script interpreter to use to run the script. If no interpreter is specified the script is executed directly, * which likely only succeed for scripts with shebang lines. * Possible values are: `SHELL`, `POWERSHELL`. */ interpreter?: string; /** * The shell script to be executed. */ script: string; } interface GuestPoliciesRecipeUpdateStep { /** * Extracts an archive into the specified directory. * Structure is documented below. */ archiveExtraction?: outputs.osconfig.GuestPoliciesRecipeUpdateStepArchiveExtraction; /** * Installs a deb file via dpkg. * Structure is documented below. 
*/ dpkgInstallation?: outputs.osconfig.GuestPoliciesRecipeUpdateStepDpkgInstallation; /** * Copies a file onto the instance. * Structure is documented below. */ fileCopy?: outputs.osconfig.GuestPoliciesRecipeUpdateStepFileCopy; /** * Executes an artifact or local file. * Structure is documented below. */ fileExec?: outputs.osconfig.GuestPoliciesRecipeUpdateStepFileExec; /** * Installs an MSI file. * Structure is documented below. */ msiInstallation?: outputs.osconfig.GuestPoliciesRecipeUpdateStepMsiInstallation; /** * Installs an rpm file via the rpm utility. * Structure is documented below. */ rpmInstallation?: outputs.osconfig.GuestPoliciesRecipeUpdateStepRpmInstallation; /** * Runs commands in a shell. * Structure is documented below. */ scriptRun?: outputs.osconfig.GuestPoliciesRecipeUpdateStepScriptRun; } interface GuestPoliciesRecipeUpdateStepArchiveExtraction { /** * The id of the relevant artifact in the recipe. */ artifactId: string; /** * Directory to extract archive to. Defaults to / on Linux or C:\ on Windows. */ destination: string; /** * The type of the archive to extract. * Possible values are: `TAR`, `TAR_GZIP`, `TAR_BZIP`, `TAR_LZMA`, `TAR_XZ`, `ZIP`. */ type: string; } interface GuestPoliciesRecipeUpdateStepDpkgInstallation { /** * The id of the relevant artifact in the recipe. */ artifactId: string; } interface GuestPoliciesRecipeUpdateStepFileCopy { /** * The id of the relevant artifact in the recipe. */ artifactId: string; /** * The absolute path on the instance to put the file. */ destination: string; /** * Whether to allow this step to overwrite existing files.If this is false and the file already exists the file * is not overwritten and the step is considered a success. Defaults to false. */ overwrite?: boolean; /** * Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users * for the file (similarly to the numeric mode used in the linux chmod utility). 
Each digit represents a three bit * number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one * bit corresponds to the execute permission. Default behavior is 755. * Below are some examples of permissions and their associated values: * read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4 */ permissions?: string; } interface GuestPoliciesRecipeUpdateStepFileExec { /** * A list of possible return values that the program can return to indicate a success. Defaults to [0]. */ allowedExitCodes: number[]; /** * Arguments to be passed to the provided executable. */ args?: string[]; /** * The id of the relevant artifact in the recipe. */ artifactId?: string; /** * The absolute path of the file on the local filesystem. */ localPath?: string; } interface GuestPoliciesRecipeUpdateStepMsiInstallation { /** * Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] */ allowedExitCodes: number[]; /** * The id of the relevant artifact in the recipe. */ artifactId: string; /** * The flags to use when installing the MSI. Defaults to the install flag. */ flags: string[]; } interface GuestPoliciesRecipeUpdateStepRpmInstallation { /** * The id of the relevant artifact in the recipe. */ artifactId: string; } interface GuestPoliciesRecipeUpdateStepScriptRun { /** * Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] */ allowedExitCodes: number[]; /** * The script interpreter to use to run the script. If no interpreter is specified the script is executed directly, * which likely only succeed for scripts with shebang lines. * Possible values are: `SHELL`, `POWERSHELL`. */ interpreter?: string; /** * The shell script to be executed. */ script: string; } interface OsPolicyAssignmentInstanceFilter { /** * Target all VMs in the project. If true, no other criteria * is permitted. 
*/ all?: boolean; /** * List of label sets used for VM exclusion. If * the list has more than one label set, the VM is excluded if any of the label * sets are applicable for the VM. Structure is * documented below. */ exclusionLabels?: outputs.osconfig.OsPolicyAssignmentInstanceFilterExclusionLabel[]; /** * List of label sets used for VM inclusion. If * the list has more than one `LabelSet`, the VM is included if any of the * label sets are applicable for the VM. Structure is * documented below. */ inclusionLabels?: outputs.osconfig.OsPolicyAssignmentInstanceFilterInclusionLabel[]; /** * List of inventories to select VMs. A VM is * selected if its inventory data matches at least one of the following * inventories. Structure is documented below. */ inventories?: outputs.osconfig.OsPolicyAssignmentInstanceFilterInventory[]; } interface OsPolicyAssignmentInstanceFilterExclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this map to be * selected. */ labels?: { [key: string]: string; }; } interface OsPolicyAssignmentInstanceFilterInclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this map to be * selected. */ labels?: { [key: string]: string; }; } interface OsPolicyAssignmentInstanceFilterInventory { /** * The OS short name */ osShortName: string; /** * The OS version Prefix matches are supported if * asterisk(*) is provided as the last character. For example, to match all * versions with a major version of `7`, specify the following value for this * field `7.*` An empty string matches all OS versions. */ osVersion?: string; } interface OsPolicyAssignmentOsPolicy { /** * This flag determines the OS * policy compliance status when none of the resource groups within the policy * are applicable for a VM. 
Set this value to `true` if the policy needs to be * reported as compliant even if the policy has nothing to validate or enforce. */ allowNoResourceGroupMatch?: boolean; /** * Policy description. Length of the description is * limited to 1024 characters. */ description?: string; /** * The id of the OS policy with the following restrictions: * * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the assignment. */ id: string; /** * Policy mode Possible values are: `MODE_UNSPECIFIED`, * `VALIDATION`, `ENFORCEMENT`. */ mode: string; /** * List of resource groups for the policy. For a * particular VM, resource groups are evaluated in the order specified and the * first resource group that is applicable is selected and the rest are * ignored. If none of the resource groups are applicable for a VM, the VM is * considered to be non-compliant w.r.t this policy. This behavior can be * toggled by the flag `allowNoResourceGroupMatch` Structure is * documented below. */ resourceGroups: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroup[]; } interface OsPolicyAssignmentOsPolicyResourceGroup { /** * List of inventory filters for the resource * group. The resources in this resource group are applied to the target VM if * it satisfies at least one of the following inventory filters. For example, * to apply this resource group to VMs running either `RHEL` or `CentOS` * operating systems, specify 2 items for the list with following values: * inventory_filters[0].os_short_name='rhel' and * inventory_filters[1].os_short_name='centos' If the list is empty, this * resource group will be applied to the target VM unconditionally. Structure * is documented below. */ inventoryFilters?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupInventoryFilter[]; /** * List of resources configured for this resource * group. 
The resources are executed in the exact order specified here. * Structure is documented below. */ resources: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResource[]; } interface OsPolicyAssignmentOsPolicyResourceGroupInventoryFilter { /** * The OS short name */ osShortName: string; /** * The OS version Prefix matches are supported if * asterisk(*) is provided as the last character. For example, to match all * versions with a major version of `7`, specify the following value for this * field `7.*` An empty string matches all OS versions. */ osVersion?: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResource { /** * Exec resource Structure is * documented below. */ exec?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExec; /** * File resource Structure is * documented below. */ file?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceFile; /** * The id of the resource with the following restrictions: * * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the OS policy. */ id: string; /** * Package resource Structure is * documented below. */ pkg?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkg; /** * Package repository resource Structure is * documented below. */ repository?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceRepository; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExec { /** * What to run to bring this resource into the desired * state. An exit code of 100 indicates "success", any other exit code * indicates a failure running enforce. Structure is * documented below. */ enforce?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforce; /** * What to run to validate this resource is in the * desired state. 
An exit code of 100 indicates "in desired state", and exit * code of 101 indicates "not in desired state". Any other exit code indicates * a failure running validate. Structure is * documented below. */ validate: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidate; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforce { /** * Optional arguments to pass to the source during * execution. */ args?: string[]; /** * A remote or local file. Structure is * documented below. */ file?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforceFile; /** * The script interpreter to use. Possible values * are: `INTERPRETER_UNSPECIFIED`, `NONE`, `SHELL`, `POWERSHELL`. */ interpreter: string; /** * Only recorded for enforce Exec. Path to an * output file (that is created by this Exec) whose content will be recorded in * OSPolicyResourceCompliance after a successful run. Absence or failure to * read this file will result in this ExecResource being non-compliant. Output * file size is limited to 100K bytes. */ outputFilePath?: string; /** * An inline script. The size of the script is limited to * 1024 characters. */ script?: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforceFile { /** * Defaults to false. When false, files are * subject to validations based on the file type: Remote: A checksum must be * specified. Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Cloud Storage object. Structure is * documented below. */ gcs?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforceFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * A generic remote file. Structure is * documented below. */ remote?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforceFileRemote; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforceFileGcs { /** * Bucket of the Cloud Storage object. 
*/ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: number; /** * Name of the Cloud Storage object. */ object: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecEnforceFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain * both the protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidate { /** * Optional arguments to pass to the source during * execution. */ args?: string[]; /** * A remote or local file. Structure is * documented below. */ file?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidateFile; /** * The script interpreter to use. Possible values * are: `INTERPRETER_UNSPECIFIED`, `NONE`, `SHELL`, `POWERSHELL`. */ interpreter: string; /** * Only recorded for enforce Exec. Path to an * output file (that is created by this Exec) whose content will be recorded in * OSPolicyResourceCompliance after a successful run. Absence or failure to * read this file will result in this ExecResource being non-compliant. Output * file size is limited to 100K bytes. */ outputFilePath?: string; /** * An inline script. The size of the script is limited to * 1024 characters. */ script?: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidateFile { /** * Defaults to false. When false, files are * subject to validations based on the file type: Remote: A checksum must be * specified. Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Cloud Storage object. Structure is * documented below. */ gcs?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidateFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * A generic remote file. Structure is * documented below. 
*/ remote?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidateFileRemote; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidateFileGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: number; /** * Name of the Cloud Storage object. */ object: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceExecValidateFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain * both the protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceFile { /** * A file with this content. The size of the content * is limited to 1024 characters. */ content?: string; /** * A remote or local source. Structure is * documented below. */ file?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceFileFile; /** * The absolute path of the file within the VM. */ path: string; /** * Consists of three octal digits which represent, in * order, the permissions of the owner, group, and other users for the file * (similarly to the numeric mode used in the linux chmod utility). Each digit * represents a three bit number with the 4 bit corresponding to the read * permissions, the 2 bit corresponds to the write bit, and the one bit * corresponds to the execute permission. Default behavior is 755. Below are * some examples of permissions and their associated values: read, write, and * execute: 7 read and execute: 5 read and write: 6 read only: 4 */ permissions: string; /** * Desired state of the file. Possible values are: * `DESIRED_STATE_UNSPECIFIED`, `PRESENT`, `ABSENT`, `CONTENTS_MATCH`. */ state: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceFileFile { /** * Defaults to false. 
When false, files are * subject to validations based on the file type: Remote: A checksum must be * specified. Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Cloud Storage object. Structure is * documented below. */ gcs?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceFileFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * A generic remote file. Structure is * documented below. */ remote?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceFileFileRemote; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceFileFileGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: number; /** * Name of the Cloud Storage object. */ object: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceFileFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain * both the protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkg { /** * A package managed by Apt. Structure is * documented below. */ apt?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgApt; /** * A deb package file. Structure is * documented below. */ deb?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDeb; /** * The desired state the agent should maintain for * this package. Possible values are: `DESIRED_STATE_UNSPECIFIED`, `INSTALLED`, * `REMOVED`. */ desiredState: string; /** * A package managed by GooGet. Structure is * documented below. */ googet?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgGooget; /** * An MSI package. Structure is * documented below. */ msi?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsi; /** * An rpm package file. 
Structure is * documented below. */ rpm?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpm; /** * A package managed by YUM. Structure is * documented below. */ yum?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgYum; /** * A package managed by Zypper. Structure is * documented below. */ zypper?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgZypper; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgApt { /** * Package name. */ name: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDeb { /** * Whether dependencies should also be installed. - * install when false: `dpkg -i package` - install when true: `apt-get update * && apt-get -y install package.deb` */ pullDeps?: boolean; /** * A deb package. Structure is * documented below. */ source: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDebSource; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDebSource { /** * Defaults to false. When false, files are * subject to validations based on the file type: Remote: A checksum must be * specified. Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Cloud Storage object. Structure is * documented below. */ gcs?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDebSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * A generic remote file. Structure is * documented below. */ remote?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDebSourceRemote; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDebSourceGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: number; /** * Name of the Cloud Storage object. */ object: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgDebSourceRemote { /** * SHA256 checksum of the remote file. 
*/ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain * both the protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgGooget { /** * Package name. */ name: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsi { /** * Additional properties to use during installation. * This should be in the format of Property=Setting. Appended to the defaults * of `ACTION=INSTALL REBOOT=ReallySuppress`. */ properties?: string[]; /** * The MSI package. Structure is * documented below. */ source: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsiSource; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsiSource { /** * Defaults to false. When false, files are * subject to validations based on the file type: Remote: A checksum must be * specified. Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Cloud Storage object. Structure is * documented below. */ gcs?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsiSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * A generic remote file. Structure is * documented below. */ remote?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsiSourceRemote; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsiSourceGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: number; /** * Name of the Cloud Storage object. */ object: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgMsiSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain * both the protocol and path following the format `{protocol}://{location}`. 
*/ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpm { /** * Whether dependencies should also be installed. - * install when false: `rpm --upgrade --replacepkgs package.rpm` - install when * true: `yum -y install package.rpm` or `zypper -y install package.rpm` */ pullDeps?: boolean; /** * An rpm package. Structure is * documented below. */ source: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpmSource; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpmSource { /** * Defaults to false. When false, files are * subject to validations based on the file type: Remote: A checksum must be * specified. Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * A Cloud Storage object. Structure is * documented below. */ gcs?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpmSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * A generic remote file. Structure is * documented below. */ remote?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpmSourceRemote; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpmSourceGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: number; /** * Name of the Cloud Storage object. */ object: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgRpmSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain * both the protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgYum { /** * Package name. */ name: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourcePkgZypper { /** * Package name. 
*/ name: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceRepository { /** * An Apt Repository. Structure is * documented below. */ apt?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryApt; /** * A Goo Repository. Structure is * documented below. */ goo?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryGoo; /** * A Yum Repository. Structure is * documented below. */ yum?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryYum; /** * A Zypper Repository. Structure is * documented below. */ zypper?: outputs.osconfig.OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryZypper; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryApt { /** * Type of archive files in this repository. * Possible values are: `ARCHIVE_TYPE_UNSPECIFIED`, `DEB`, `DEB_SRC`. */ archiveType: string; /** * List of components for this repository. Must * contain at least one item. */ components: string[]; /** * Distribution of this repository. */ distribution: string; /** * URI of the key file for this repository. The agent * maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`. */ gpgKey?: string; /** * URI for this repository. */ uri: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryGoo { /** * The name of the repository. */ name: string; /** * The url of the repository. */ url: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryYum { /** * The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * A one word, unique name for this repository. This is the * `repo id` in the yum config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique identifier * when checking for resource conflicts. 
*/ id: string; } interface OsPolicyAssignmentOsPolicyResourceGroupResourceRepositoryZypper { /** * The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * A one word, unique name for this repository. This is the * `repo id` in the zypper config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique identifier * when checking for GuestPolicy conflicts. */ id: string; } interface OsPolicyAssignmentRollout { /** * The maximum number (or percentage) of VMs * per zone to disrupt at any given moment. Structure is * documented below. */ disruptionBudget: outputs.osconfig.OsPolicyAssignmentRolloutDisruptionBudget; /** * This determines the minimum duration of * time to wait after the configuration changes are applied through the current * rollout. A VM continues to count towards the `disruptionBudget` at least * until this duration of time has passed after configuration changes are * applied. */ minWaitDuration: string; } interface OsPolicyAssignmentRolloutDisruptionBudget { /** * Specifies a fixed value. */ fixed?: number; /** * Specifies the relative value defined as a percentage, * which will be multiplied by a reference value. * * -------------------------------------------------------------------------------- */ percent?: number; } interface PatchDeploymentInstanceFilter { /** * Target all VM instances in the project. If true, no other criteria is permitted. */ all?: boolean; /** * Targets VM instances matching ANY of these GroupLabels. This allows targeting of disparate groups of VM instances. * Structure is documented below. */ groupLabels?: outputs.osconfig.PatchDeploymentInstanceFilterGroupLabel[]; /** * Targets VMs whose name starts with one of these prefixes. Similar to labels, this is another way to group * VMs when targeting configs, for example prefix="prod-". 
*/ instanceNamePrefixes?: string[]; /** * Targets any of the VM instances specified. Instances are specified by their URI in the form `zones/{{zone}}/instances/{{instance_name}}`, * `projects/{{project_id}}/zones/{{zone}}/instances/{{instance_name}}`, or * `https://www.googleapis.com/compute/v1/projects/{{project_id}}/zones/{{zone}}/instances/{{instance_name}}` */ instances?: string[]; /** * Targets VM instances in ANY of these zones. Leave empty to target VM instances in any zone. */ zones?: string[]; } interface PatchDeploymentInstanceFilterGroupLabel { /** * Compute Engine instance labels that must be present for a VM instance to be targeted by this filter */ labels: { [key: string]: string; }; } interface PatchDeploymentOneTimeSchedule { /** * The desired patch job execution time. A timestamp in RFC3339 UTC "Zulu" format, * accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". */ executeTime: string; } interface PatchDeploymentPatchConfig { /** * Apt update settings. Use this setting to override the default apt patch rules. * Structure is documented below. */ apt?: outputs.osconfig.PatchDeploymentPatchConfigApt; /** * goo update settings. Use this setting to override the default goo patch rules. * Structure is documented below. */ goo?: outputs.osconfig.PatchDeploymentPatchConfigGoo; /** * Allows the patch job to run on Managed instance groups (MIGs). */ migInstancesAllowed?: boolean; /** * The ExecStep to run after the patch update. * Structure is documented below. */ postStep?: outputs.osconfig.PatchDeploymentPatchConfigPostStep; /** * The ExecStep to run before the patch update. * Structure is documented below. */ preStep?: outputs.osconfig.PatchDeploymentPatchConfigPreStep; /** * Post-patch reboot settings. * Possible values are: `DEFAULT`, `ALWAYS`, `NEVER`. */ rebootConfig?: string; /** * Windows update settings. Use this setting to override the default Windows patch rules. * Structure is documented below. 
*/ windowsUpdate?: outputs.osconfig.PatchDeploymentPatchConfigWindowsUpdate; /** * Yum update settings. Use this setting to override the default yum patch rules. * Structure is documented below. */ yum?: outputs.osconfig.PatchDeploymentPatchConfigYum; /** * zypper update settings. Use this setting to override the default zypper patch rules. * Structure is documented below. */ zypper?: outputs.osconfig.PatchDeploymentPatchConfigZypper; } interface PatchDeploymentPatchConfigApt { /** * List of packages to exclude from update. These packages will be excluded. */ excludes?: string[]; /** * An exclusive list of packages to be updated. These are the only packages that will be updated. * If these packages are not installed, they will be ignored. This field cannot be specified with * any other patch configuration fields. */ exclusivePackages?: string[]; /** * By changing the type to DIST, the patching is performed using apt-get dist-upgrade instead. * Possible values are: `DIST`, `UPGRADE`. */ type?: string; } interface PatchDeploymentPatchConfigGoo { /** * goo update settings. Use this setting to override the default goo patch rules. */ enabled: boolean; } interface PatchDeploymentPatchConfigPostStep { /** * The ExecStepConfig for all Linux VMs targeted by the PatchJob. * Structure is documented below. */ linuxExecStepConfig?: outputs.osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfig; /** * The ExecStepConfig for all Windows VMs targeted by the PatchJob. * Structure is documented below. */ windowsExecStepConfig?: outputs.osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfig; } interface PatchDeploymentPatchConfigPostStepLinuxExecStepConfig { /** * Defaults to [0]. A list of possible return values that the execution can return to indicate a success. */ allowedSuccessCodes?: number[]; /** * A Cloud Storage object containing the executable. * Structure is documented below. 
*/ gcsObject?: outputs.osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject; /** * The script interpreter to use to run the script. If no interpreter is specified the script will * be executed directly, which will likely only succeed for scripts with shebang lines. * Possible values are: `SHELL`, `POWERSHELL`. */ interpreter?: string; /** * An absolute path to the executable on the VM. */ localPath?: string; } interface PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. */ generationNumber: string; /** * Name of the Cloud Storage object. */ object: string; } interface PatchDeploymentPatchConfigPostStepWindowsExecStepConfig { /** * Defaults to [0]. A list of possible return values that the execution can return to indicate a success. */ allowedSuccessCodes?: number[]; /** * A Cloud Storage object containing the executable. * Structure is documented below. */ gcsObject?: outputs.osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject; /** * The script interpreter to use to run the script. If no interpreter is specified the script will * be executed directly, which will likely only succeed for scripts with shebang lines. * Possible values are: `SHELL`, `POWERSHELL`. */ interpreter?: string; /** * An absolute path to the executable on the VM. */ localPath?: string; } interface PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. */ generationNumber: string; /** * Name of the Cloud Storage object. 
*/ object: string; } interface PatchDeploymentPatchConfigPreStep { /** * The ExecStepConfig for all Linux VMs targeted by the PatchJob. * Structure is documented below. */ linuxExecStepConfig?: outputs.osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfig; /** * The ExecStepConfig for all Windows VMs targeted by the PatchJob. * Structure is documented below. */ windowsExecStepConfig?: outputs.osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfig; } interface PatchDeploymentPatchConfigPreStepLinuxExecStepConfig { /** * Defaults to [0]. A list of possible return values that the execution can return to indicate a success. */ allowedSuccessCodes?: number[]; /** * A Cloud Storage object containing the executable. * Structure is documented below. */ gcsObject?: outputs.osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject; /** * The script interpreter to use to run the script. If no interpreter is specified the script will * be executed directly, which will likely only succeed for scripts with shebang lines. * Possible values are: `SHELL`, `POWERSHELL`. */ interpreter?: string; /** * An absolute path to the executable on the VM. */ localPath?: string; } interface PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. */ generationNumber: string; /** * Name of the Cloud Storage object. */ object: string; } interface PatchDeploymentPatchConfigPreStepWindowsExecStepConfig { /** * Defaults to [0]. A list of possible return values that the execution can return to indicate a success. */ allowedSuccessCodes?: number[]; /** * A Cloud Storage object containing the executable. * Structure is documented below. 
*/ gcsObject?: outputs.osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject; /** * The script interpreter to use to run the script. If no interpreter is specified the script will * be executed directly, which will likely only succeed for scripts with shebang lines. * Possible values are: `SHELL`, `POWERSHELL`. */ interpreter?: string; /** * An absolute path to the executable on the VM. */ localPath?: string; } interface PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. */ generationNumber: string; /** * Name of the Cloud Storage object. */ object: string; } interface PatchDeploymentPatchConfigWindowsUpdate { /** * Only apply updates of these windows update classifications. If empty, all updates are applied. * Each value may be one of: `CRITICAL`, `SECURITY`, `DEFINITION`, `DRIVER`, `FEATURE_PACK`, `SERVICE_PACK`, `TOOL`, `UPDATE_ROLLUP`, `UPDATE`. */ classifications?: string[]; /** * List of KBs to exclude from update. */ excludes?: string[]; /** * An exclusive list of kbs to be updated. These are the only patches that will be updated. * This field must not be used with other patch configurations. */ exclusivePatches?: string[]; } interface PatchDeploymentPatchConfigYum { /** * List of packages to exclude from update. These packages will be excluded. */ excludes?: string[]; /** * An exclusive list of packages to be updated. These are the only packages that will be updated. * If these packages are not installed, they will be ignored. This field cannot be specified with * any other patch configuration fields. */ exclusivePackages?: string[]; /** * Will cause patch to run yum update-minimal instead. */ minimal?: boolean; /** * Adds the --security flag to yum update. Not supported on all platforms. 
*/ security?: boolean; } interface PatchDeploymentPatchConfigZypper { /** * Install only patches with these categories. Common categories include security, recommended, and feature. */ categories?: string[]; /** * List of packages to exclude from update. */ excludes?: string[]; /** * An exclusive list of patches to be updated. These are the only patches that will be installed using 'zypper patch patch:' command. * This field must not be used with any other patch configuration fields. */ exclusivePatches?: string[]; /** * Install only patches with these severities. Common severities include critical, important, moderate, and low. */ severities?: string[]; /** * Adds the --with-optional flag to zypper patch. */ withOptional?: boolean; /** * Adds the --with-update flag, to zypper patch. */ withUpdate?: boolean; } interface PatchDeploymentRecurringSchedule { /** * The end time at which a recurring patch deployment schedule is no longer active. * A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". */ endTime?: string; /** * (Output) * The time the last patch job ran successfully. * A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". */ lastExecuteTime: string; /** * Schedule with monthly executions. * Structure is documented below. */ monthly?: outputs.osconfig.PatchDeploymentRecurringScheduleMonthly; /** * (Output) * The time the next patch job is scheduled to run. * A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". */ nextExecuteTime: string; /** * The time that the recurring schedule becomes effective. Defaults to createTime of the patch deployment. * A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". */ startTime?: string; /** * Time of the day to run a recurring deployment. * Structure is documented below. 
*/ timeOfDay: outputs.osconfig.PatchDeploymentRecurringScheduleTimeOfDay; /** * Defines the time zone that timeOfDay is relative to. The rules for daylight saving time are * determined by the chosen time zone. * Structure is documented below. */ timeZone: outputs.osconfig.PatchDeploymentRecurringScheduleTimeZone; /** * Schedule with weekly executions. * Structure is documented below. */ weekly?: outputs.osconfig.PatchDeploymentRecurringScheduleWeekly; } interface PatchDeploymentRecurringScheduleMonthly { /** * One day of the month. 1-31 indicates the 1st to the 31st day. -1 indicates the last day of the month. * Months without the target day will be skipped. For example, a schedule to run "every month on the 31st" * will not run in February, April, June, etc. */ monthDay?: number; /** * Week day in a month. * Structure is documented below. */ weekDayOfMonth?: outputs.osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth; } interface PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth { /** * A day of the week. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeek: string; /** * Represents the number of days before or after the given week day of month that the patch deployment is scheduled for. */ dayOffset?: number; /** * Week number in a month. 1-4 indicates the 1st to 4th week of the month. -1 indicates the last week of the month. */ weekOrdinal: number; } interface PatchDeploymentRecurringScheduleTimeOfDay { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. 
*/ seconds?: number; } interface PatchDeploymentRecurringScheduleTimeZone { /** * IANA Time Zone Database time zone, e.g. "America/New_York". */ id: string; /** * IANA Time Zone Database version number, e.g. "2019a". */ version?: string; } interface PatchDeploymentRecurringScheduleWeekly { /** * Day of the week on which the recurring patch deployment runs. * Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ dayOfWeek: string; } interface PatchDeploymentRollout { /** * The maximum number (or percentage) of VMs per zone to disrupt at any given moment. The number of VMs calculated from multiplying the percentage by the total number of VMs in a zone is rounded up. * During patching, a VM is considered disrupted from the time the agent is notified to begin until patching has completed. This disruption time includes the time to complete reboot and any post-patch steps. * A VM contributes to the disruption budget if its patching operation fails either when applying the patches, running pre or post patch steps, or if it fails to respond with a success notification before timing out. VMs that are not running or do not have an active agent do not count toward this disruption budget. * For zone-by-zone rollouts, if the disruption budget in a zone is exceeded, the patch job stops, because continuing to the next zone requires completion of the patch process in the previous zone. * For example, if the disruption budget has a fixed value of 10, and 8 VMs fail to patch in the current zone, the patch job continues to patch 2 VMs at a time until the zone is completed. When that zone is completed successfully, patching begins with 10 VMs at a time in the next zone. If 10 VMs in the next zone fail to patch, the patch job stops. * Structure is documented below. */ disruptionBudget: outputs.osconfig.PatchDeploymentRolloutDisruptionBudget; /** * Mode of the patch rollout. * Possible values are: `ZONE_BY_ZONE`, `CONCURRENT_ZONES`. 
*/ mode: string; } interface PatchDeploymentRolloutDisruptionBudget { /** * Specifies a fixed value. */ fixed?: number; /** * Specifies the relative value defined as a percentage, which will be multiplied by a reference value. */ percentage?: number; } interface V2PolicyOrchestratorForFolderOrchestratedResource { /** * ID of the resource to be used while generating set of affected resources. * For UPSERT action the value is auto-generated during PolicyOrchestrator * creation when not set. When the value is set it should following next * restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the project. * For DELETE action, ID must be specified explicitly during * PolicyOrchestrator creation. * * * The `osPolicyAssignmentV1Payload` block supports: */ id?: string; /** * OS policy assignment is an API resource that is used to * apply a set of OS policies to a dynamically targeted group of Compute Engine * VM instances. * An OS policy is used to define the desired state configuration for a * Compute Engine VM instance through a set of configuration resources that * provide capabilities such as installing or removing software packages, or * executing a script. * For more information about the OS policy resource definitions and examples, * see * [OS policy and OS policy * assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). * Structure is documented below. */ osPolicyAssignmentV1Payload?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1Payload; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1Payload { /** * Indicates that this revision has been successfully rolled out in this zone * and new VMs will be assigned OS policies from this revision. 
* * For a given OS policy assignment, there is only one revision with a value * of 'true' for this field. */ baseline: boolean; /** * Indicates that this revision deletes the OS policy assignment. */ deleted: boolean; /** * Freeform text describing the purpose of the resource. */ description?: string; /** * This checksum is computed by the server based on the value of other * fields, and may be sent on update and delete requests to ensure the * client has an up-to-date value before proceeding. */ etag: string; /** * Filters to select target VMs for an assignment. * * If more than one filter criteria is specified below, a VM will be selected * if and only if it satisfies all of them. */ instanceFilter: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilter; /** * Identifier. In form of * * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}` */ name?: string; /** * List of OS policies to be applied to the VMs. */ osPolicies: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicy[]; /** * Set to true if there are ongoing changes being applied by the * orchestrator. */ reconciling: boolean; /** * The timestamp that the revision was created. */ revisionCreateTime: string; /** * The assignment revision ID * A new revision is committed whenever a rollout is triggered for a OS policy * assignment */ revisionId: string; /** * Message to configure the rollout at the zonal level for the OS policy * assignment. 
*/ rollout: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadRollout; /** * OS policy assignment rollout state * Possible values: * IN_PROGRESS * CANCELLING * CANCELLED * SUCCEEDED */ rolloutState: string; /** * Server generated unique id for the OS policy assignment resource. */ uid: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilter { /** * Target all VMs in the project. If true, no other criteria is * permitted. */ all?: boolean; /** * List of label sets used for VM exclusion. * If the list has more than one label set, the VM is excluded if any * of the label sets are applicable for the VM. * Structure is documented below. */ exclusionLabels?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterExclusionLabel[]; /** * List of label sets used for VM inclusion. * If the list has more than one `LabelSet`, the VM is included if any * of the label sets are applicable for the VM. * Structure is documented below. */ inclusionLabels?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInclusionLabel[]; /** * List of inventories to select VMs. * A VM is selected if its inventory data matches at least one of the * following inventories. * Structure is documented below. */ inventories?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInventory[]; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterExclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this * map to be selected. 
*/ labels?: { [key: string]: string; }; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this * map to be selected. */ labels?: { [key: string]: string; }; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInventory { /** * The OS short name */ osShortName: string; /** * The OS version * Prefix matches are supported if asterisk(*) is provided as the * last character. For example, to match all versions with a major * version of `7`, specify the following value for this field `7.*` * An empty string matches all OS versions. */ osVersion?: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicy { /** * This flag determines the OS policy compliance status when none of the * resource groups within the policy are applicable for a VM. Set this value * to `true` if the policy needs to be reported as compliant even if the * policy has nothing to validate or enforce. */ allowNoResourceGroupMatch?: boolean; /** * Policy description. * Length of the description is limited to 1024 characters. */ description?: string; /** * The id of the OS policy with the following restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the assignment. */ id: string; /** * Policy mode * Possible values are: `VALIDATION`, `ENFORCEMENT`. */ mode: string; /** * List of resource groups for the policy. * For a particular VM, resource groups are evaluated in the order specified * and the first resource group that is applicable is selected and the rest * are ignored. 
* If none of the resource groups are applicable for a VM, the VM is * considered to be non-compliant w.r.t this policy. This behavior can be * toggled by the flag `allowNoResourceGroupMatch` * Structure is documented below. */ resourceGroups: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroup[]; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroup { /** * List of inventory filters for the resource group. * The resources in this resource group are applied to the target VM if it * satisfies at least one of the following inventory filters. * For example, to apply this resource group to VMs running either `RHEL` or * `CentOS` operating systems, specify 2 items for the list with following * values: * inventory_filters[0].os_short_name='rhel' and * inventory_filters[1].os_short_name='centos' * If the list is empty, this resource group will be applied to the target * VM unconditionally. * Structure is documented below. */ inventoryFilters?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupInventoryFilter[]; /** * List of resources configured for this resource group. * The resources are executed in the exact order specified here. * Structure is documented below. */ resources: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResource[]; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupInventoryFilter { /** * The OS short name */ osShortName: string; /** * The OS version * Prefix matches are supported if asterisk(*) is provided as the * last character. For example, to match all versions with a major * version of `7`, specify the following value for this field `7.*` * An empty string matches all OS versions. 
*/ osVersion?: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResource { /** * A resource that allows executing scripts on the VM. * The `ExecResource` has 2 stages: `validate` and `enforce` and both stages * accept a script as an argument to execute. * When the `ExecResource` is applied by the agent, it first executes the * script in the `validate` stage. The `validate` stage can signal that the * `ExecResource` is already in the desired state by returning an exit code * of `100`. If the `ExecResource` is not in the desired state, it should * return an exit code of `101`. Any other exit code returned by this stage * is considered an error. * If the `ExecResource` is not in the desired state based on the exit code * from the `validate` stage, the agent proceeds to execute the script from * the `enforce` stage. If the `ExecResource` is already in the desired * state, the `enforce` stage will not be run. * Similar to `validate` stage, the `enforce` stage should return an exit * code of `100` to indicate that the resource is now in its desired state. * Any other exit code is considered an error. * NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to * have an explicit indicator of `in desired state`, `not in desired state` * and errors. Because, for example, Powershell will always return an exit * code of `0` unless an `exit` statement is provided in the script. So, for * reasons of consistency and being explicit, exit codes `100` and `101` * were chosen. * Structure is documented below. */ exec?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExec; /** * A resource that manages the state of a file. * Structure is documented below. 
*/ file?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFile; /** * The id of the resource with the following restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the OS policy. */ id: string; /** * A resource that manages a system package. * Structure is documented below. */ pkg?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkg; /** * A resource that manages a package repository. * Structure is documented below. */ repository?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepository; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExec { /** * A file or script to execute. * Structure is documented below. */ enforce?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforce; /** * A file or script to execute. * Structure is documented below. */ validate: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidate; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforce { /** * Optional arguments to pass to the source during execution. */ args?: string[]; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFile; /** * The script interpreter to use. * Possible values are: `NONE`, `SHELL`, `POWERSHELL`. 
*/ interpreter: string; /** * Only recorded for enforce Exec. * Path to an output file (that is created by this Exec) whose * content will be recorded in OSPolicyResourceCompliance after a * successful run. Absence or failure to read this file will result in * this ExecResource being non-compliant. Output file size is limited to * 500K bytes. */ outputFilePath?: string; /** * An inline script. * The size of the script is limited to 32KiB. */ script?: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileRemote; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. 
It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidate { /** * Optional arguments to pass to the source during execution. */ args?: string[]; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFile; /** * The script interpreter to use. * Possible values are: `NONE`, `SHELL`, `POWERSHELL`. */ interpreter: string; /** * Only recorded for enforce Exec. * Path to an output file (that is created by this Exec) whose * content will be recorded in OSPolicyResourceCompliance after a * successful run. Absence or failure to read this file will result in * this ExecResource being non-compliant. Output file size is limited to * 500K bytes. */ outputFilePath?: string; /** * An inline script. * The size of the script is limited to 32KiB. */ script?: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. 
*/ remote?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileRemote; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFile { /** * A file with this content. * The size of the content is limited to 32KiB. */ content?: string; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFile; /** * The absolute path of the file within the VM. */ path: string; /** * Consists of three octal digits which represent, in * order, the permissions of the owner, group, and other users for the * file (similarly to the numeric mode used in the linux chmod * utility). Each digit represents a three bit number with the 4 bit * corresponding to the read permissions, the 2 bit corresponds to the * write bit, and the one bit corresponds to the execute permission. * Default behavior is 755. * Below are some examples of permissions and their associated values: * read, write, and execute: 7 * read and execute: 5 * read and write: 6 * read only: 4 */ permissions?: string; /** * Desired state of the file. 
* Possible values are: `PRESENT`, `ABSENT`, `CONTENTS_MATCH`. */ state: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileRemote; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkg { /** * A package managed by APT. * - install: `apt-get update && apt-get -y install [name]` * - remove: `apt-get -y remove [name]` * Structure is documented below. 
*/ apt?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgApt; /** * A deb package file. dpkg packages only support INSTALLED state. * Structure is documented below. */ deb?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDeb; /** * The desired state the agent should maintain for this package. * Possible values are: `INSTALLED`, `REMOVED`. */ desiredState: string; /** * A package managed by GooGet. * - install: `googet -noconfirm install package` * - remove: `googet -noconfirm remove package` * Structure is documented below. */ googet?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgGooget; /** * An MSI package. MSI packages only support INSTALLED state. * Structure is documented below. */ msi?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsi; /** * An RPM package file. RPM packages only support INSTALLED state. * Structure is documented below. */ rpm?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpm; /** * A package managed by YUM. * - install: `yum -y install package` * - remove: `yum -y remove package` * Structure is documented below. */ yum?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgYum; /** * A package managed by Zypper. * - install: `zypper -y install package` * - remove: `zypper -y rm package` * Structure is documented below. 
*/ zypper?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgZypper; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgApt { /** * Package name. */ name: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDeb { /** * Whether dependencies should also be installed. * - install when false: `dpkg -i package` * - install when true: `apt-get update && apt-get -y install * package.deb` */ pullDeps?: boolean; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSource; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceRemote; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. 
*/ generation?: string; /** * Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgGooget { /** * Package name. */ name: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsi { /** * Additional properties to use during installation. * This should be in the format of Property=Setting. * Appended to the defaults of `ACTION=INSTALL * REBOOT=ReallySuppress`. */ properties?: string[]; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSource; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. 
*/ remote?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceRemote; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpm { /** * Whether dependencies should also be installed. * - install when false: `rpm --upgrade --replacepkgs package.rpm` * - install when true: `yum -y install package.rpm` or * `zypper -y install package.rpm` */ pullDeps?: boolean; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSource; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. 
*/ gcs?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceRemote; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceGcs { /** * Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgYum { /** * Package name. */ name: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgZypper { /** * Package name. */ name: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepository { /** * Represents a single apt package repository. These will be added to * a repo file that will be managed at * `/etc/apt/sources.list.d/google_osconfig.list`. * Structure is documented below. */ apt?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryApt; /** * Represents a Goo package repository. 
These are added to a repo file * that is managed at * `C:/ProgramData/GooGet/repos/google_osconfig.repo`. * Structure is documented below. */ goo?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryGoo; /** * Represents a single yum package repository. These are added to a * repo file that is managed at * `/etc/yum.repos.d/google_osconfig.repo`. * Structure is documented below. */ yum?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryYum; /** * Represents a single zypper package repository. These are added to a * repo file that is managed at * `/etc/zypp/repos.d/google_osconfig.repo`. * Structure is documented below. */ zypper?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryZypper; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryApt { /** * Type of archive files in this repository. * Possible values are: `DEB`, `DEB_SRC`. */ archiveType: string; /** * List of components for this repository. Must contain at least one * item. */ components: string[]; /** * Distribution of this repository. */ distribution: string; /** * URI of the key file for this repository. The agent maintains a * keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`. */ gpgKey?: string; /** * URI for this repository. */ uri: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryGoo { /** * The name of the repository. */ name: string; /** * The url of the repository. */ url: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryYum { /** * The location of the repository directory. 
*/ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * A one word, unique name for this repository. This is the `repo * id` in the yum config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique * identifier when checking for resource conflicts. */ id: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryZypper { /** * The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * A one word, unique name for this repository. This is the `repo * id` in the zypper config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique * identifier when checking for GuestPolicy conflicts. */ id: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadRollout { /** * Message encapsulating a value that can be either absolute ("fixed") or * relative ("percent") to a value. * Structure is documented below. */ disruptionBudget: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadRolloutDisruptionBudget; /** * This determines the minimum duration of time to wait after the * configuration changes are applied through the current rollout. A * VM continues to count towards the `disruptionBudget` at least * until this duration of time has passed after configuration changes are * applied. */ minWaitDuration: string; } interface V2PolicyOrchestratorForFolderOrchestratedResourceOsPolicyAssignmentV1PayloadRolloutDisruptionBudget { /** * Specifies a fixed value. */ fixed?: number; /** * Specifies the relative value defined as a percentage, which will be * multiplied by a reference value. 
*/ percent?: number; } interface V2PolicyOrchestratorForFolderOrchestrationScope { /** * Selectors of the orchestration scope. There is a logical AND between each * selector defined. * When there is no explicit `ResourceHierarchySelector` selector specified, * the scope is by default bounded to the parent of the policy orchestrator * resource. * Structure is documented below. */ selectors?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationScopeSelector[]; } interface V2PolicyOrchestratorForFolderOrchestrationScopeSelector { /** * Selector containing locations in scope. * Structure is documented below. */ locationSelector?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationScopeSelectorLocationSelector; /** * Selector containing Cloud Resource Manager resource hierarchy nodes. * Structure is documented below. */ resourceHierarchySelector?: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationScopeSelectorResourceHierarchySelector; } interface V2PolicyOrchestratorForFolderOrchestrationScopeSelectorLocationSelector { /** * Names of the locations in scope. * Format: `us-central1-a` */ includedLocations?: string[]; } interface V2PolicyOrchestratorForFolderOrchestrationScopeSelectorResourceHierarchySelector { /** * Names of the folders in scope. * Format: `folders/{folder_id}` */ includedFolders?: string[]; /** * Names of the projects in scope. * Format: `projects/{project_number}` */ includedProjects?: string[]; } interface V2PolicyOrchestratorForFolderOrchestrationState { /** * (Output) * Describes the state of a single iteration of the orchestrator. * Structure is documented below. */ currentIterationStates: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationStateCurrentIterationState[]; /** * (Output) * Describes the state of a single iteration of the orchestrator. * Structure is documented below. 
*/ previousIterationStates: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationStatePreviousIterationState[]; } interface V2PolicyOrchestratorForFolderOrchestrationStateCurrentIterationState { /** * (Output) * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ errors: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationStateCurrentIterationStateError[]; /** * (Output) * Number of orchestration actions which failed so far. For more details, * query the Cloud Logs. */ failedActions: string; /** * (Output) * Finish time of the wave iteration. */ finishTime: string; /** * (Output) * Overall number of actions done by the orchestrator so far. */ performedActions: string; /** * (Output) * An estimated percentage of the progress. Number between 0 and 100. */ progress: number; /** * (Output) * Handle to the Progressive Rollouts API rollout resource, which contains * detailed information about a particular orchestration iteration. */ rolloutResource: string; /** * (Output) * Start time of the wave iteration. */ startTime: string; /** * State of the orchestrator. Can be updated to change orchestrator behaviour. * Allowed values: * - `ACTIVE` - orchestrator is actively looking for actions to be taken. * - `STOPPED` - orchestrator won't make any changes. * Note: There might be more states added in the future. We use string here * instead of an enum, to avoid the need of propagating new states to all the * client code. 
*/ state: string; } interface V2PolicyOrchestratorForFolderOrchestrationStateCurrentIterationStateError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationStateCurrentIterationStateErrorDetail[]; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message: string; } interface V2PolicyOrchestratorForFolderOrchestrationStateCurrentIterationStateErrorDetail { /** * (Output) * A URL/resource name that uniquely identifies the type of the serialized protocol buffer message */ typeUrl: string; /** * (Output) */ value: string; } interface V2PolicyOrchestratorForFolderOrchestrationStatePreviousIterationState { /** * (Output) * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ errors: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationStatePreviousIterationStateError[]; /** * (Output) * Number of orchestration actions which failed so far. For more details, * query the Cloud Logs. */ failedActions: string; /** * (Output) * Finish time of the wave iteration. */ finishTime: string; /** * (Output) * Overall number of actions done by the orchestrator so far. 
*/ performedActions: string; /** * (Output) * An estimated percentage of the progress. Number between 0 and 100. */ progress: number; /** * (Output) * Handle to the Progressive Rollouts API rollout resource, which contains * detailed information about a particular orchestration iteration. */ rolloutResource: string; /** * (Output) * Start time of the wave iteration. */ startTime: string; /** * State of the orchestrator. Can be updated to change orchestrator behaviour. * Allowed values: * - `ACTIVE` - orchestrator is actively looking for actions to be taken. * - `STOPPED` - orchestrator won't make any changes. * Note: There might be more states added in the future. We use string here * instead of an enum, to avoid the need of propagating new states to all the * client code. */ state: string; } interface V2PolicyOrchestratorForFolderOrchestrationStatePreviousIterationStateError { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details: outputs.osconfig.V2PolicyOrchestratorForFolderOrchestrationStatePreviousIterationStateErrorDetail[]; /** * (Output) * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message: string; } interface V2PolicyOrchestratorForFolderOrchestrationStatePreviousIterationStateErrorDetail { /** * (Output) * A URL/resource name that uniquely identifies the type of the serialized protocol buffer message */ typeUrl: string; /** * (Output) */ value: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResource { /** * Optional. ID of the resource to be used while generating set of affected resources. 
* For UPSERT action the value is auto-generated during PolicyOrchestrator * creation when not set. When the value is set it should following next * restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the project. * For DELETE action, ID must be specified explicitly during * PolicyOrchestrator creation. * * * The `osPolicyAssignmentV1Payload` block supports: */ id?: string; /** * OS policy assignment is an API resource that is used to * apply a set of OS policies to a dynamically targeted group of Compute Engine * VM instances. * An OS policy is used to define the desired state configuration for a * Compute Engine VM instance through a set of configuration resources that * provide capabilities such as installing or removing software packages, or * executing a script. * For more information about the OS policy resource definitions and examples, * see * [OS policy and OS policy * assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). * Structure is documented below. */ osPolicyAssignmentV1Payload?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1Payload; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1Payload { /** * Output only. Indicates that this revision has been successfully rolled out in this zone * and new VMs will be assigned OS policies from this revision. * * For a given OS policy assignment, there is only one revision with a value * of 'true' for this field. */ baseline: boolean; /** * Output only. Indicates that this revision deletes the OS policy assignment. */ deleted: boolean; /** * Optional. Freeform text describing the purpose of the resource. */ description?: string; /** * Output only. 
This checksum is computed by the server based on the value of other * fields, and may be sent on update and delete requests to ensure the * client has an up-to-date value before proceeding. */ etag?: string; /** * Filters to select target VMs for an assignment. * * If more than one filter criteria is specified below, a VM will be selected * if and only if it satisfies all of them. */ instanceFilter: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilter; /** * Immutable. Identifier. In form of * * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}` */ name?: string; /** * Required. List of OS policies to be applied to the VMs. */ osPolicies: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicy[]; /** * Output only. Set to true, if the there are ongoing changes being applied by the * orchestrator. */ reconciling: boolean; /** * Output only. The timestamp that the revision was created. */ revisionCreateTime: string; /** * Output only. The assignment revision ID * A new revision is committed whenever a rollout is triggered for a OS policy * assignment */ revisionId: string; /** * Message to configure the rollout at the zonal level for the OS policy * assignment. */ rollout: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadRollout; /** * Output only. OS policy assignment rollout state * Possible values: * ROLLOUT_STATE_UNSPECIFIED * IN_PROGRESS * CANCELLING * CANCELLED * SUCCEEDED */ rolloutState: string; /** * Output only. Server generated unique id for the OS policy assignment resource. 
*/ uid: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilter { /** * Target all VMs in the project. If true, no other criteria is * permitted. */ all?: boolean; /** * List of label sets used for VM exclusion. * If the list has more than one label set, the VM is excluded if any * of the label sets are applicable for the VM. * Structure is documented below. */ exclusionLabels?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterExclusionLabel[]; /** * List of label sets used for VM inclusion. * If the list has more than one `LabelSet`, the VM is included if any * of the label sets are applicable for the VM. * Structure is documented below. */ inclusionLabels?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInclusionLabel[]; /** * List of inventories to select VMs. * A VM is selected if its inventory data matches at least one of the * following inventories. * Structure is documented below. */ inventories?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInventory[]; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterExclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this * map to be selected. */ labels?: { [key: string]: string; }; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this * map to be selected. */ labels?: { [key: string]: string; }; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInventory { /** * Required. 
The OS short name */ osShortName: string; /** * The OS version * Prefix matches are supported if asterisk(*) is provided as the * last character. For example, to match all versions with a major * version of `7`, specify the following value for this field `7.*` * An empty string matches all OS versions. */ osVersion?: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicy { /** * This flag determines the OS policy compliance status when none of the * resource groups within the policy are applicable for a VM. Set this value * to `true` if the policy needs to be reported as compliant even if the * policy has nothing to validate or enforce. */ allowNoResourceGroupMatch?: boolean; /** * Policy description. * Length of the description is limited to 1024 characters. */ description?: string; /** * Required. The id of the OS policy with the following restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the assignment. */ id: string; /** * Required. Policy mode * Possible values: * MODE_UNSPECIFIED * VALIDATION * ENFORCEMENT */ mode: string; /** * Required. List of resource groups for the policy. * For a particular VM, resource groups are evaluated in the order specified * and the first resource group that is applicable is selected and the rest * are ignored. * If none of the resource groups are applicable for a VM, the VM is * considered to be non-compliant w.r.t this policy. This behavior can be * toggled by the flag `allowNoResourceGroupMatch` * Structure is documented below. 
*/ resourceGroups: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroup[]; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroup { /** * List of inventory filters for the resource group. * The resources in this resource group are applied to the target VM if it * satisfies at least one of the following inventory filters. * For example, to apply this resource group to VMs running either `RHEL` or * `CentOS` operating systems, specify 2 items for the list with following * values: * inventory_filters[0].os_short_name='rhel' and * inventory_filters[1].os_short_name='centos' * If the list is empty, this resource group will be applied to the target * VM unconditionally. * Structure is documented below. */ inventoryFilters?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupInventoryFilter[]; /** * Required. List of resources configured for this resource group. * The resources are executed in the exact order specified here. * Structure is documented below. */ resources: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResource[]; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupInventoryFilter { /** * Required. The OS short name */ osShortName: string; /** * The OS version * Prefix matches are supported if asterisk(*) is provided as the * last character. For example, to match all versions with a major * version of `7`, specify the following value for this field `7.*` * An empty string matches all OS versions. */ osVersion?: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResource { /** * A resource that allows executing scripts on the VM. 
* The `ExecResource` has 2 stages: `validate` and `enforce` and both stages * accept a script as an argument to execute. * When the `ExecResource` is applied by the agent, it first executes the * script in the `validate` stage. The `validate` stage can signal that the * `ExecResource` is already in the desired state by returning an exit code * of `100`. If the `ExecResource` is not in the desired state, it should * return an exit code of `101`. Any other exit code returned by this stage * is considered an error. * If the `ExecResource` is not in the desired state based on the exit code * from the `validate` stage, the agent proceeds to execute the script from * the `enforce` stage. If the `ExecResource` is already in the desired * state, the `enforce` stage will not be run. * Similar to `validate` stage, the `enforce` stage should return an exit * code of `100` to indicate that the resource in now in its desired state. * Any other exit code is considered an error. * NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to * have an explicit indicator of `in desired state`, `not in desired state` * and errors. Because, for example, Powershell will always return an exit * code of `0` unless an `exit` statement is provided in the script. So, for * reasons of consistency and being explicit, exit codes `100` and `101` * were chosen. * Structure is documented below. */ exec?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExec; /** * A resource that manages the state of a file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFile; /** * Required. The id of the resource with the following restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. 
* * Must end with a number or a letter. * * Must be unique within the OS policy. */ id: string; /** * A resource that manages a system package. * Structure is documented below. */ pkg?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkg; /** * A resource that manages a package repository. * Structure is documented below. */ repository?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepository; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExec { /** * A file or script to execute. * Structure is documented below. */ enforce?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforce; /** * A file or script to execute. * Structure is documented below. */ validate: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidate; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforce { /** * Optional arguments to pass to the source during execution. */ args?: string[]; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFile; /** * Required. The script interpreter to use. * Possible values: * INTERPRETER_UNSPECIFIED * NONE * SHELL * POWERSHELL */ interpreter: string; /** * Only recorded for enforce Exec. * Path to an output file (that is created by this Exec) whose * content will be recorded in OSPolicyResourceCompliance after a * successful run. Absence or failure to read this file will result in * this ExecResource being non-compliant. 
Output file size is limited to * 500K bytes. */ outputFilePath?: string; /** * An inline script. * The size of the script is limited to 32KiB. */ script?: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileRemote; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. 
*/ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidate { /** * Optional arguments to pass to the source during execution. */ args?: string[]; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFile; /** * Required. The script interpreter to use. * Possible values: * INTERPRETER_UNSPECIFIED * NONE * SHELL * POWERSHELL */ interpreter: string; /** * Only recorded for enforce Exec. * Path to an output file (that is created by this Exec) whose * content will be recorded in OSPolicyResourceCompliance after a * successful run. Absence or failure to read this file will result in * this ExecResource being non-compliant. Output file size is limited to * 500K bytes. */ outputFilePath?: string; /** * An inline script. * The size of the script is limited to 32KiB. */ script?: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. 
*/ remote?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileRemote; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFile { /** * A a file with this content. * The size of the content is limited to 32KiB. */ content?: string; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFile; /** * Required. The absolute path of the file within the VM. */ path: string; /** * Consists of three octal digits which represent, in * order, the permissions of the owner, group, and other users for the * file (similarly to the numeric mode used in the linux chmod * utility). Each digit represents a three bit number with the 4 bit * corresponding to the read permissions, the 2 bit corresponds to the * write bit, and the one bit corresponds to the execute permission. * Default behavior is 755. 
* Below are some examples of permissions and their associated values: * read, write, and execute: 7 * read and execute: 5 * read and write: 6 * read only: 4 */ permissions?: string; /** * Required. Desired state of the file. * Possible values: * DESIRED_STATE_UNSPECIFIED * PRESENT * ABSENT * CONTENTS_MATCH */ state: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileRemote; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. 
*/ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkg { /** * A package managed by APT. * - install: `apt-get update && apt-get -y install [name]` * - remove: `apt-get -y remove [name]` * Structure is documented below. */ apt?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgApt; /** * A deb package file. dpkg packages only support INSTALLED state. * Structure is documented below. */ deb?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDeb; /** * Required. The desired state the agent should maintain for this package. * Possible values: * DESIRED_STATE_UNSPECIFIED * INSTALLED * REMOVED */ desiredState: string; /** * A package managed by GooGet. * - install: `googet -noconfirm install package` * - remove: `googet -noconfirm remove package` * Structure is documented below. */ googet?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgGooget; /** * An MSI package. MSI packages only support INSTALLED state. * Structure is documented below. */ msi?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsi; /** * An RPM package file. RPM packages only support INSTALLED state. * Structure is documented below. */ rpm?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpm; /** * A package managed by YUM. * - install: `yum -y install package` * - remove: `yum -y remove package` * Structure is documented below. */ yum?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgYum; /** * A package managed by Zypper. 
* - install: `zypper -y install package` * - remove: `zypper -y rm package` * Structure is documented below. */ zypper?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgZypper; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgApt { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDeb { /** * Whether dependencies should also be installed. * - install when false: `dpkg -i package` * - install when true: `apt-get update && apt-get -y install * package.deb` */ pullDeps?: boolean; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSource; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. 
*/ remote?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceRemote; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgGooget { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsi { /** * Additional properties to use during installation. * This should be in the format of Property=Setting. * Appended to the defaults of `ACTION=INSTALL * REBOOT=ReallySuppress`. */ properties?: string[]; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSource; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. 
*/ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceRemote; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpm { /** * Whether dependencies should also be installed. * - install when false: `rpm --upgrade --replacepkgs package.rpm` * - install when true: `yum -y install package.rpm` or * `zypper -y install package.rpm` */ pullDeps?: boolean; /** * A remote or local file. * Structure is documented below. 
*/ source: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSource; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceRemote; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgYum { /** * Required. Package name. 
*/ name: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgZypper { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepository { /** * Represents a single apt package repository. These will be added to * a repo file that will be managed at * `/etc/apt/sources.list.d/google_osconfig.list`. * Structure is documented below. */ apt?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryApt; /** * Represents a Goo package repository. These are added to a repo file * that is managed at * `C:/ProgramData/GooGet/repos/google_osconfig.repo`. * Structure is documented below. */ goo?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryGoo; /** * Represents a single yum package repository. These are added to a * repo file that is managed at * `/etc/yum.repos.d/google_osconfig.repo`. * Structure is documented below. */ yum?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryYum; /** * Represents a single zypper package repository. These are added to a * repo file that is managed at * `/etc/zypp/repos.d/google_osconfig.repo`. * Structure is documented below. */ zypper?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryZypper; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryApt { /** * Required. Type of archive files in this repository. * Possible values: * ARCHIVE_TYPE_UNSPECIFIED * DEB * DEB_SRC */ archiveType: string; /** * Required. 
List of components for this repository. Must contain at least one * item. */ components: string[]; /** * Required. Distribution of this repository. */ distribution: string; /** * URI of the key file for this repository. The agent maintains a * keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`. */ gpgKey?: string; /** * Required. URI for this repository. */ uri: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryGoo { /** * Required. The name of the repository. */ name: string; /** * Required. The url of the repository. */ url: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryYum { /** * Required. The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * Required. A one word, unique name for this repository. This is the `repo * id` in the yum config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique * identifier when checking for resource conflicts. */ id: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryZypper { /** * Required. The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * Required. A one word, unique name for this repository. This is the `repo * id` in the zypper config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique * identifier when checking for GuestPolicy conflicts. 
*/ id: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadRollout { /** * Message encapsulating a value that can be either absolute ("fixed") or * relative ("percent") to a value. * Structure is documented below. */ disruptionBudget: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadRolloutDisruptionBudget; /** * Required. This determines the minimum duration of time to wait after the * configuration changes are applied through the current rollout. A * VM continues to count towards the `disruptionBudget` at least * until this duration of time has passed after configuration changes are * applied. */ minWaitDuration: string; } interface V2PolicyOrchestratorForOrganizationOrchestratedResourceOsPolicyAssignmentV1PayloadRolloutDisruptionBudget { /** * Specifies a fixed value. */ fixed?: number; /** * Specifies the relative value defined as a percentage, which will be * multiplied by a reference value. */ percent?: number; } interface V2PolicyOrchestratorForOrganizationOrchestrationScope { /** * Optional. Selectors of the orchestration scope. There is a logical AND between each * selector defined. * When there is no explicit `ResourceHierarchySelector` selector specified, * the scope is by default bounded to the parent of the policy orchestrator * resource. * Structure is documented below. */ selectors?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationScopeSelector[]; } interface V2PolicyOrchestratorForOrganizationOrchestrationScopeSelector { /** * Selector containing locations in scope. * Structure is documented below. */ locationSelector?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationScopeSelectorLocationSelector; /** * Selector containing Cloud Resource Manager resource hierarchy nodes. * Structure is documented below. 
*/ resourceHierarchySelector?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationScopeSelectorResourceHierarchySelector; } interface V2PolicyOrchestratorForOrganizationOrchestrationScopeSelectorLocationSelector { /** * Optional. Names of the locations in scope. * Format: `us-central1-a` */ includedLocations?: string[]; } interface V2PolicyOrchestratorForOrganizationOrchestrationScopeSelectorResourceHierarchySelector { /** * Optional. Names of the folders in scope. * Format: `folders/{folder_id}` */ includedFolders?: string[]; /** * Optional. Names of the projects in scope. * Format: `projects/{project_number}` */ includedProjects?: string[]; } interface V2PolicyOrchestratorForOrganizationOrchestrationState { /** * (Output) * Describes the state of a single iteration of the orchestrator. * Structure is documented below. */ currentIterationStates: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationStateCurrentIterationState[]; /** * Describes the state of a single iteration of the orchestrator. * Structure is documented below. */ previousIterationState?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationStatePreviousIterationState; } interface V2PolicyOrchestratorForOrganizationOrchestrationStateCurrentIterationState { /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ error?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationStateCurrentIterationStateError; /** * (Output) * Output only. Number of orchestration actions which failed so far. For more details, * query the Cloud Logs. 
*/ failedActions: string; /** * (Output) * Output only. Finish time of the wave iteration. */ finishTime: string; /** * (Output) * Output only. Overall number of actions done by the orchestrator so far. */ performedActions: string; /** * (Output) * Output only. An estimated percentage of the progress. Number between 0 and 100. */ progress: number; /** * (Output) * Output only. Handle to the Progressive Rollouts API rollout resource, which contains * detailed information about a particular orchestration iteration. */ rolloutResource: string; /** * (Output) * Output only. Start time of the wave iteration. */ startTime: string; /** * Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. * Allowed values: * - `ACTIVE` - orchestrator is actively looking for actions to be taken. * - `STOPPED` - orchestrator won't make any changes. * Note: There might be more states added in the future. We use string here * instead of an enum, to avoid the need of propagating new states to all the * client code. */ state: string; } interface V2PolicyOrchestratorForOrganizationOrchestrationStateCurrentIterationStateError { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationStateCurrentIterationStateErrorDetail[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. 
*/ message?: string; } interface V2PolicyOrchestratorForOrganizationOrchestrationStateCurrentIterationStateErrorDetail { /** * A URL/resource name that uniquely identifies the type of the serialized protocol buffer message */ typeUrl?: string; /** * (Optional) */ value?: string; } interface V2PolicyOrchestratorForOrganizationOrchestrationStatePreviousIterationState { /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ error?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationStatePreviousIterationStateError; /** * (Output) * Output only. Number of orchestration actions which failed so far. For more details, * query the Cloud Logs. */ failedActions: string; /** * (Output) * Output only. Finish time of the wave iteration. */ finishTime: string; /** * (Output) * Output only. Overall number of actions done by the orchestrator so far. */ performedActions: string; /** * (Output) * Output only. An estimated percentage of the progress. Number between 0 and 100. */ progress: number; /** * (Output) * Output only. Handle to the Progressive Rollouts API rollout resource, which contains * detailed information about a particular orchestration iteration. */ rolloutResource: string; /** * (Output) * Output only. Start time of the wave iteration. */ startTime: string; /** * Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. * Allowed values: * - `ACTIVE` - orchestrator is actively looking for actions to be taken. * - `STOPPED` - orchestrator won't make any changes. 
* Note: There might be more states added in the future. We use string here * instead of an enum, to avoid the need of propagating new states to all the * client code. */ state: string; } interface V2PolicyOrchestratorForOrganizationOrchestrationStatePreviousIterationStateError { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details?: outputs.osconfig.V2PolicyOrchestratorForOrganizationOrchestrationStatePreviousIterationStateErrorDetail[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } interface V2PolicyOrchestratorForOrganizationOrchestrationStatePreviousIterationStateErrorDetail { /** * A URL/resource name that uniquely identifies the type of the serialized protocol buffer message */ typeUrl?: string; /** * (Optional) */ value?: string; } interface V2PolicyOrchestratorOrchestratedResource { /** * Optional. ID of the resource to be used while generating set of affected resources. * For UPSERT action the value is auto-generated during PolicyOrchestrator * creation when not set. When the value is set it should following next * restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the project. * For DELETE action, ID must be specified explicitly during * PolicyOrchestrator creation. * * * The `osPolicyAssignmentV1Payload` block supports: */ id?: string; /** * OS policy assignment is an API resource that is used to * apply a set of OS policies to a dynamically targeted group of Compute Engine * VM instances. 
* An OS policy is used to define the desired state configuration for a * Compute Engine VM instance through a set of configuration resources that * provide capabilities such as installing or removing software packages, or * executing a script. * For more information about the OS policy resource definitions and examples, * see * [OS policy and OS policy * assignment](https://cloud.google.com/compute/docs/os-configuration-management/working-with-os-policies). * Structure is documented below. */ osPolicyAssignmentV1Payload?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1Payload; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1Payload { /** * Output only. Indicates that this revision has been successfully rolled out in this zone * and new VMs will be assigned OS policies from this revision. * * For a given OS policy assignment, there is only one revision with a value * of 'true' for this field. */ baseline: boolean; /** * Output only. Indicates that this revision deletes the OS policy assignment. */ deleted: boolean; /** * Optional. Freeform text describing the purpose of the resource. */ description?: string; /** * Filters to select target VMs for an assignment. * * If more than one filter criteria is specified below, a VM will be selected * if and only if it satisfies all of them. */ instanceFilter: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilter; /** * Immutable. Identifier. In form of * * `organizations/{organization_id}/locations/global/policyOrchestrators/{orchestrator_id}` * * `folders/{folder_id}/locations/global/policyOrchestrators/{orchestrator_id}` * * `projects/{project_id_or_number}/locations/global/policyOrchestrators/{orchestrator_id}` */ name?: string; /** * Required. List of OS policies to be applied to the VMs. */ osPolicies: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicy[]; /** * Output only. 
Set to true if there are ongoing changes being applied by the
*/ inventories?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInventory[]; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterExclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this * map to be selected. */ labels?: { [key: string]: string; }; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInclusionLabel { /** * Labels are identified by key/value pairs in this map. * A VM should contain all the key/value pairs specified in this * map to be selected. */ labels?: { [key: string]: string; }; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadInstanceFilterInventory { /** * Required. The OS short name */ osShortName: string; /** * The OS version * Prefix matches are supported if asterisk(*) is provided as the * last character. For example, to match all versions with a major * version of `7`, specify the following value for this field `7.*` * An empty string matches all OS versions. */ osVersion?: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicy { /** * This flag determines the OS policy compliance status when none of the * resource groups within the policy are applicable for a VM. Set this value * to `true` if the policy needs to be reported as compliant even if the * policy has nothing to validate or enforce. */ allowNoResourceGroupMatch?: boolean; /** * Policy description. * Length of the description is limited to 1024 characters. */ description?: string; /** * Required. The id of the OS policy with the following restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the assignment. */ id: string; /** * Required. 
Policy mode * Possible values: * MODE_UNSPECIFIED * VALIDATION * ENFORCEMENT */ mode: string; /** * Required. List of resource groups for the policy. * For a particular VM, resource groups are evaluated in the order specified * and the first resource group that is applicable is selected and the rest * are ignored. * If none of the resource groups are applicable for a VM, the VM is * considered to be non-compliant w.r.t this policy. This behavior can be * toggled by the flag `allowNoResourceGroupMatch` * Structure is documented below. */ resourceGroups: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroup[]; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroup { /** * List of inventory filters for the resource group. * The resources in this resource group are applied to the target VM if it * satisfies at least one of the following inventory filters. * For example, to apply this resource group to VMs running either `RHEL` or * `CentOS` operating systems, specify 2 items for the list with following * values: * inventory_filters[0].os_short_name='rhel' and * inventory_filters[1].os_short_name='centos' * If the list is empty, this resource group will be applied to the target * VM unconditionally. * Structure is documented below. */ inventoryFilters?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupInventoryFilter[]; /** * Required. List of resources configured for this resource group. * The resources are executed in the exact order specified here. * Structure is documented below. */ resources: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResource[]; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupInventoryFilter { /** * Required. 
The OS short name */ osShortName: string; /** * The OS version * Prefix matches are supported if asterisk(*) is provided as the * last character. For example, to match all versions with a major * version of `7`, specify the following value for this field `7.*` * An empty string matches all OS versions. */ osVersion?: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResource { /** * A resource that allows executing scripts on the VM. * The `ExecResource` has 2 stages: `validate` and `enforce` and both stages * accept a script as an argument to execute. * When the `ExecResource` is applied by the agent, it first executes the * script in the `validate` stage. The `validate` stage can signal that the * `ExecResource` is already in the desired state by returning an exit code * of `100`. If the `ExecResource` is not in the desired state, it should * return an exit code of `101`. Any other exit code returned by this stage * is considered an error. * If the `ExecResource` is not in the desired state based on the exit code * from the `validate` stage, the agent proceeds to execute the script from * the `enforce` stage. If the `ExecResource` is already in the desired * state, the `enforce` stage will not be run. * Similar to `validate` stage, the `enforce` stage should return an exit * code of `100` to indicate that the resource in now in its desired state. * Any other exit code is considered an error. * NOTE: An exit code of `100` was chosen over `0` (and `101` vs `1`) to * have an explicit indicator of `in desired state`, `not in desired state` * and errors. Because, for example, Powershell will always return an exit * code of `0` unless an `exit` statement is provided in the script. So, for * reasons of consistency and being explicit, exit codes `100` and `101` * were chosen. * Structure is documented below. 
*/ exec?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExec; /** * A resource that manages the state of a file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFile; /** * Required. The id of the resource with the following restrictions: * * Must contain only lowercase letters, numbers, and hyphens. * * Must start with a letter. * * Must be between 1-63 characters. * * Must end with a number or a letter. * * Must be unique within the OS policy. */ id: string; /** * A resource that manages a system package. * Structure is documented below. */ pkg?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkg; /** * A resource that manages a package repository. * Structure is documented below. */ repository?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepository; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExec { /** * A file or script to execute. * Structure is documented below. */ enforce?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforce; /** * A file or script to execute. * Structure is documented below. */ validate: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidate; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforce { /** * Optional arguments to pass to the source during execution. */ args?: string[]; /** * A remote or local file. * Structure is documented below. 
*/ file?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFile; /** * Required. The script interpreter to use. * Possible values: * INTERPRETER_UNSPECIFIED * NONE * SHELL * POWERSHELL */ interpreter: string; /** * Only recorded for enforce Exec. * Path to an output file (that is created by this Exec) whose * content will be recorded in OSPolicyResourceCompliance after a * successful run. Absence or failure to read this file will result in * this ExecResource being non-compliant. Output file size is limited to * 500K bytes. */ outputFilePath?: string; /** * An inline script. * The size of the script is limited to 32KiB. */ script?: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileRemote; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. 
*/ object: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecEnforceFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidate { /** * Optional arguments to pass to the source during execution. */ args?: string[]; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFile; /** * Required. The script interpreter to use. * Possible values: * INTERPRETER_UNSPECIFIED * NONE * SHELL * POWERSHELL */ interpreter: string; /** * Only recorded for enforce Exec. * Path to an output file (that is created by this Exec) whose * content will be recorded in OSPolicyResourceCompliance after a * successful run. Absence or failure to read this file will result in * this ExecResource being non-compliant. Output file size is limited to * 500K bytes. */ outputFilePath?: string; /** * An inline script. * The size of the script is limited to 32KiB. */ script?: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. 
*/ gcs?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileRemote; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceExecValidateFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFile { /** * A file with this content. * The size of the content is limited to 32KiB. */ content?: string; /** * A remote or local file. * Structure is documented below. */ file?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFile; /** * Required. The absolute path of the file within the VM. */ path: string; /** * Consists of three octal digits which represent, in * order, the permissions of the owner, group, and other users for the * file (similarly to the numeric mode used in the linux chmod * utility). 
Each digit represents a three bit number with the 4 bit * corresponding to the read permissions, the 2 bit corresponds to the * write bit, and the one bit corresponds to the execute permission. * Default behavior is 755. * Below are some examples of permissions and their associated values: * read, write, and execute: 7 * read and execute: 5 * read and write: 6 * read only: 4 */ permissions?: string; /** * Required. Desired state of the file. * Possible values: * DESIRED_STATE_UNSPECIFIED * PRESENT * ABSENT * CONTENTS_MATCH */ state: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFile { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileRemote; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceFileFileRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. 
It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkg { /** * A package managed by APT. * - install: `apt-get update && apt-get -y install [name]` * - remove: `apt-get -y remove [name]` * Structure is documented below. */ apt?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgApt; /** * A deb package file. dpkg packages only support INSTALLED state. * Structure is documented below. */ deb?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDeb; /** * Required. The desired state the agent should maintain for this package. * Possible values: * DESIRED_STATE_UNSPECIFIED * INSTALLED * REMOVED */ desiredState: string; /** * A package managed by GooGet. * - install: `googet -noconfirm install package` * - remove: `googet -noconfirm remove package` * Structure is documented below. */ googet?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgGooget; /** * An MSI package. MSI packages only support INSTALLED state. * Structure is documented below. */ msi?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsi; /** * An RPM package file. RPM packages only support INSTALLED state. * Structure is documented below. */ rpm?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpm; /** * A package managed by YUM. * - install: `yum -y install package` * - remove: `yum -y remove package` * Structure is documented below. */ yum?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgYum; /** * A package managed by Zypper. 
* - install: `zypper -y install package` * - remove: `zypper -y rm package` * Structure is documented below. */ zypper?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgZypper; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgApt { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDeb { /** * Whether dependencies should also be installed. * - install when false: `dpkg -i package` * - install when true: `apt-get update && apt-get -y install * package.deb` */ pullDeps?: boolean; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSource; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceRemote; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceGcs { /** * Required. Bucket of the Cloud Storage object. 
*/ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgDebSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgGooget { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsi { /** * Additional properties to use during installation. * This should be in the format of Property=Setting. * Appended to the defaults of `ACTION=INSTALL * REBOOT=ReallySuppress`. */ properties?: string[]; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSource; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. */ gcs?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. 
*/ remote?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceRemote; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgMsiSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpm { /** * Whether dependencies should also be installed. * - install when false: `rpm --upgrade --replacepkgs package.rpm` * - install when true: `yum -y install package.rpm` or * `zypper -y install package.rpm` */ pullDeps?: boolean; /** * A remote or local file. * Structure is documented below. */ source: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSource; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSource { /** * Defaults to false. When false, files are subject to validations * based on the file type: * Remote: A checksum must be specified. * Cloud Storage: An object generation number must be specified. */ allowInsecure?: boolean; /** * Specifies a file available as a Cloud Storage Object. * Structure is documented below. 
*/ gcs?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceGcs; /** * A local path within the VM to use. */ localPath?: string; /** * Specifies a file available via some URI. * Structure is documented below. */ remote?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceRemote; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceGcs { /** * Required. Bucket of the Cloud Storage object. */ bucket: string; /** * Generation number of the Cloud Storage object. */ generation?: string; /** * Required. Name of the Cloud Storage object. */ object: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgRpmSourceRemote { /** * SHA256 checksum of the remote file. */ sha256Checksum?: string; /** * Required. URI from which to fetch the object. It should contain both the * protocol and path following the format `{protocol}://{location}`. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgYum { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourcePkgZypper { /** * Required. Package name. */ name: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepository { /** * Represents a single apt package repository. These will be added to * a repo file that will be managed at * `/etc/apt/sources.list.d/google_osconfig.list`. * Structure is documented below. */ apt?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryApt; /** * Represents a Goo package repository. 
These are added to a repo file * that is managed at * `C:/ProgramData/GooGet/repos/google_osconfig.repo`. * Structure is documented below. */ goo?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryGoo; /** * Represents a single yum package repository. These are added to a * repo file that is managed at * `/etc/yum.repos.d/google_osconfig.repo`. * Structure is documented below. */ yum?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryYum; /** * Represents a single zypper package repository. These are added to a * repo file that is managed at * `/etc/zypp/repos.d/google_osconfig.repo`. * Structure is documented below. */ zypper?: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryZypper; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryApt { /** * Required. Type of archive files in this repository. * Possible values: * ARCHIVE_TYPE_UNSPECIFIED * DEB * DEB_SRC */ archiveType: string; /** * Required. List of components for this repository. Must contain at least one * item. */ components: string[]; /** * Required. Distribution of this repository. */ distribution: string; /** * URI of the key file for this repository. The agent maintains a * keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`. */ gpgKey?: string; /** * Required. URI for this repository. */ uri: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryGoo { /** * Required. The name of the repository. */ name: string; /** * Required. The url of the repository. */ url: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryYum { /** * Required. 
The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * Required. A one word, unique name for this repository. This is the `repo * id` in the yum config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique * identifier when checking for resource conflicts. */ id: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadOsPolicyResourceGroupResourceRepositoryZypper { /** * Required. The location of the repository directory. */ baseUrl: string; /** * The display name of the repository. */ displayName?: string; /** * URIs of GPG keys. */ gpgKeys?: string[]; /** * Required. A one word, unique name for this repository. This is the `repo * id` in the zypper config file and also the `displayName` if * `displayName` is omitted. This id is also used as the unique * identifier when checking for GuestPolicy conflicts. */ id: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadRollout { /** * Message encapsulating a value that can be either absolute ("fixed") or * relative ("percent") to a value. * Structure is documented below. */ disruptionBudget: outputs.osconfig.V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadRolloutDisruptionBudget; /** * Required. This determines the minimum duration of time to wait after the * configuration changes are applied through the current rollout. A * VM continues to count towards the `disruptionBudget` at least * until this duration of time has passed after configuration changes are * applied. */ minWaitDuration: string; } interface V2PolicyOrchestratorOrchestratedResourceOsPolicyAssignmentV1PayloadRolloutDisruptionBudget { /** * Specifies a fixed value. */ fixed?: number; /** * Specifies the relative value defined as a percentage, which will be * multiplied by a reference value. 
*/ percent?: number; } interface V2PolicyOrchestratorOrchestrationScope { /** * Optional. Selectors of the orchestration scope. There is a logical AND between each * selector defined. * When there is no explicit `ResourceHierarchySelector` selector specified, * the scope is by default bounded to the parent of the policy orchestrator * resource. * Structure is documented below. */ selectors?: outputs.osconfig.V2PolicyOrchestratorOrchestrationScopeSelector[]; } interface V2PolicyOrchestratorOrchestrationScopeSelector { /** * Selector containing locations in scope. * Structure is documented below. */ locationSelector?: outputs.osconfig.V2PolicyOrchestratorOrchestrationScopeSelectorLocationSelector; /** * Selector containing Cloud Resource Manager resource hierarchy nodes. * Structure is documented below. */ resourceHierarchySelector?: outputs.osconfig.V2PolicyOrchestratorOrchestrationScopeSelectorResourceHierarchySelector; } interface V2PolicyOrchestratorOrchestrationScopeSelectorLocationSelector { /** * Optional. Names of the locations in scope. * Format: `us-central1-a` */ includedLocations?: string[]; } interface V2PolicyOrchestratorOrchestrationScopeSelectorResourceHierarchySelector { /** * Optional. Names of the folders in scope. * Format: `folders/{folder_id}` */ includedFolders?: string[]; /** * Optional. Names of the projects in scope. * Format: `projects/{project_number}` */ includedProjects?: string[]; } interface V2PolicyOrchestratorOrchestrationState { /** * (Output) * Describes the state of a single iteration of the orchestrator. * Structure is documented below. */ currentIterationStates: outputs.osconfig.V2PolicyOrchestratorOrchestrationStateCurrentIterationState[]; /** * Describes the state of a single iteration of the orchestrator. * Structure is documented below. 
*/ previousIterationState?: outputs.osconfig.V2PolicyOrchestratorOrchestrationStatePreviousIterationState; } interface V2PolicyOrchestratorOrchestrationStateCurrentIterationState { /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ error?: outputs.osconfig.V2PolicyOrchestratorOrchestrationStateCurrentIterationStateError; /** * (Output) * Output only. Number of orchestration actions which failed so far. For more details, * query the Cloud Logs. */ failedActions: string; /** * (Output) * Output only. Finish time of the wave iteration. */ finishTime: string; /** * (Output) * Output only. Overall number of actions done by the orchestrator so far. */ performedActions: string; /** * (Output) * Output only. An estimated percentage of the progress. Number between 0 and 100. */ progress: number; /** * (Output) * Output only. Handle to the Progressive Rollouts API rollout resource, which contains * detailed information about a particular orchestration iteration. */ rolloutResource: string; /** * (Output) * Output only. Start time of the wave iteration. */ startTime: string; /** * Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. * Allowed values: * - `ACTIVE` - orchestrator is actively looking for actions to be taken. * - `STOPPED` - orchestrator won't make any changes. * Note: There might be more states added in the future. We use string here * instead of an enum, to avoid the need of propagating new states to all the * client code. 
*/ state: string; } interface V2PolicyOrchestratorOrchestrationStateCurrentIterationStateError { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details?: outputs.osconfig.V2PolicyOrchestratorOrchestrationStateCurrentIterationStateErrorDetail[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } interface V2PolicyOrchestratorOrchestrationStateCurrentIterationStateErrorDetail { /** * A URL/resource name that uniquely identifies the type of the serialized protocol buffer message */ typeUrl?: string; /** * (Optional) */ value?: string; } interface V2PolicyOrchestratorOrchestrationStatePreviousIterationState { /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains * three pieces of data: error code, error message, and error details. * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). * Structure is documented below. */ error?: outputs.osconfig.V2PolicyOrchestratorOrchestrationStatePreviousIterationStateError; /** * (Output) * Output only. Number of orchestration actions which failed so far. For more details, * query the Cloud Logs. */ failedActions: string; /** * (Output) * Output only. Finish time of the wave iteration. */ finishTime: string; /** * (Output) * Output only. Overall number of actions done by the orchestrator so far. */ performedActions: string; /** * (Output) * Output only. An estimated percentage of the progress. 
Number between 0 and 100. */ progress: number; /** * (Output) * Output only. Handle to the Progressive Rollouts API rollout resource, which contains * detailed information about a particular orchestration iteration. */ rolloutResource: string; /** * (Output) * Output only. Start time of the wave iteration. */ startTime: string; /** * Optional. State of the orchestrator. Can be updated to change orchestrator behaviour. * Allowed values: * - `ACTIVE` - orchestrator is actively looking for actions to be taken. * - `STOPPED` - orchestrator won't make any changes. * Note: There might be more states added in the future. We use string here * instead of an enum, to avoid the need of propagating new states to all the * client code. */ state: string; } interface V2PolicyOrchestratorOrchestrationStatePreviousIterationStateError { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. * Structure is documented below. */ details?: outputs.osconfig.V2PolicyOrchestratorOrchestrationStatePreviousIterationStateErrorDetail[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } interface V2PolicyOrchestratorOrchestrationStatePreviousIterationStateErrorDetail { /** * A URL/resource name that uniquely identifies the type of the serialized protocol buffer message */ typeUrl?: string; /** * (Optional) */ value?: string; } } export declare namespace parametermanager { interface GetParameterPolicyMember { /** * IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a * resource is deleted and recreated with the same name, the binding will be applicable to the * new resource. 
Format: * 'principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/global/parameters/{{parameter_id}}' */ iamPolicyNamePrincipal: string; /** * IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier. * If a resource is deleted and recreated with the same name, the binding will not be applicable to the * new resource. Format: * 'principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/global/parameters/{{uid}}' */ iamPolicyUidPrincipal: string; } interface GetParametersParameter { /** * The time at which the parameter was created. */ createTime: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * The format type of the parameter. */ format: string; /** * The resource name of the Cloud KMS CryptoKey used to encrypt parameter version payload. Format `projects/{{project}}/locations/global/keyRings/{{key_ring}}/cryptoKeys/{{crypto_key}}` */ kmsKey: string; /** * The labels assigned to the parameter. */ labels: { [key: string]: string; }; /** * The resource name of the parameter. Format: `projects/{{project}}/locations/global/parameters/{{parameter_id}}` */ name: string; /** * The unique name of the resource. */ parameterId: string; /** * An object containing a unique resource identity tied to the parameter. Structure is documented below. */ policyMembers: outputs.parametermanager.GetParametersParameterPolicyMember[]; /** * The ID of the project. */ project: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * The time at which the parameter was updated. 
*/ updateTime: string; } interface GetParametersParameterPolicyMember { /** * IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a resource is deleted and recreated with the same name, the binding will be applicable to the * new resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/global/parameters/{{parameter_id}}` */ iamPolicyNamePrincipal: string; /** * IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier. * If a resource is deleted and recreated with the same name, the binding will not be applicable to the * new resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/global/parameters/{{uid}}` */ iamPolicyUidPrincipal: string; } interface GetRegionalParameterPolicyMember { /** * IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a resource is * deleted and recreated with the same name, the binding will be applicable to the new resource. Format: * 'principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/{{location}}/parameters/{{parameter_id}}' */ iamPolicyNamePrincipal: string; /** * IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier. If * a resource is deleted and recreated with the same name, the binding will not be applicable to the new * resource. Format: * 'principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/{{location}}/parameters/{{uid}}' */ iamPolicyUidPrincipal: string; } interface GetRegionalParametersParameter { /** * The time at which the regional parameter was created. */ createTime: string; /** * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. */ effectiveLabels: { [key: string]: string; }; /** * The format type of the regional parameter. 
*/ format: string; /** * The resource name of the Cloud KMS CryptoKey used to encrypt regional parameter version payload. Format `projects/{{project}}/locations/{{location}}/keyRings/{{key_ring}}/cryptoKeys/{{crypto_key}}` */ kmsKey: string; /** * The labels assigned to the regional parameter. */ labels: { [key: string]: string; }; /** * The location of regional parameter. */ location: string; /** * The resource name of the regional parameter. Format: `projects/{{project}}/locations/{{location}}/parameters/{{parameter_id}}` */ name: string; /** * The unique name of the resource. */ parameterId: string; /** * An object containing a unique resource identity tied to the regional parameter. Structure is documented below. */ policyMembers: outputs.parametermanager.GetRegionalParametersParameterPolicyMember[]; /** * The ID of the project. */ project: string; /** * The combination of labels configured directly on the resource * and default labels configured on the provider. */ pulumiLabels: { [key: string]: string; }; /** * The time at which the regional parameter was updated. */ updateTime: string; } interface GetRegionalParametersParameterPolicyMember { /** * IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a resource is deleted and recreated with the same name, the binding will be applicable to the * new resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/{{location}}/parameters/{{parameter_id}}` */ iamPolicyNamePrincipal: string; /** * IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier. * If a resource is deleted and recreated with the same name, the binding will not be applicable to the * new resource. 
Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/{{location}}/parameters/{{uid}}` */ iamPolicyUidPrincipal: string; } interface ParameterPolicyMember { /** * (Output) * IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a * resource is deleted and recreated with the same name, the binding will be applicable to the * new resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/global/parameters/{{parameter_id}}` */ iamPolicyNamePrincipal: string; /** * (Output) * IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier. * If a resource is deleted and recreated with the same name, the binding will not be applicable to the * new resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/global/parameters/{{uid}}` */ iamPolicyUidPrincipal: string; } interface RegionalParameterPolicyMember { /** * (Output) * IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a resource is * deleted and recreated with the same name, the binding will be applicable to the new resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/{{location}}/parameters/{{parameter_id}}` */ iamPolicyNamePrincipal: string; /** * (Output) * IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier. If * a resource is deleted and recreated with the same name, the binding will not be applicable to the new * resource. Format: * `principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/{{location}}/parameters/{{uid}}` */ iamPolicyUidPrincipal: string; } } export declare namespace privilegedaccessmanager { interface EntitlementAdditionalNotificationTargets { /** * Optional. Additional email addresses to be notified when a principal(requester) is granted access. 
*/ adminEmailRecipients?: string[]; /** * Optional. Additional email addresses to be notified about an eligible entitlement. */ requesterEmailRecipients?: string[]; } interface EntitlementApprovalWorkflow { /** * A manual approval workflow where users who are designated as approvers need to call the ApproveGrant/DenyGrant APIs for a Grant. * The workflow can consist of multiple serial steps where each step defines who can act as Approver in that step and how many of those users should approve before the workflow moves to the next step. * This can be used to create approval workflows such as * * Require an approval from any user in a group G. * * Require an approval from any k number of users from a Group G. * * Require an approval from any user in a group G and then from a user U. etc. * A single user might be part of `approvers` ACL for multiple steps in this workflow but they can only approve once and that approval will only be considered to satisfy the approval step at which it was granted. * Structure is documented below. */ manualApprovals: outputs.privilegedaccessmanager.EntitlementApprovalWorkflowManualApprovals; } interface EntitlementApprovalWorkflowManualApprovals { /** * Optional. Do the approvers need to provide a justification for their actions? */ requireApproverJustification?: boolean; /** * List of approval steps in this workflow. These steps would be followed in the specified order sequentially. * Structure is documented below. */ steps: outputs.privilegedaccessmanager.EntitlementApprovalWorkflowManualApprovalsStep[]; } interface EntitlementApprovalWorkflowManualApprovalsStep { /** * How many users from the above list need to approve. * If there are not enough distinct users in the list above then the workflow * will indefinitely block. Should always be greater than 0. Currently 1 is the only * supported value. */ approvalsNeeded?: number; /** * Optional. Additional email addresses to be notified when a grant is pending approval. 
*/ approverEmailRecipients?: string[]; /** * The potential set of approvers in this step. This list should contain only one entry. * Structure is documented below. */ approvers: outputs.privilegedaccessmanager.EntitlementApprovalWorkflowManualApprovalsStepApprovers; /** * (Output, Beta) * Output Only. The ID of the approval step. */ id: string; } interface EntitlementApprovalWorkflowManualApprovalsStepApprovers { /** * Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. Format for these is documented at: https://cloud.google.com/iam/docs/principal-identifiers#v1 */ principals: string[]; } interface EntitlementEligibleUser { /** * Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. Format for these is documented at "https://cloud.google.com/iam/docs/principal-identifiers#v1" */ principals: string[]; } interface EntitlementPrivilegedAccess { /** * GcpIamAccess represents IAM based access control on a GCP resource. Refer to https://cloud.google.com/iam/docs to understand more about IAM. * Structure is documented below. */ gcpIamAccess: outputs.privilegedaccessmanager.EntitlementPrivilegedAccessGcpIamAccess; } interface EntitlementPrivilegedAccessGcpIamAccess { /** * Name of the resource. */ resource: string; /** * The type of this resource. */ resourceType: string; /** * Role bindings to be created on successful grant. * Structure is documented below. */ roleBindings: outputs.privilegedaccessmanager.EntitlementPrivilegedAccessGcpIamAccessRoleBinding[]; } interface EntitlementPrivilegedAccessGcpIamAccessRoleBinding { /** * The expression field of the IAM condition to be associated with the role. If specified, a user with an active grant for this entitlement would be able to access the resource only if this condition evaluates to true for their request. * https://cloud.google.com/iam/docs/conditions-overview#attributes. 
*/ conditionExpression?: string; /** * (Output, Beta) * Output Only. The ID corresponding to this role binding in the policy binding. This will be unique within an entitlement across time. Gets re-generated each time the entitlement is updated. */ id: string; /** * IAM role to be granted. https://cloud.google.com/iam/docs/roles-overview. */ role: string; } interface EntitlementRequesterJustificationConfig { /** * The justification is not mandatory but can be provided in any of the supported formats. */ notMandatory?: outputs.privilegedaccessmanager.EntitlementRequesterJustificationConfigNotMandatory; /** * The requester has to provide a justification in the form of free flowing text. */ unstructured?: outputs.privilegedaccessmanager.EntitlementRequesterJustificationConfigUnstructured; } interface EntitlementRequesterJustificationConfigNotMandatory { } interface EntitlementRequesterJustificationConfigUnstructured { } interface GetEntitlementAdditionalNotificationTarget { /** * Optional. Additional email addresses to be notified when a principal(requester) is granted access. */ adminEmailRecipients: string[]; /** * Optional. Additional email address to be notified about an eligible entitlement. */ requesterEmailRecipients: string[]; } interface GetEntitlementApprovalWorkflow { /** * A manual approval workflow where users who are designated as approvers need to call the ApproveGrant/DenyGrant APIs for an Grant. * The workflow can consist of multiple serial steps where each step defines who can act as Approver in that step and how many of those users should approve before the workflow moves to the next step. * This can be used to create approval workflows such as * * Require an approval from any user in a group G. * * Require an approval from any k number of users from a Group G. * * Require an approval from any user in a group G and then from a user U. etc. 
* A single user might be part of 'approvers' ACL for multiple steps in this workflow but they can only approve once and that approval will only be considered to satisfy the approval step at which it was granted. */ manualApprovals: outputs.privilegedaccessmanager.GetEntitlementApprovalWorkflowManualApproval[]; } interface GetEntitlementApprovalWorkflowManualApproval { /** * Optional. Do the approvers need to provide a justification for their actions? */ requireApproverJustification: boolean; /** * List of approval steps in this workflow. These steps would be followed in the specified order sequentially. */ steps: outputs.privilegedaccessmanager.GetEntitlementApprovalWorkflowManualApprovalStep[]; } interface GetEntitlementApprovalWorkflowManualApprovalStep { /** * How many users from the above list need to approve. * If there are not enough distinct users in the list above then the workflow * will indefinitely block. Should always be greater than 0. Currently 1 is the only * supported value. */ approvalsNeeded: number; /** * Optional. Additional email addresses to be notified when a grant is pending approval. */ approverEmailRecipients: string[]; /** * The potential set of approvers in this step. This list should contain at only one entry. */ approvers: outputs.privilegedaccessmanager.GetEntitlementApprovalWorkflowManualApprovalStepApprover[]; /** * Output Only. The ID of the approval step. */ id: string; } interface GetEntitlementApprovalWorkflowManualApprovalStepApprover { /** * Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. Format for these is documented at: https://cloud.google.com/iam/docs/principal-identifiers#v1 */ principals: string[]; } interface GetEntitlementEligibleUser { /** * Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. 
Format for these is documented at "https://cloud.google.com/iam/docs/principal-identifiers#v1" */ principals: string[]; } interface GetEntitlementPrivilegedAccess { /** * GcpIamAccess represents IAM based access control on a GCP resource. Refer to https://cloud.google.com/iam/docs to understand more about IAM. */ gcpIamAccesses: outputs.privilegedaccessmanager.GetEntitlementPrivilegedAccessGcpIamAccess[]; } interface GetEntitlementPrivilegedAccessGcpIamAccess { /** * Name of the resource. */ resource: string; /** * The type of this resource. */ resourceType: string; /** * Role bindings to be created on successful grant. */ roleBindings: outputs.privilegedaccessmanager.GetEntitlementPrivilegedAccessGcpIamAccessRoleBinding[]; } interface GetEntitlementPrivilegedAccessGcpIamAccessRoleBinding { /** * The expression field of the IAM condition to be associated with the role. If specified, a user with an active grant for this entitlement would be able to access the resource only if this condition evaluates to true for their request. * https://cloud.google.com/iam/docs/conditions-overview#attributes. */ conditionExpression: string; /** * Output Only. The ID corresponding to this role binding in the policy binding. This will be unique within an entitlement across time. Gets re-generated each time the entitlement is updated. */ id: string; /** * IAM role to be granted. https://cloud.google.com/iam/docs/roles-overview. */ role: string; } interface GetEntitlementRequesterJustificationConfig { /** * The justification is not mandatory but can be provided in any of the supported formats. */ notMandatories: outputs.privilegedaccessmanager.GetEntitlementRequesterJustificationConfigNotMandatory[]; /** * The requester has to provide a justification in the form of free flowing text. 
*/ unstructureds: outputs.privilegedaccessmanager.GetEntitlementRequesterJustificationConfigUnstructured[]; } interface GetEntitlementRequesterJustificationConfigNotMandatory { } interface GetEntitlementRequesterJustificationConfigUnstructured { } interface SettingsEmailNotificationSettings { /** * CustomNotificationBehavior provides granular control over email notification delivery. Allows admins to selectively enable/disable notifications for specific events and specific personas. * Structure is documented below. */ customNotificationBehavior?: outputs.privilegedaccessmanager.SettingsEmailNotificationSettingsCustomNotificationBehavior; /** * This option indicates that all email notifications are disabled. */ disableAllNotifications?: outputs.privilegedaccessmanager.SettingsEmailNotificationSettingsDisableAllNotifications; } interface SettingsEmailNotificationSettingsCustomNotificationBehavior { /** * Email notifications specific to Admins. * Structure is documented below. */ adminNotifications?: outputs.privilegedaccessmanager.SettingsEmailNotificationSettingsCustomNotificationBehaviorAdminNotifications; /** * Email notifications specific to Approvers. * Structure is documented below. */ approverNotifications?: outputs.privilegedaccessmanager.SettingsEmailNotificationSettingsCustomNotificationBehaviorApproverNotifications; /** * Email notifications specific to Requesters. * Structure is documented below. */ requesterNotifications?: outputs.privilegedaccessmanager.SettingsEmailNotificationSettingsCustomNotificationBehaviorRequesterNotifications; } interface SettingsEmailNotificationSettingsCustomNotificationBehaviorAdminNotifications { /** * Notification mode for grant activated. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantActivated: string; /** * Notification mode for grant activation failed. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. 
*/ grantActivationFailed: string; /** * Notification mode for grant ended. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantEnded: string; /** * Notification mode for grant externally modified. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantExternallyModified: string; } interface SettingsEmailNotificationSettingsCustomNotificationBehaviorApproverNotifications { /** * Notification mode for pending approval. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ pendingApproval: string; } interface SettingsEmailNotificationSettingsCustomNotificationBehaviorRequesterNotifications { /** * Notification mode for entitlement assigned. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ entitlementAssigned: string; /** * Notification mode for grant activated. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantActivated: string; /** * Notification mode for grant activation failed. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantActivationFailed: string; /** * Notification mode for grant denied. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantDenied: string; /** * Notification mode for grant ended. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantEnded: string; /** * Notification mode for grant expired. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantExpired: string; /** * Notification mode for grant externally modified. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ grantExternallyModified: string; /** * Notification mode for grant revoked. * Possible values are: `NOTIFICATION_MODE_UNSPECIFIED`, `ENABLED`, `DISABLED`. 
*/ grantRevoked: string; } interface SettingsEmailNotificationSettingsDisableAllNotifications { } interface SettingsServiceAccountApproverSettings { /** * Indicates whether service account is allowed to grant approvals. */ enabled?: boolean; } } export declare namespace projects { interface AccessApprovalSettingsEnrolledService { /** * The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * cloudkms.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * pubsub.googleapis.com * storage.googleapis.com */ cloudProduct: string; /** * The enrollment level of the service. * Default value is `BLOCK_ALL`. * Possible values are: `BLOCK_ALL`. */ enrollmentLevel?: string; } interface ApiKeyRestrictions { /** * The Android apps that are allowed to use the key. */ androidKeyRestrictions?: outputs.projects.ApiKeyRestrictionsAndroidKeyRestrictions; /** * A restriction for a specific service and optionally one or more specific methods. Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed. */ apiTargets?: outputs.projects.ApiKeyRestrictionsApiTarget[]; /** * The HTTP referrers (websites) that are allowed to use the key. */ browserKeyRestrictions?: outputs.projects.ApiKeyRestrictionsBrowserKeyRestrictions; /** * The iOS apps that are allowed to use the key. */ iosKeyRestrictions?: outputs.projects.ApiKeyRestrictionsIosKeyRestrictions; /** * The IP addresses of callers that are allowed to use the key. */ serverKeyRestrictions?: outputs.projects.ApiKeyRestrictionsServerKeyRestrictions; } interface ApiKeyRestrictionsAndroidKeyRestrictions { /** * A list of Android applications that are allowed to make API calls with this key. 
*/ allowedApplications: outputs.projects.ApiKeyRestrictionsAndroidKeyRestrictionsAllowedApplication[]; } interface ApiKeyRestrictionsAndroidKeyRestrictionsAllowedApplication { /** * The package name of the application. */ packageName: string; /** * The SHA1 fingerprint of the application. For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. Output format is the latter. */ sha1Fingerprint: string; } interface ApiKeyRestrictionsApiTarget { /** * Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. A wildcard (*) can be used as the last symbol. Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*` */ methods?: string[]; /** * The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. You can use `gcloud services list` to get a list of services that are enabled in the project. */ service: string; } interface ApiKeyRestrictionsBrowserKeyRestrictions { /** * A list of regular expressions for the referrer URLs that are allowed to make API calls with this key. */ allowedReferrers: string[]; } interface ApiKeyRestrictionsIosKeyRestrictions { /** * A list of bundle IDs that are allowed when making API calls with this key. */ allowedBundleIds: string[]; } interface ApiKeyRestrictionsServerKeyRestrictions { /** * A list of the caller IP addresses that are allowed to make API calls with this key. */ allowedIps: string[]; } interface GetAncestryAncestor { /** * If it's a project, the `projectId` is exported, else the numeric folder id or organization id. */ id: string; /** * One of `"project"`, `"folder"` or `"organization"`. */ type: string; } interface GetIamCustomRolesRole { /** * The current deleted state of the role. 
*/ deleted: boolean; /** * A human-readable description for the role. */ description: string; /** * an identifier for the resource with the format `projects/{{project}}/roles/{{role_id}}`. */ id: string; /** * The name of the role in the format `projects/{{project}}/roles/{{role_id}}`. Like `id`, this field can be used as a reference in other resources such as IAM role bindings. */ name: string; /** * The names of the permissions this role grants when bound in an IAM policy. */ permissions: string[]; /** * The camel case role id used for this role. */ roleId: string; /** * The current launch stage of the role. List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage). */ stage: string; /** * A human-readable title for the role. */ title: string; } interface GetOrganizationPolicyBooleanPolicy { /** * If true, then the Policy is enforced. If false, then any configuration is acceptable. */ enforced: boolean; } interface GetOrganizationPolicyListPolicy { /** * One or the other must be set. */ allows: outputs.projects.GetOrganizationPolicyListPolicyAllow[]; /** * One or the other must be set. */ denies: outputs.projects.GetOrganizationPolicyListPolicyDeny[]; /** * If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. */ inheritFromParent: boolean; /** * The Google Cloud Console will try to default to a configuration that matches the value specified in this field. */ suggestedValue: string; } interface GetOrganizationPolicyListPolicyAllow { /** * The policy allows or denies all values. */ all: boolean; /** * The policy can define specific values that are allowed or denied. */ values: string[]; } interface GetOrganizationPolicyListPolicyDeny { /** * The policy allows or denies all values. */ all: boolean; /** * The policy can define specific values that are allowed or denied. 
*/ values: string[]; } interface GetOrganizationPolicyRestorePolicy { /** * May only be set to true. If set, then the default Policy is restored. */ default: boolean; } interface GetProjectProject { /** * Creation time in RFC3339 UTC "Zulu" format. */ createTime: string; /** * A set of key/value label pairs assigned on a project. */ labels: { [key: string]: string; }; /** * The Project lifecycle state. */ lifecycleState: string; /** * The optional user-assigned display name of the project. */ name: string; /** * The numeric identifier of the project. */ number: string; /** * An optional reference to a parent resource. */ parent: { [key: string]: string; }; /** * The project id of the project. */ projectId: string; } interface IAMAuditConfigAuditLogConfig { /** * Identities that do not cause logging for this type of permission. The format is the same as that for `members`. */ exemptedMembers?: string[]; /** * Permission type for which logging is to be configured. Must be one of `DATA_READ`, `DATA_WRITE`, or `ADMIN_READ`. */ logType: string; } interface IAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface IAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
* * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface OrganizationPolicyBooleanPolicy { /** * If true, then the Policy is enforced. If false, then any configuration is acceptable. */ enforced: boolean; } interface OrganizationPolicyListPolicy { /** * or `deny` - (Optional) One or the other must be set. */ allow?: outputs.projects.OrganizationPolicyListPolicyAllow; /** * One or the other must be set. */ deny?: outputs.projects.OrganizationPolicyListPolicyDeny; /** * If set to true, the values from the effective Policy of the parent resource * are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. * * The `allow` or `deny` blocks support: */ inheritFromParent?: boolean; /** * The Google Cloud Console will try to default to a configuration that matches the value specified in this field. */ suggestedValue: string; } interface OrganizationPolicyListPolicyAllow { /** * The policy allows or denies all values. */ all?: boolean; /** * The policy can define specific values that are allowed or denied. */ values?: string[]; } interface OrganizationPolicyListPolicyDeny { /** * The policy allows or denies all values. */ all?: boolean; /** * The policy can define specific values that are allowed or denied. */ values?: string[]; } interface OrganizationPolicyRestorePolicy { /** * May only be set to true. If set, then the default Policy is restored. 
*/ default: boolean; } } export declare namespace pubsub { interface GetSubscriptionBigqueryConfig { /** * When true and useTopicSchema or useTableSchema is true, any fields that are a part of the topic schema or message schema that * are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync * and any messages with extra fields are not written and remain in the subscription's backlog. */ dropUnknownFields: boolean; /** * The service account to use to write to BigQuery. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), * service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. */ serviceAccountEmail: string; /** * The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} */ table: string; /** * When true, use the BigQuery table's schema as the columns to write to in BigQuery. Messages * must be published in JSON format. Only one of useTopicSchema and useTableSchema can be set. */ useTableSchema: boolean; /** * When true, use the topic's schema as the columns to write to in BigQuery, if it exists. * Only one of useTopicSchema and useTableSchema can be set. */ useTopicSchema: boolean; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. * The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. */ writeMetadata: boolean; } interface GetSubscriptionCloudStorageConfig { /** * If set, message data will be written to Cloud Storage in Avro format. */ avroConfigs: outputs.pubsub.GetSubscriptionCloudStorageConfigAvroConfig[]; /** * User-provided name for the Cloud Storage bucket. The bucket must be created by the user. The bucket name must be without any prefix like "gs://". 
*/ bucket: string; /** * User-provided format string specifying how to represent datetimes in Cloud Storage filenames. */ filenameDatetimeFormat: string; /** * User-provided prefix for Cloud Storage filename. */ filenamePrefix: string; /** * User-provided suffix for Cloud Storage filename. Must not end in "/". */ filenameSuffix: string; /** * The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. * The maxBytes limit may be exceeded in cases where messages are larger than the limit. */ maxBytes: number; /** * The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. * May not exceed the subscription's acknowledgement deadline. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ maxDuration: string; /** * The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. */ maxMessages: number; /** * The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), * service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. */ serviceAccountEmail: string; /** * An output-only field that indicates whether or not the subscription can receive messages. */ state: string; } interface GetSubscriptionCloudStorageConfigAvroConfig { /** * When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. */ useTopicSchema: boolean; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. */ writeMetadata: boolean; } interface GetSubscriptionDeadLetterPolicy { /** * The name of the topic to which dead letter messages should be published. * Format is 'projects/{project}/topics/{topic}'. 
* * The Cloud Pub/Sub service account associated with the enclosing subscription's * parent project (i.e., * service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have * permission to Publish() to this topic. * * The operation will fail if the topic does not exist. * Users should ensure that there is a subscription attached to this topic * since messages published to a topic with no subscriptions are lost. */ deadLetterTopic: string; /** * The maximum number of delivery attempts for any message. The value must be * between 5 and 100. * * The number of delivery attempts is defined as 1 + (the sum of number of * NACKs and number of times the acknowledgement deadline has been exceeded for the message). * * A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that * client libraries may automatically extend ack_deadlines. * * This field will be honored on a best effort basis. * * If this parameter is 0, a default value of 5 is used. */ maxDeliveryAttempts: number; } interface GetSubscriptionExpirationPolicy { /** * Specifies the "time-to-live" duration for an associated resource. The * resource expires if it is not active for a period of ttl. * If ttl is set to "", the associated resource never expires. * A duration in seconds with up to nine fractional digits, terminated by 's'. * Example - "3.5s". */ ttl: string; } interface GetSubscriptionMessageTransform { /** * Controls whether or not to use this transform. If not set or 'false', * the transform will be applied to messages. Default: 'true'. */ disabled: boolean; /** * Javascript User Defined Function. If multiple Javascript UDFs are specified on a resource, * each one must have a unique 'function_name'. */ javascriptUdfs: outputs.pubsub.GetSubscriptionMessageTransformJavascriptUdf[]; } interface GetSubscriptionMessageTransformJavascriptUdf { /** * JavaScript code that contains a function 'function_name' with the * following signature: * ''' * /** * * Transforms a Pub/Sub message. 
* * * * @return {(Object)>|null)} - To * * filter a message, return 'null'. To transform a message return a map * * with the following keys: * * - (required) 'data' : {string} * * - (optional) 'attributes' : {Object} * * Returning empty 'attributes' will remove all attributes from the * * message. * * * * @param {(Object)>} Pub/Sub * * message. Keys: * * - (required) 'data' : {string} * * - (required) 'attributes' : {Object} * * * * @param {Object} metadata - Pub/Sub message metadata. * * Keys: * * - (required) 'message_id' : {string} * * - (optional) 'publish_time': {string} YYYY-MM-DDTHH:MM:SSZ format * * - (optional) 'ordering_key': {string} * */ * function (message, metadata) { * } * ''' */ code: string; /** * Name of the JavaScript function that should be applied to Pub/Sub messages. */ functionName: string; } interface GetSubscriptionPushConfig { /** * Endpoint configuration attributes. * * Every endpoint has a set of API supported attributes that can * be used to control different aspects of the message delivery. * * The currently supported attribute is x-goog-version, which you * can use to change the format of the pushed message. This * attribute indicates the version of the data expected by * the endpoint. This controls the shape of the pushed message * (i.e., its fields and metadata). The endpoint version is * based on the version of the Pub/Sub API. * * If not present during the subscriptions.create call, * it will default to the version of the API used to make * such call. If not present during a subscriptions.modifyPushConfig * call, its value will not be changed. subscriptions.get * calls will always return a valid version, even if the * subscription was created without this attribute. * * The possible values for this attribute are: * * - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. * - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API. 
*/ attributes: { [key: string]: string; }; /** * When set, the payload to the push endpoint is not wrapped. Sets the * 'data' field as the HTTP body for delivery. */ noWrappers: outputs.pubsub.GetSubscriptionPushConfigNoWrapper[]; /** * If specified, Pub/Sub will generate and attach an OIDC JWT token as * an Authorization header in the HTTP request for every pushed message. */ oidcTokens: outputs.pubsub.GetSubscriptionPushConfigOidcToken[]; /** * A URL locating the endpoint to which messages should be pushed. * For example, a Webhook endpoint might use * "https://example.com/push". */ pushEndpoint: string; } interface GetSubscriptionPushConfigNoWrapper { /** * When true, writes the Pub/Sub message metadata to * 'x-goog-pubsub-:' headers of the HTTP request. Writes the * Pub/Sub message attributes to ':' headers of the HTTP request. */ writeMetadata: boolean; } interface GetSubscriptionPushConfigOidcToken { /** * Audience to be used when generating OIDC token. The audience claim * identifies the recipients that the JWT is intended for. The audience * value is a single case-sensitive string. Having multiple values (array) * for the audience field is not supported. More info about the OIDC JWT * token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 * Note: if not specified, the Push endpoint URL will be used. */ audience: string; /** * Service account email to be used for generating the OIDC token. * The caller (for subscriptions.create, subscriptions.patch, and * subscriptions.modifyPushConfig RPCs) must have the * iam.serviceAccounts.actAs permission for the service account. */ serviceAccountEmail: string; } interface GetSubscriptionRetryPolicy { /** * The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". 
*/ maximumBackoff: string; /** * The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ minimumBackoff: string; } interface GetTopicIngestionDataSourceSetting { /** * Settings for ingestion from Amazon Kinesis Data Streams. */ awsKineses: outputs.pubsub.GetTopicIngestionDataSourceSettingAwsKinese[]; /** * Settings for ingestion from Amazon Managed Streaming for Apache Kafka. */ awsMsks: outputs.pubsub.GetTopicIngestionDataSourceSettingAwsMsk[]; /** * Settings for ingestion from Azure Event Hubs. */ azureEventHubs: outputs.pubsub.GetTopicIngestionDataSourceSettingAzureEventHub[]; /** * Settings for ingestion from Cloud Storage. */ cloudStorages: outputs.pubsub.GetTopicIngestionDataSourceSettingCloudStorage[]; /** * Settings for ingestion from Confluent Cloud. */ confluentClouds: outputs.pubsub.GetTopicIngestionDataSourceSettingConfluentCloud[]; /** * Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, * no Platform Logs will be generated.' */ platformLogsSettings: outputs.pubsub.GetTopicIngestionDataSourceSettingPlatformLogsSetting[]; } interface GetTopicIngestionDataSourceSettingAwsKinese { /** * AWS role ARN to be used for Federated Identity authentication with * Kinesis. Check the Pub/Sub docs for how to set up this role and the * required permissions that need to be attached to it. */ awsRoleArn: string; /** * The Kinesis consumer ARN to used for ingestion in * Enhanced Fan-Out mode. The consumer must be already * created and ready to be used. */ consumerArn: string; /** * The GCP service account to be used for Federated Identity authentication * with Kinesis (via a 'AssumeRoleWithWebIdentity' call for the provided * role). The 'awsRoleArn' must be set up with 'accounts.google.com:sub' * equals to this service account number. 
*/ gcpServiceAccount: string; /** * The Kinesis stream ARN to ingest data from. */ streamArn: string; } interface GetTopicIngestionDataSourceSettingAwsMsk { /** * AWS role ARN to be used for Federated Identity authentication with * MSK. Check the Pub/Sub docs for how to set up this role and the * required permissions that need to be attached to it. */ awsRoleArn: string; /** * ARN that uniquely identifies the MSK cluster. */ clusterArn: string; /** * The GCP service account to be used for Federated Identity authentication * with MSK (via a 'AssumeRoleWithWebIdentity' call for the provided * role). The 'awsRoleArn' must be set up with 'accounts.google.com:sub' * equals to this service account number. */ gcpServiceAccount: string; /** * The name of the MSK topic that Pub/Sub will import from. */ topic: string; } interface GetTopicIngestionDataSourceSettingAzureEventHub { /** * The Azure event hub client ID to use for ingestion. */ clientId: string; /** * The Azure event hub to ingest data from. */ eventHub: string; /** * The GCP service account to be used for Federated Identity authentication * with Azure (via a 'AssumeRoleWithWebIdentity' call for the provided * role). */ gcpServiceAccount: string; /** * The Azure event hub namespace to ingest data from. */ namespace: string; /** * The name of the resource group within an Azure subscription. */ resourceGroup: string; /** * The Azure event hub subscription ID to use for ingestion. */ subscriptionId: string; /** * The Azure event hub tenant ID to use for ingestion. */ tenantId: string; } interface GetTopicIngestionDataSourceSettingCloudStorage { /** * Configuration for reading Cloud Storage data in Avro binary format. The * bytes of each object will be set to the 'data' field of a Pub/Sub message. */ avroFormats: outputs.pubsub.GetTopicIngestionDataSourceSettingCloudStorageAvroFormat[]; /** * Cloud Storage bucket. The bucket name must be without any * prefix like "gs://". 
See the bucket naming requirements: * https://cloud.google.com/storage/docs/buckets#naming. */ bucket: string; /** * Glob pattern used to match objects that will be ingested. If unset, all * objects will be ingested. See the supported patterns: * https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob */ matchGlob: string; /** * The timestamp set in RFC3339 text format. If set, only objects with a * larger or equal timestamp will be ingested. Unset by default, meaning * all objects will be ingested. */ minimumObjectCreateTime: string; /** * Configuration for reading Cloud Storage data written via Cloud Storage * subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The * data and attributes fields of the originally exported Pub/Sub message * will be restored when publishing. */ pubsubAvroFormats: outputs.pubsub.GetTopicIngestionDataSourceSettingCloudStoragePubsubAvroFormat[]; /** * Configuration for reading Cloud Storage data in text format. Each line of * text as specified by the delimiter will be set to the 'data' field of a * Pub/Sub message. */ textFormats: outputs.pubsub.GetTopicIngestionDataSourceSettingCloudStorageTextFormat[]; } interface GetTopicIngestionDataSourceSettingCloudStorageAvroFormat { } interface GetTopicIngestionDataSourceSettingCloudStoragePubsubAvroFormat { } interface GetTopicIngestionDataSourceSettingCloudStorageTextFormat { /** * The delimiter to use when using the 'text' format. Each line of text as * specified by the delimiter will be set to the 'data' field of a Pub/Sub * message. When unset, '\n' is used. */ delimiter: string; } interface GetTopicIngestionDataSourceSettingConfluentCloud { /** * The Confluent Cloud bootstrap server. The format is url:port. */ bootstrapServer: string; /** * The Confluent Cloud cluster ID. */ clusterId: string; /** * The GCP service account to be used for Federated Identity authentication * with Confluent Cloud. 
*/ gcpServiceAccount: string; /** * Identity pool ID to be used for Federated Identity authentication with Confluent Cloud. */ identityPoolId: string; /** * Name of the Confluent Cloud topic that Pub/Sub will import from. */ topic: string; } interface GetTopicIngestionDataSourceSettingPlatformLogsSetting { /** * The minimum severity level of Platform Logs that will be written. If unspecified, * no Platform Logs will be written. Default value: "SEVERITY_UNSPECIFIED" Possible values: ["SEVERITY_UNSPECIFIED", "DISABLED", "DEBUG", "INFO", "WARNING", "ERROR"] */ severity: string; } interface GetTopicMessageStoragePolicy { /** * A list of IDs of GCP regions where messages that are published to * the topic may be persisted in storage. Messages published by * publishers running in non-allowed GCP regions (or running outside * of GCP altogether) will be routed for storage in one of the * allowed regions. An empty list means that no regions are allowed, * and is not a valid configuration. */ allowedPersistenceRegions: string[]; /** * If true, 'allowedPersistenceRegions' is also used to enforce in-transit * guarantees for messages. That is, Pub/Sub will fail topics.publish * operations on this topic and subscribe operations on any subscription * attached to this topic in any region that is not in 'allowedPersistenceRegions'. */ enforceInTransit: boolean; } interface GetTopicMessageTransform { /** * Controls whether or not to use this transform. If not set or 'false', * the transform will be applied to messages. Default: 'true'. */ disabled: boolean; /** * Javascript User Defined Function. If multiple Javascript UDFs are specified on a resource, * each one must have a unique 'function_name'. */ javascriptUdfs: outputs.pubsub.GetTopicMessageTransformJavascriptUdf[]; } interface GetTopicMessageTransformJavascriptUdf { /** * JavaScript code that contains a function 'function_name' with the * following signature: * ''' * /** * * Transforms a Pub/Sub message. 
* * * * @return {(Object)>|null)} - To * * filter a message, return 'null'. To transform a message return a map * * with the following keys: * * - (required) 'data' : {string} * * - (optional) 'attributes' : {Object} * * Returning empty 'attributes' will remove all attributes from the * * message. * * * * @param {(Object)>} Pub/Sub * * message. Keys: * * - (required) 'data' : {string} * * - (required) 'attributes' : {Object} * * * * @param {Object} metadata - Pub/Sub message metadata. * * Keys: * * - (required) 'message_id' : {string} * * - (optional) 'publish_time': {string} YYYY-MM-DDTHH:MM:SSZ format * * - (optional) 'ordering_key': {string} * */ * function (message, metadata) { * } * ''' */ code: string; /** * Name of the JavaScript function that should be applied to Pub/Sub messages. */ functionName: string; } interface GetTopicSchemaSetting { /** * The encoding of messages validated against schema. Default value: "ENCODING_UNSPECIFIED" Possible values: ["ENCODING_UNSPECIFIED", "JSON", "BINARY"] */ encoding: string; /** * The name of the schema that messages published should be * validated against. Format is projects/{project}/schemas/{schema}. * The value of this field will be _deleted-schema_ * if the schema has been deleted. */ schema: string; } interface LiteSubscriptionDeliveryConfig { /** * When this subscription should send messages to subscribers relative to messages persistence in storage. * Possible values are: `DELIVER_IMMEDIATELY`, `DELIVER_AFTER_STORED`, `DELIVERY_REQUIREMENT_UNSPECIFIED`. */ deliveryRequirement: string; } interface LiteTopicPartitionConfig { /** * The capacity configuration. * Structure is documented below. */ capacity?: outputs.pubsub.LiteTopicPartitionConfigCapacity; /** * The number of partitions in the topic. Must be at least 1. */ count: number; } interface LiteTopicPartitionConfigCapacity { /** * Publish throughput capacity per partition in MiB/s. Must be >= 4 and <= 16. 
*/ publishMibPerSec: number; /** * Subscribe throughput capacity per partition in MiB/s. Must be >= 4 and <= 32. */ subscribeMibPerSec: number; } interface LiteTopicReservationConfig { /** * The Reservation to use for this topic's throughput capacity. */ throughputReservation?: string; } interface LiteTopicRetentionConfig { /** * The provisioned storage, in bytes, per partition. If the number of bytes stored * in any of the topic's partitions grows beyond this value, older messages will be * dropped to make room for newer ones, regardless of the value of period. */ perPartitionBytes: string; /** * How long a published message is retained. If unset, messages will be retained as * long as the bytes retained for each partition is below perPartitionBytes. A * duration in seconds with up to nine fractional digits, terminated by 's'. * Example: "3.5s". */ period?: string; } interface SchemaIamBindingCondition { description?: string; expression: string; title: string; } interface SchemaIamMemberCondition { description?: string; expression: string; title: string; } interface SubscriptionBigqueryConfig { /** * When true and useTopicSchema or useTableSchema is true, any fields that are a part of the topic schema or message schema that * are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync * and any messages with extra fields are not written and remain in the subscription's backlog. */ dropUnknownFields?: boolean; /** * The service account to use to write to BigQuery. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), * service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. */ serviceAccountEmail?: string; /** * The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} */ table: string; /** * When true, use the BigQuery table's schema as the columns to write to in BigQuery. 
Messages * must be published in JSON format. Only one of useTopicSchema and useTableSchema can be set. */ useTableSchema?: boolean; /** * When true, use the topic's schema as the columns to write to in BigQuery, if it exists. * Only one of useTopicSchema and useTableSchema can be set. */ useTopicSchema?: boolean; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. * The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. */ writeMetadata?: boolean; } interface SubscriptionCloudStorageConfig { /** * If set, message data will be written to Cloud Storage in Avro format. * Structure is documented below. */ avroConfig?: outputs.pubsub.SubscriptionCloudStorageConfigAvroConfig; /** * User-provided name for the Cloud Storage bucket. The bucket must be created by the user. The bucket name must be without any prefix like "gs://". */ bucket: string; /** * User-provided format string specifying how to represent datetimes in Cloud Storage filenames. */ filenameDatetimeFormat?: string; /** * User-provided prefix for Cloud Storage filename. */ filenamePrefix?: string; /** * User-provided suffix for Cloud Storage filename. Must not end in "/". */ filenameSuffix?: string; /** * The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. * The maxBytes limit may be exceeded in cases where messages are larger than the limit. */ maxBytes?: number; /** * The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. * May not exceed the subscription's acknowledgement deadline. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
*/ maxDuration?: string; /** * The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. */ maxMessages?: number; /** * The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), * service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. */ serviceAccountEmail?: string; /** * (Output) * An output-only field that indicates whether or not the subscription can receive messages. */ state: string; } interface SubscriptionCloudStorageConfigAvroConfig { /** * When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. */ useTopicSchema?: boolean; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. */ writeMetadata?: boolean; } interface SubscriptionDeadLetterPolicy { /** * The name of the topic to which dead letter messages should be published. * Format is `projects/{project}/topics/{topic}`. * The Cloud Pub/Sub service account associated with the enclosing subscription's * parent project (i.e., * service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have * permission to Publish() to this topic. * The operation will fail if the topic does not exist. * Users should ensure that there is a subscription attached to this topic * since messages published to a topic with no subscriptions are lost. */ deadLetterTopic?: string; /** * The maximum number of delivery attempts for any message. The value must be * between 5 and 100. * The number of delivery attempts is defined as 1 + (the sum of number of * NACKs and number of times the acknowledgement deadline has been exceeded for the message). * A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that * client libraries may automatically extend ack_deadlines. * This field will be honored on a best effort basis. 
* If this parameter is 0, a default value of 5 is used. */ maxDeliveryAttempts?: number; } interface SubscriptionExpirationPolicy { /** * Specifies the "time-to-live" duration for an associated resource. The * resource expires if it is not active for a period of ttl. * If ttl is set to "", the associated resource never expires. * A duration in seconds with up to nine fractional digits, terminated by 's'. * Example - "3.5s". */ ttl: string; } interface SubscriptionIAMBindingCondition { description?: string; expression: string; title: string; } interface SubscriptionIAMMemberCondition { description?: string; expression: string; title: string; } interface SubscriptionMessageTransform { /** * Controls whether or not to use this transform. If not set or `false`, * the transform will be applied to messages. Default: `true`. */ disabled?: boolean; /** * Javascript User Defined Function. If multiple Javascript UDFs are specified on a resource, * each one must have a unique `functionName`. * Structure is documented below. */ javascriptUdf?: outputs.pubsub.SubscriptionMessageTransformJavascriptUdf; } interface SubscriptionMessageTransformJavascriptUdf { /** * JavaScript code that contains a function `functionName` with the * following signature: * ``` * /** * * Transforms a Pub/Sub message. * * * * @return {(Object)>|null)} - To * * filter a message, return `null`. To transform a message return a map * * with the following keys: * * - (required) 'data' : {string} * * - (optional) 'attributes' : {Object} * * Returning empty `attributes` will remove all attributes from the * * message. * * * * @param {(Object)>} Pub/Sub * * message. Keys: * * - (required) 'data' : {string} * * - (required) 'attributes' : {Object} * * * * @param {Object} metadata - Pub/Sub message metadata. 
* * Keys: * * - (required) 'message_id' : {string} * * - (optional) 'publish_time': {string} YYYY-MM-DDTHH:MM:SSZ format * * - (optional) 'ordering_key': {string} * */ * function (message, metadata) { * } * ``` */ code: string; /** * Name of the JavaScript function that should be applied to Pub/Sub messages. */ functionName: string; } interface SubscriptionPushConfig { /** * Endpoint configuration attributes. * Every endpoint has a set of API supported attributes that can * be used to control different aspects of the message delivery. * The currently supported attribute is x-goog-version, which you * can use to change the format of the pushed message. This * attribute indicates the version of the data expected by * the endpoint. This controls the shape of the pushed message * (i.e., its fields and metadata). The endpoint version is * based on the version of the Pub/Sub API. * If not present during the subscriptions.create call, * it will default to the version of the API used to make * such call. If not present during a subscriptions.modifyPushConfig * call, its value will not be changed. subscriptions.get * calls will always return a valid version, even if the * subscription was created without this attribute. * The possible values for this attribute are: * - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. * - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API. */ attributes?: { [key: string]: string; }; /** * When set, the payload to the push endpoint is not wrapped.Sets the * `data` field as the HTTP body for delivery. * Structure is documented below. */ noWrapper?: outputs.pubsub.SubscriptionPushConfigNoWrapper; /** * If specified, Pub/Sub will generate and attach an OIDC JWT token as * an Authorization header in the HTTP request for every pushed message. * Structure is documented below. */ oidcToken?: outputs.pubsub.SubscriptionPushConfigOidcToken; /** * A URL locating the endpoint to which messages should be pushed. 
* For example, a Webhook endpoint might use * "https://example.com/push". */ pushEndpoint: string; } interface SubscriptionPushConfigNoWrapper { /** * When true, writes the Pub/Sub message metadata to * `x-goog-pubsub-:` headers of the HTTP request. Writes the * Pub/Sub message attributes to `:` headers of the HTTP request. */ writeMetadata: boolean; } interface SubscriptionPushConfigOidcToken { /** * Audience to be used when generating OIDC token. The audience claim * identifies the recipients that the JWT is intended for. The audience * value is a single case-sensitive string. Having multiple values (array) * for the audience field is not supported. More info about the OIDC JWT * token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 * Note: if not specified, the Push endpoint URL will be used. */ audience?: string; /** * Service account email to be used for generating the OIDC token. * The caller (for subscriptions.create, subscriptions.patch, and * subscriptions.modifyPushConfig RPCs) must have the * iam.serviceAccounts.actAs permission for the service account. */ serviceAccountEmail: string; } interface SubscriptionRetryPolicy { /** * The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ maximumBackoff: string; /** * The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ minimumBackoff: string; } interface TopicIAMBindingCondition { description?: string; expression: string; title: string; } interface TopicIAMMemberCondition { description?: string; expression: string; title: string; } interface TopicIngestionDataSourceSettings { /** * Settings for ingestion from Amazon Kinesis Data Streams. 
* Structure is documented below. */ awsKinesis?: outputs.pubsub.TopicIngestionDataSourceSettingsAwsKinesis; /** * Settings for ingestion from Amazon Managed Streaming for Apache Kafka. * Structure is documented below. */ awsMsk?: outputs.pubsub.TopicIngestionDataSourceSettingsAwsMsk; /** * Settings for ingestion from Azure Event Hubs. * Structure is documented below. */ azureEventHubs?: outputs.pubsub.TopicIngestionDataSourceSettingsAzureEventHubs; /** * Settings for ingestion from Cloud Storage. * Structure is documented below. */ cloudStorage?: outputs.pubsub.TopicIngestionDataSourceSettingsCloudStorage; /** * Settings for ingestion from Confluent Cloud. * Structure is documented below. */ confluentCloud?: outputs.pubsub.TopicIngestionDataSourceSettingsConfluentCloud; /** * Settings for Platform Logs regarding ingestion to Pub/Sub. If unset, * no Platform Logs will be generated.' * Structure is documented below. */ platformLogsSettings?: outputs.pubsub.TopicIngestionDataSourceSettingsPlatformLogsSettings; } interface TopicIngestionDataSourceSettingsAwsKinesis { /** * AWS role ARN to be used for Federated Identity authentication with * Kinesis. Check the Pub/Sub docs for how to set up this role and the * required permissions that need to be attached to it. */ awsRoleArn: string; /** * The Kinesis consumer ARN to used for ingestion in * Enhanced Fan-Out mode. The consumer must be already * created and ready to be used. */ consumerArn: string; /** * The GCP service account to be used for Federated Identity authentication * with Kinesis (via a `AssumeRoleWithWebIdentity` call for the provided * role). The `awsRoleArn` must be set up with `accounts.google.com:sub` * equals to this service account number. */ gcpServiceAccount: string; /** * The Kinesis stream ARN to ingest data from. */ streamArn: string; } interface TopicIngestionDataSourceSettingsAwsMsk { /** * AWS role ARN to be used for Federated Identity authentication with * MSK. 
Check the Pub/Sub docs for how to set up this role and the * required permissions that need to be attached to it. */ awsRoleArn: string; /** * ARN that uniquely identifies the MSK cluster. */ clusterArn: string; /** * The GCP service account to be used for Federated Identity authentication * with MSK (via a `AssumeRoleWithWebIdentity` call for the provided * role). The `awsRoleArn` must be set up with `accounts.google.com:sub` * equals to this service account number. */ gcpServiceAccount: string; /** * The name of the MSK topic that Pub/Sub will import from. */ topic: string; } interface TopicIngestionDataSourceSettingsAzureEventHubs { /** * The Azure event hub client ID to use for ingestion. */ clientId?: string; /** * The Azure event hub to ingest data from. */ eventHub?: string; /** * The GCP service account to be used for Federated Identity authentication * with Azure (via a `AssumeRoleWithWebIdentity` call for the provided * role). */ gcpServiceAccount?: string; /** * The Azure event hub namespace to ingest data from. */ namespace?: string; /** * The name of the resource group within an Azure subscription. */ resourceGroup?: string; /** * The Azure event hub subscription ID to use for ingestion. */ subscriptionId?: string; /** * The Azure event hub tenant ID to use for ingestion. */ tenantId?: string; } interface TopicIngestionDataSourceSettingsCloudStorage { /** * Configuration for reading Cloud Storage data in Avro binary format. The * bytes of each object will be set to the `data` field of a Pub/Sub message. */ avroFormat?: outputs.pubsub.TopicIngestionDataSourceSettingsCloudStorageAvroFormat; /** * Cloud Storage bucket. The bucket name must be without any * prefix like "gs://". See the bucket naming requirements: * https://cloud.google.com/storage/docs/buckets#naming. */ bucket: string; /** * Glob pattern used to match objects that will be ingested. If unset, all * objects will be ingested. 
See the supported patterns: * https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob */ matchGlob?: string; /** * The timestamp set in RFC3339 text format. If set, only objects with a * larger or equal timestamp will be ingested. Unset by default, meaning * all objects will be ingested. */ minimumObjectCreateTime?: string; /** * Configuration for reading Cloud Storage data written via Cloud Storage * subscriptions(See https://cloud.google.com/pubsub/docs/cloudstorage). The * data and attributes fields of the originally exported Pub/Sub message * will be restored when publishing. */ pubsubAvroFormat?: outputs.pubsub.TopicIngestionDataSourceSettingsCloudStoragePubsubAvroFormat; /** * Configuration for reading Cloud Storage data in text format. Each line of * text as specified by the delimiter will be set to the `data` field of a * Pub/Sub message. * Structure is documented below. */ textFormat?: outputs.pubsub.TopicIngestionDataSourceSettingsCloudStorageTextFormat; } interface TopicIngestionDataSourceSettingsCloudStorageAvroFormat { } interface TopicIngestionDataSourceSettingsCloudStoragePubsubAvroFormat { } interface TopicIngestionDataSourceSettingsCloudStorageTextFormat { /** * The delimiter to use when using the 'text' format. Each line of text as * specified by the delimiter will be set to the 'data' field of a Pub/Sub * message. When unset, '\n' is used. */ delimiter?: string; } interface TopicIngestionDataSourceSettingsConfluentCloud { /** * The Confluent Cloud bootstrap server. The format is url:port. */ bootstrapServer: string; /** * The Confluent Cloud cluster ID. */ clusterId?: string; /** * The GCP service account to be used for Federated Identity authentication * with Confluent Cloud. */ gcpServiceAccount: string; /** * Identity pool ID to be used for Federated Identity authentication with Confluent Cloud. */ identityPoolId: string; /** * Name of the Confluent Cloud topic that Pub/Sub will import from. 
*/ topic: string; } interface TopicIngestionDataSourceSettingsPlatformLogsSettings { /** * The minimum severity level of Platform Logs that will be written. If unspecified, * no Platform Logs will be written. * Default value is `SEVERITY_UNSPECIFIED`. * Possible values are: `SEVERITY_UNSPECIFIED`, `DISABLED`, `DEBUG`, `INFO`, `WARNING`, `ERROR`. */ severity?: string; } interface TopicMessageStoragePolicy { /** * A list of IDs of GCP regions where messages that are published to * the topic may be persisted in storage. Messages published by * publishers running in non-allowed GCP regions (or running outside * of GCP altogether) will be routed for storage in one of the * allowed regions. An empty list means that no regions are allowed, * and is not a valid configuration. */ allowedPersistenceRegions: string[]; /** * If true, `allowedPersistenceRegions` is also used to enforce in-transit * guarantees for messages. That is, Pub/Sub will fail topics.publish * operations on this topic and subscribe operations on any subscription * attached to this topic in any region that is not in `allowedPersistenceRegions`. */ enforceInTransit?: boolean; } interface TopicMessageTransform { /** * Controls whether or not to use this transform. If not set or `false`, * the transform will be applied to messages. Default: `true`. */ disabled?: boolean; /** * Javascript User Defined Function. If multiple Javascript UDFs are specified on a resource, * each one must have a unique `functionName`. * Structure is documented below. */ javascriptUdf?: outputs.pubsub.TopicMessageTransformJavascriptUdf; } interface TopicMessageTransformJavascriptUdf { /** * JavaScript code that contains a function `functionName` with the * following signature: * ``` * /** * * Transforms a Pub/Sub message. * * * * @return {(Object)>|null)} - To * * filter a message, return `null`. 
To transform a message return a map * * with the following keys: * * - (required) 'data' : {string} * * - (optional) 'attributes' : {Object} * * Returning empty `attributes` will remove all attributes from the * * message. * * * * @param {(Object)>} Pub/Sub * * message. Keys: * * - (required) 'data' : {string} * * - (required) 'attributes' : {Object} * * * * @param {Object} metadata - Pub/Sub message metadata. * * Keys: * * - (required) 'message_id' : {string} * * - (optional) 'publish_time': {string} YYYY-MM-DDTHH:MM:SSZ format * * - (optional) 'ordering_key': {string} * */ * function (message, metadata) { * } * ``` */ code: string; /** * Name of the JavaScript function that should be applied to Pub/Sub messages. */ functionName: string; } interface TopicSchemaSettings { /** * The encoding of messages validated against schema. * Default value is `ENCODING_UNSPECIFIED`. * Possible values are: `ENCODING_UNSPECIFIED`, `JSON`, `BINARY`. */ encoding?: string; /** * The name of the schema that messages published should be * validated against. Format is projects/{project}/schemas/{schema}. * The value of this field will be _deleted-schema_ * if the schema has been deleted. */ schema: string; } } export declare namespace recaptcha { interface EnterpriseKeyAndroidSettings { /** * If set to true, it means allowedPackageNames will not be enforced. */ allowAllPackageNames?: boolean; /** * Android package names of apps allowed to use the key. Example: 'com.companyname.appname' */ allowedPackageNames?: string[]; } interface EnterpriseKeyIosSettings { /** * If set to true, it means allowedBundleIds will not be enforced. */ allowAllBundleIds?: boolean; /** * iOS bundle ids of apps allowed to use the key. 
Example: 'com.companyname.productname.appname' */ allowedBundleIds?: string[]; } interface EnterpriseKeyTestingOptions { /** * For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE */ testingChallenge: string; /** * All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive. */ testingScore?: number; } interface EnterpriseKeyWafSettings { /** * Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS */ wafFeature: string; /** * The WAF service that uses this key. Possible values: CA, FASTLY */ wafService: string; } interface EnterpriseKeyWebSettings { /** * If set to true, it means allowedDomains will not be enforced. */ allowAllDomains?: boolean; /** * If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type. */ allowAmpTraffic?: boolean; /** * Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com' */ allowedDomains?: string[]; /** * Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY */ challengeSecurityPreference: string; /** * Required. Describes how this key is integrated with the website. 
Possible values: SCORE, CHECKBOX, INVISIBLE */ integrationType: string; } } export declare namespace redis { interface ClusterAutomatedBackupConfig { /** * Trigger automated backups at a fixed frequency. * Structure is documented below. */ fixedFrequencySchedule: outputs.redis.ClusterAutomatedBackupConfigFixedFrequencySchedule; /** * How long to keep automated backups before the backups are deleted. * The value should be between 1 day and 365 days. If not specified, the default value is 35 days. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ retention: string; } interface ClusterAutomatedBackupConfigFixedFrequencySchedule { /** * The start time of every automated backup in UTC. * It must be set to the start of an hour. This field is required. * Structure is documented below. */ startTime: outputs.redis.ClusterAutomatedBackupConfigFixedFrequencyScheduleStartTime; } interface ClusterAutomatedBackupConfigFixedFrequencyScheduleStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; } interface ClusterCrossClusterReplicationConfig { /** * The role of the cluster in cross cluster replication. Supported values are: * 1. `CLUSTER_ROLE_UNSPECIFIED`: This is an independent cluster that has never participated in cross cluster replication. It allows both reads and writes. * 1. `NONE`: This is an independent cluster that previously participated in cross cluster replication(either as a `PRIMARY` or `SECONDARY` cluster). It allows both reads and writes. * 1. `PRIMARY`: This cluster serves as the replication source for secondary clusters that are replicating from it. Any data written to it is automatically replicated to its secondary clusters. It allows both reads and writes. * 1. `SECONDARY`: This cluster replicates data from the primary cluster. It allows only reads. 
* Possible values are: `CLUSTER_ROLE_UNSPECIFIED`, `NONE`, `PRIMARY`, `SECONDARY`. */ clusterRole?: string; /** * (Output) * An output only view of all the member clusters participating in cross cluster replication. This field is populated for all the member clusters irrespective of their cluster role. * Structure is documented below. */ memberships: outputs.redis.ClusterCrossClusterReplicationConfigMembership[]; /** * Details of the primary cluster that is used as the replication source for this secondary cluster. This is allowed to be set only for clusters whose cluster role is of type `SECONDARY`. * Structure is documented below. */ primaryCluster?: outputs.redis.ClusterCrossClusterReplicationConfigPrimaryCluster; /** * List of secondary clusters that are replicating from this primary cluster. This is allowed to be set only for clusters whose cluster role is of type `PRIMARY`. * Structure is documented below. */ secondaryClusters?: outputs.redis.ClusterCrossClusterReplicationConfigSecondaryCluster[]; /** * (Output) * The last time cross cluster replication config was updated. */ updateTime: string; } interface ClusterCrossClusterReplicationConfigMembership { /** * Details of the primary cluster that is used as the replication source for all the secondary clusters. */ primaryClusters: outputs.redis.ClusterCrossClusterReplicationConfigMembershipPrimaryCluster[]; /** * List of secondary clusters that are replicating from the primary cluster. */ secondaryClusters: outputs.redis.ClusterCrossClusterReplicationConfigMembershipSecondaryCluster[]; } interface ClusterCrossClusterReplicationConfigMembershipPrimaryCluster { /** * The full resource path of the primary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster: string; /** * (Output) * The unique id of the primary cluster. 
*/ uid: string; } interface ClusterCrossClusterReplicationConfigMembershipSecondaryCluster { /** * (Output) * The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster: string; /** * (Output) * The unique id of the secondary cluster. */ uid: string; } interface ClusterCrossClusterReplicationConfigPrimaryCluster { /** * The full resource path of the primary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster?: string; /** * (Output) * The unique id of the primary cluster. */ uid: string; } interface ClusterCrossClusterReplicationConfigSecondaryCluster { /** * (Output) * The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster?: string; /** * (Output) * The unique id of the secondary cluster. */ uid: string; } interface ClusterDiscoveryEndpoint { /** * Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ address?: string; /** * Output only. The port number of the exposed Redis endpoint. */ port?: number; /** * Output only. Customer configuration for where the endpoint * is created and accessed from. * Structure is documented below. */ pscConfig?: outputs.redis.ClusterDiscoveryEndpointPscConfig; } interface ClusterDiscoveryEndpointPscConfig { /** * The consumer network where the IP address resides, in the form of projects/{projectId}/global/networks/{network_id}. */ network?: string; } interface ClusterGcsSource { /** * URIs of the GCS objects to import. Example: gs://bucket1/object1, gs://bucket2/folder2/object2 */ uris: string[]; } interface ClusterMaintenancePolicy { /** * (Output) * Output only. The time when the policy was created. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ createTime: string; /** * (Output) * Output only. The time when the policy was last updated. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Optional. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number * of weeklyWindow is expected to be one. * Structure is documented below. */ weeklyMaintenanceWindows?: outputs.redis.ClusterMaintenancePolicyWeeklyMaintenanceWindow[]; } interface ClusterMaintenancePolicyWeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. */ day: string; /** * (Output) * Output only. Duration of the maintenance window. * The current window is fixed at 1 hour. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Required. Start time of the window in UTC time. * Structure is documented below. */ startTime: outputs.redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime; } interface ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface ClusterMaintenanceSchedule { /** * (Output) * Output only. 
The end time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * (Output) * Output only. The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * (Output) * Output only. The start time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface ClusterManagedBackupSource { /** * Example: `projects/{project}/locations/{location}/backupCollections/{collection}/backups/{backup}`. */ backup: string; } interface ClusterManagedServerCa { /** * (Output) * The PEM encoded CA certificate chains for redis managed server authentication * Structure is documented below. */ caCerts: outputs.redis.ClusterManagedServerCaCaCert[]; } interface ClusterManagedServerCaCaCert { /** * (Output) * The certificates that form the CA chain, from leaf to root order */ certificates: string[]; } interface ClusterPersistenceConfig { /** * AOF configuration. This field will be ignored if mode is not AOF. * Structure is documented below. */ aofConfig: outputs.redis.ClusterPersistenceConfigAofConfig; /** * Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. * - DISABLED: Persistence (both backup and restore) is disabled for the cluster. * - RDB: RDB based Persistence is enabled. * - AOF: AOF based Persistence is enabled. * Possible values are: `PERSISTENCE_MODE_UNSPECIFIED`, `DISABLED`, `RDB`, `AOF`. */ mode: string; /** * RDB configuration. This field will be ignored if mode is not RDB. * Structure is documented below. 
*/ rdbConfig: outputs.redis.ClusterPersistenceConfigRdbConfig; } interface ClusterPersistenceConfigAofConfig { /** * Optional. Available fsync modes. * - NO - Do not explicitly call fsync(). Rely on OS defaults. * - EVERYSEC - Call fsync() once per second in a background thread. A balance between performance and durability. * - ALWAYS - Call fsync() for each write command. * Possible values are: `APPEND_FSYNC_UNSPECIFIED`, `NO`, `EVERYSEC`, `ALWAYS`. */ appendFsync: string; } interface ClusterPersistenceConfigRdbConfig { /** * Optional. Available snapshot periods for scheduling. * - ONE_HOUR: Snapshot every 1 hour. * - SIX_HOURS: Snapshot every 6 hours. * - TWELVE_HOURS: Snapshot every 12 hours. * - TWENTY_FOUR_HOURS: Snapshot every 24 hours. * Possible values are: `SNAPSHOT_PERIOD_UNSPECIFIED`, `ONE_HOUR`, `SIX_HOURS`, `TWELVE_HOURS`, `TWENTY_FOUR_HOURS`. */ rdbSnapshotPeriod: string; /** * The time that the first snapshot was/will be attempted, and to which * future snapshots will be aligned. * If not provided, the current time will be used. */ rdbSnapshotStartTime: string; } interface ClusterPscConfig { /** * Required. The consumer network where the network address of * the discovery endpoint will be reserved, in the form of * projects/{network_project_id_or_number}/global/networks/{network_id}. */ network: string; } interface ClusterPscConnection { /** * Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ address?: string; /** * Output only. The URI of the consumer side forwarding rule. Example: projects/{projectNumOrId}/regions/us-east1/forwardingRules/{resourceId}. */ forwardingRule?: string; /** * The consumer network where the IP address resides, in the form of projects/{projectId}/global/networks/{network_id}. */ network?: string; /** * Output only. The consumer projectId where the forwarding rule is created from. */ projectId?: string; /** * Output only. 
The PSC connection id of the forwarding rule connected to the service attachment. */ pscConnectionId?: string; } interface ClusterPscServiceAttachment { /** * (Output) * Type of a PSC connection targeting this service attachment. */ connectionType: string; /** * (Output) * Service attachment URI which your self-created PscConnection should use as target. */ serviceAttachment: string; } interface ClusterStateInfo { /** * A nested object resource. * Structure is documented below. */ updateInfo?: outputs.redis.ClusterStateInfoUpdateInfo; } interface ClusterStateInfoUpdateInfo { /** * Target number of replica nodes per shard. */ targetReplicaCount?: number; /** * Target number of shards for redis cluster. */ targetShardCount?: number; } interface ClusterUserCreatedConnectionsClusterEndpoint { /** * Structure is documented below. */ connections?: outputs.redis.ClusterUserCreatedConnectionsClusterEndpointConnection[]; } interface ClusterUserCreatedConnectionsClusterEndpointConnection { /** * Detailed information of a PSC connection that is created by the customer * who owns the cluster. * Structure is documented below. */ pscConnection?: outputs.redis.ClusterUserCreatedConnectionsClusterEndpointConnectionPscConnection; } interface ClusterUserCreatedConnectionsClusterEndpointConnectionPscConnection { /** * The IP allocated on the consumer network for the PSC forwarding rule. */ address: string; /** * (Output) * Output Only. Type of a PSC Connection. * Possible values: * CONNECTION_TYPE_DISCOVERY * CONNECTION_TYPE_PRIMARY * CONNECTION_TYPE_READER */ connectionType: string; /** * The URI of the consumer side forwarding rule. * Format: * projects/{project}/regions/{region}/forwardingRules/{forwarding_rule} */ forwardingRule: string; /** * The consumer network where the IP address resides, in the form of * projects/{project_id}/global/networks/{network_id}. */ network: string; /** * The consumer projectId where the forwarding rule is created from. 
*/ projectId: string; /** * The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId: string; /** * (Output) * Output Only. The status of the PSC connection: whether a connection exists and ACTIVE or it no longer exists. * Possible values: * ACTIVE * NOT_FOUND */ pscConnectionStatus: string; /** * The service attachment which is the target of the PSC connection, in the form of projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment: string; } interface ClusterZoneDistributionConfig { /** * Immutable. The mode for zone distribution for Memorystore Redis cluster. * If not provided, MULTI_ZONE will be used as default * Possible values are: `MULTI_ZONE`, `SINGLE_ZONE`. */ mode: string; /** * Immutable. The zone for single zone Memorystore Redis cluster. */ zone?: string; } interface GetClusterAutomatedBackupConfig { /** * Trigger automated backups at a fixed frequency. */ fixedFrequencySchedules: outputs.redis.GetClusterAutomatedBackupConfigFixedFrequencySchedule[]; /** * How long to keep automated backups before the backups are deleted. * The value should be between 1 day and 365 days. If not specified, the default value is 35 days. * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ retention: string; } interface GetClusterAutomatedBackupConfigFixedFrequencySchedule { /** * The start time of every automated backup in UTC. * It must be set to the start of an hour. This field is required. */ startTimes: outputs.redis.GetClusterAutomatedBackupConfigFixedFrequencyScheduleStartTime[]; } interface GetClusterAutomatedBackupConfigFixedFrequencyScheduleStartTime { /** * Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. 
*/ hours: number; } interface GetClusterCrossClusterReplicationConfig { /** * The role of the cluster in cross cluster replication. Supported values are: * * 1. 'CLUSTER_ROLE_UNSPECIFIED': This is an independent cluster that has never participated in cross cluster replication. It allows both reads and writes. * * 1. 'NONE': This is an independent cluster that previously participated in cross cluster replication(either as a 'PRIMARY' or 'SECONDARY' cluster). It allows both reads and writes. * * 1. 'PRIMARY': This cluster serves as the replication source for secondary clusters that are replicating from it. Any data written to it is automatically replicated to its secondary clusters. It allows both reads and writes. * * 1. 'SECONDARY': This cluster replicates data from the primary cluster. It allows only reads. Possible values: ["CLUSTER_ROLE_UNSPECIFIED", "NONE", "PRIMARY", "SECONDARY"] */ clusterRole: string; /** * An output only view of all the member clusters participating in cross cluster replication. This field is populated for all the member clusters irrespective of their cluster role. */ memberships: outputs.redis.GetClusterCrossClusterReplicationConfigMembership[]; /** * Details of the primary cluster that is used as the replication source for this secondary cluster. This is allowed to be set only for clusters whose cluster role is of type 'SECONDARY'. */ primaryClusters: outputs.redis.GetClusterCrossClusterReplicationConfigPrimaryCluster[]; /** * List of secondary clusters that are replicating from this primary cluster. This is allowed to be set only for clusters whose cluster role is of type 'PRIMARY'. */ secondaryClusters: outputs.redis.GetClusterCrossClusterReplicationConfigSecondaryCluster[]; /** * The last time cross cluster replication config was updated. */ updateTime: string; } interface GetClusterCrossClusterReplicationConfigMembership { /** * Details of the primary cluster that is used as the replication source for all the secondary clusters. 
*/ primaryClusters: outputs.redis.GetClusterCrossClusterReplicationConfigMembershipPrimaryCluster[]; /** * List of secondary clusters that are replicating from the primary cluster. */ secondaryClusters: outputs.redis.GetClusterCrossClusterReplicationConfigMembershipSecondaryCluster[]; } interface GetClusterCrossClusterReplicationConfigMembershipPrimaryCluster { /** * The full resource path of the primary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster: string; /** * The unique id of the primary cluster. */ uid: string; } interface GetClusterCrossClusterReplicationConfigMembershipSecondaryCluster { /** * The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster: string; /** * The unique id of the secondary cluster. */ uid: string; } interface GetClusterCrossClusterReplicationConfigPrimaryCluster { /** * The full resource path of the primary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster: string; /** * The unique id of the primary cluster. */ uid: string; } interface GetClusterCrossClusterReplicationConfigSecondaryCluster { /** * The full resource path of the secondary cluster in the format: projects/{project}/locations/{region}/clusters/{cluster-id} */ cluster: string; /** * The unique id of the secondary cluster. */ uid: string; } interface GetClusterDiscoveryEndpoint { /** * Output only. Network address of the exposed Redis endpoint used by clients to connect to the service. */ address: string; /** * Output only. The port number of the exposed Redis endpoint. */ port: number; /** * Output only. Customer configuration for where the endpoint * is created and accessed from. 
*/ pscConfigs: outputs.redis.GetClusterDiscoveryEndpointPscConfig[]; } interface GetClusterDiscoveryEndpointPscConfig { /** * The consumer network where the network address of the discovery * endpoint will be reserved, in the form of * projects/{network_project_id}/global/networks/{network_id}. */ network: string; } interface GetClusterGcsSource { /** * URIs of the GCS objects to import. Example: gs://bucket1/object1, gs://bucket2/folder2/object2 */ uris: string[]; } interface GetClusterMaintenancePolicy { /** * Output only. The time when the policy was created. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ createTime: string; /** * Output only. The time when the policy was last updated. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Optional. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number * of weeklyWindow is expected to be one. */ weeklyMaintenanceWindows: outputs.redis.GetClusterMaintenancePolicyWeeklyMaintenanceWindow[]; } interface GetClusterMaintenancePolicyWeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. * * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ day: string; /** * Output only. Duration of the maintenance window. * The current window is fixed at 1 hour. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Required. Start time of the window in UTC time. 
*/ startTimes: outputs.redis.GetClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime[]; } interface GetClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetClusterMaintenanceSchedule { /** * Output only. The end time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * Output only. The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * Output only. The start time of any upcoming scheduled maintenance for this cluster. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface GetClusterManagedBackupSource { /** * Example: 'projects/{project}/locations/{location}/backupCollections/{collection}/backups/{backup}'. */ backup: string; } interface GetClusterManagedServerCa { /** * The PEM encoded CA certificate chains for redis managed server authentication */ caCerts: outputs.redis.GetClusterManagedServerCaCaCert[]; } interface GetClusterManagedServerCaCaCert { /** * The certificates that form the CA chain, from leaf to root order */ certificates: string[]; } interface GetClusterPersistenceConfig { /** * AOF configuration. This field will be ignored if mode is not AOF. 
*/ aofConfigs: outputs.redis.GetClusterPersistenceConfigAofConfig[]; /** * Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. * * - DISABLED: Persistence (both backup and restore) is disabled for the cluster. * - RDB: RDB based Persistence is enabled. * - AOF: AOF based Persistence is enabled. Possible values: ["PERSISTENCE_MODE_UNSPECIFIED", "DISABLED", "RDB", "AOF"] */ mode: string; /** * RDB configuration. This field will be ignored if mode is not RDB. */ rdbConfigs: outputs.redis.GetClusterPersistenceConfigRdbConfig[]; } interface GetClusterPersistenceConfigAofConfig { /** * Optional. Available fsync modes. * * - NO - Do not explicitly call fsync(). Rely on OS defaults. * - EVERYSEC - Call fsync() once per second in a background thread. A balance between performance and durability. * - ALWAYS - Call fsync() for each write command. Possible values: ["APPEND_FSYNC_UNSPECIFIED", "NO", "EVERYSEC", "ALWAYS"] */ appendFsync: string; } interface GetClusterPersistenceConfigRdbConfig { /** * Optional. Available snapshot periods for scheduling. * * - ONE_HOUR: Snapshot every 1 hour. * - SIX_HOURS: Snapshot every 6 hours. * - TWELVE_HOURS: Snapshot every 12 hours. * - TWENTY_FOUR_HOURS: Snapshot every 24 hours. Possible values: ["SNAPSHOT_PERIOD_UNSPECIFIED", "ONE_HOUR", "SIX_HOURS", "TWELVE_HOURS", "TWENTY_FOUR_HOURS"] */ rdbSnapshotPeriod: string; /** * The time that the first snapshot was/will be attempted, and to which * future snapshots will be aligned. * If not provided, the current time will be used. */ rdbSnapshotStartTime: string; } interface GetClusterPscConfig { /** * Required. The consumer network where the network address of * the discovery endpoint will be reserved, in the form of * projects/{network_project_id_or_number}/global/networks/{network_id}. */ network: string; } interface GetClusterPscConnection { /** * Output only. The IP allocated on the consumer network for the PSC forwarding rule. 
*/ address: string; /** * Output only. The URI of the consumer side forwarding rule. Example: projects/{projectNumOrId}/regions/us-east1/forwardingRules/{resourceId}. */ forwardingRule: string; /** * The consumer network where the IP address resides, in the form of projects/{projectId}/global/networks/{network_id}. */ network: string; /** * Output only. The consumer projectId where the forwarding rule is created from. */ projectId: string; /** * Output only. The PSC connection id of the forwarding rule connected to the service attachment. */ pscConnectionId: string; } interface GetClusterPscServiceAttachment { /** * Type of a PSC connection targeting this service attachment. */ connectionType: string; /** * Service attachment URI which your self-created PscConnection should use as target. */ serviceAttachment: string; } interface GetClusterStateInfo { /** * A nested object resource. */ updateInfos: outputs.redis.GetClusterStateInfoUpdateInfo[]; } interface GetClusterStateInfoUpdateInfo { /** * Target number of replica nodes per shard. */ targetReplicaCount: number; /** * Target number of shards for redis cluster. */ targetShardCount: number; } interface GetClusterZoneDistributionConfig { /** * Immutable. The mode for zone distribution for Memorystore Redis cluster. * If not provided, MULTI_ZONE will be used as default Possible values: ["MULTI_ZONE", "SINGLE_ZONE"] */ mode: string; /** * Immutable. The zone for single zone Memorystore Redis cluster. */ zone: string; } interface GetInstanceMaintenancePolicy { /** * Output only. The time when the policy was created. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ createTime: string; /** * Optional. Description of what this policy is for. * Create/Update methods return INVALID_ARGUMENT if the * length is greater than 512. */ description: string; /** * Output only. The time when the policy was last updated. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Optional. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number * of weeklyWindow is expected to be one. */ weeklyMaintenanceWindows: outputs.redis.GetInstanceMaintenancePolicyWeeklyMaintenanceWindow[]; } interface GetInstanceMaintenancePolicyWeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. * * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] */ day: string; /** * Output only. Duration of the maintenance window. * The current window is fixed at 1 hour. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Required. Start time of the window in UTC time. */ startTimes: outputs.redis.GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime[]; } interface GetInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds: number; } interface GetInstanceMaintenanceSchedule { /** * Output only. The end time of any upcoming scheduled maintenance for this instance. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * Output only. The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * Output only. The start time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface GetInstanceNode { /** * Node identifying string. e.g. 'node-0', 'node-1' */ id: string; /** * Location of the node. */ zone: string; } interface GetInstancePersistenceConfig { /** * Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. * * - DISABLED: Persistence is disabled for the instance, and any existing snapshots are deleted. * - RDB: RDB based Persistence is enabled. Possible values: ["DISABLED", "RDB"] */ persistenceMode: string; /** * Output only. The next time that a snapshot attempt is scheduled to occur. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up * to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ rdbNextSnapshotTime: string; /** * Optional. Available snapshot periods for scheduling. * * - ONE_HOUR: Snapshot every 1 hour. * - SIX_HOURS: Snapshot every 6 hours. * - TWELVE_HOURS: Snapshot every 12 hours. * - TWENTY_FOUR_HOURS: Snapshot every 24 hours. Possible values: ["ONE_HOUR", "SIX_HOURS", "TWELVE_HOURS", "TWENTY_FOUR_HOURS"] */ rdbSnapshotPeriod: string; /** * Optional. Date and time that the first snapshot was/will be attempted, * and to which future snapshots will be aligned. If not provided, * the current time will be used. 
* A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution * and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ rdbSnapshotStartTime: string; } interface GetInstanceServerCaCert { /** * The certificate data in PEM format. */ cert: string; /** * The time when the certificate was created. */ createTime: string; /** * The time when the certificate expires. */ expireTime: string; /** * Serial number, as extracted from the certificate. */ serialNumber: string; /** * Sha1 Fingerprint of the certificate. */ sha1Fingerprint: string; } interface InstanceMaintenancePolicy { /** * (Output) * Output only. The time when the policy was created. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ createTime: string; /** * Optional. Description of what this policy is for. * Create/Update methods return INVALID_ARGUMENT if the * length is greater than 512. */ description?: string; /** * (Output) * Output only. The time when the policy was last updated. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ updateTime: string; /** * Optional. Maintenance window that is applied to resources covered by this policy. * Minimum 1. For the current version, the maximum number * of weeklyWindow is expected to be one. * Structure is documented below. */ weeklyMaintenanceWindows?: outputs.redis.InstanceMaintenancePolicyWeeklyMaintenanceWindow[]; } interface InstanceMaintenancePolicyWeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. * - MONDAY: Monday * - TUESDAY: Tuesday * - WEDNESDAY: Wednesday * - THURSDAY: Thursday * - FRIDAY: Friday * - SATURDAY: Saturday * - SUNDAY: Sunday * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. 
*/ day: string; /** * (Output) * Output only. Duration of the maintenance window. * The current window is fixed at 1 hour. * A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". */ duration: string; /** * Required. Start time of the window in UTC time. * Structure is documented below. */ startTime: outputs.redis.InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime; } interface InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime { /** * Hours of day in 24 hour format. Should be from 0 to 23. * An API may choose to allow the value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of hour of day. Must be from 0 to 59. */ minutes?: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos?: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. * An API may allow the value 60 if it allows leap-seconds. */ seconds?: number; } interface InstanceMaintenanceSchedule { /** * (Output) * Output only. The end time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ endTime: string; /** * (Output) * Output only. The deadline that the maintenance schedule start time * can not go beyond, including reschedule. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ scheduleDeadlineTime: string; /** * (Output) * Output only. The start time of any upcoming scheduled maintenance for this instance. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond * resolution and up to nine fractional digits. */ startTime: string; } interface InstanceNode { /** * (Output) * Node identifying string. e.g. 'node-0', 'node-1' */ id: string; /** * (Output) * Location of the node. */ zone: string; } interface InstancePersistenceConfig { /** * Optional. 
Controls whether Persistence features are enabled. If not provided, the existing value will be used. * - DISABLED: Persistence is disabled for the instance, and any existing snapshots are deleted. * - RDB: RDB based Persistence is enabled. * Possible values are: `DISABLED`, `RDB`. */ persistenceMode: string; /** * (Output) * Output only. The next time that a snapshot attempt is scheduled to occur. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up * to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ rdbNextSnapshotTime: string; /** * Optional. Available snapshot periods for scheduling. * - ONE_HOUR: Snapshot every 1 hour. * - SIX_HOURS: Snapshot every 6 hours. * - TWELVE_HOURS: Snapshot every 12 hours. * - TWENTY_FOUR_HOURS: Snapshot every 24 hours. * Possible values are: `ONE_HOUR`, `SIX_HOURS`, `TWELVE_HOURS`, `TWENTY_FOUR_HOURS`. */ rdbSnapshotPeriod?: string; /** * Optional. Date and time that the first snapshot was/will be attempted, * and to which future snapshots will be aligned. If not provided, * the current time will be used. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution * and up to nine fractional digits. * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ rdbSnapshotStartTime: string; } interface InstanceServerCaCert { /** * (Output) * The certificate data in PEM format. */ cert: string; /** * (Output) * The time when the certificate was created. */ createTime: string; /** * (Output) * The time when the certificate expires. */ expireTime: string; /** * (Output) * Serial number, as extracted from the certificate. */ serialNumber: string; /** * (Output) * Sha1 Fingerprint of the certificate. 
*/ sha1Fingerprint: string; } } export declare namespace runtimeconfig { interface ConfigIamBindingCondition { description?: string; expression: string; title: string; } interface ConfigIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace saasruntime { interface ReleaseBlueprint { /** * (Output) * Type of the engine used to actuate the blueprint. e.g. terraform, helm etc. */ engine: string; /** * URI to a blueprint used by the Unit (required unless unitKind or release is * set). */ package?: string; /** * (Output) * Version metadata if present on the blueprint. */ version: string; } interface ReleaseInputVariable { /** * Name of a supported variable type. Supported types are STRING, INT, BOOL. * Possible values are: `TYPE_UNSPECIFIED`, `STRING`, `INT`, `BOOL`. */ type?: string; /** * String encoded value for the variable. */ value?: string; /** * Name of the variable from actuation configs. */ variable: string; } interface ReleaseInputVariableDefault { /** * Name of a supported variable type. Supported types are STRING, INT, BOOL. * Possible values are: `TYPE_UNSPECIFIED`, `STRING`, `INT`, `BOOL`. */ type?: string; /** * String encoded value for the variable. */ value?: string; /** * Name of the variable from actuation configs. */ variable: string; } interface ReleaseOutputVariable { /** * Name of a supported variable type. Supported types are STRING, INT, BOOL. * Possible values are: `TYPE_UNSPECIFIED`, `STRING`, `INT`, `BOOL`. */ type?: string; /** * String encoded value for the variable. */ value?: string; /** * Name of the variable from actuation configs. */ variable: string; } interface ReleaseReleaseRequirements { /** * A list of releases from which a unit can be upgraded to this one * (optional). If left empty no constraints will be applied. When provided, * unit upgrade requests to this release will check and enforce this * constraint. 
         */
        upgradeableFromReleases?: string[];
    }
    interface RolloutKindErrorBudget {
        /**
         * The maximum number of failed units allowed in a location without pausing
         * the rollout.
         */
        allowedCount?: number;
        /**
         * The maximum percentage of units allowed to fail (0, 100] within a location
         * without pausing the rollout.
         */
        allowedPercentage?: number;
    }
    interface SaaSLocation {
        /**
         * Name of location.
         */
        name?: string;
    }
    interface UnitCondition {
        /**
         * Last time the condition transited from one status to another.
         */
        lastTransitionTime: string;
        /**
         * Human readable message indicating details about the last transition.
         */
        message: string;
        /**
         * Brief reason for the condition's last transition.
         */
        reason: string;
        /**
         * Status of the condition.
         * Possible values:
         * STATUS_UNKNOWN
         * STATUS_TRUE
         * STATUS_FALSE
         */
        status: string;
        /**
         * Type of the condition.
         * NOTE(review): the generated comment here repeated the variable-type text
         * ("Name of a supported variable type ... STRING/INT/BOOL"), which belongs to
         * the *InputVariable/*OutputVariable interfaces, not to a condition. The
         * concrete condition-type values are not visible in this file — confirm
         * against the SaaS Runtime API (cf. UnitOperationCondition.type, which
         * lists TYPE_* values).
         */
        type: string;
    }
    interface UnitDependency {
        /**
         * (Output)
         * Alias for the name of the dependency.
         */
        alias: string;
        /**
         * (Output)
         * A reference to the Unit object.
         */
        unit: string;
    }
    interface UnitDependent {
        /**
         * (Output)
         * Alias for the name of the dependency.
         */
        alias: string;
        /**
         * (Output)
         * A reference to the Unit object.
         */
        unit: string;
    }
    interface UnitInputVariable {
        /**
         * Name of a supported variable type. Supported types are string, int, bool.
         * Possible values:
         * STRING
         * INT
         * BOOL
         */
        type?: string;
        /**
         * String encoded value for the variable.
         */
        value?: string;
        /**
         * Name of the variable from actuation configs.
         */
        variable: string;
    }
    interface UnitKindDependency {
        /**
         * An alias for the dependency. Used for input variable mapping.
         */
        alias: string;
        /**
         * The unit kind of the dependency.
         */
        unitKind: string;
    }
    interface UnitKindInputVariableMapping {
        /**
         * Output variables whose values will be passed on to dependencies
         * Structure is documented below.
*/ from?: outputs.saasruntime.UnitKindInputVariableMappingFrom; /** * Input variables whose values will be passed on to dependencies * Structure is documented below. */ to?: outputs.saasruntime.UnitKindInputVariableMappingTo; /** * name of the variable */ variable: string; } interface UnitKindInputVariableMappingFrom { /** * Alias of the dependency that the outputVariable will pass its value to */ dependency: string; /** * Name of the outputVariable on the dependency */ outputVariable: string; } interface UnitKindInputVariableMappingTo { /** * Alias of the dependency that the inputVariable will pass its value to */ dependency: string; /** * Tells SaaS Runtime if this mapping should be used during lookup or not */ ignoreForLookup?: boolean; /** * Name of the inputVariable on the dependency */ inputVariable: string; } interface UnitKindOutputVariableMapping { /** * Output variables whose values will be passed on to dependencies * Structure is documented below. */ from?: outputs.saasruntime.UnitKindOutputVariableMappingFrom; /** * Input variables whose values will be passed on to dependencies * Structure is documented below. */ to?: outputs.saasruntime.UnitKindOutputVariableMappingTo; /** * name of the variable */ variable: string; } interface UnitKindOutputVariableMappingFrom { /** * Alias of the dependency that the outputVariable will pass its value to */ dependency: string; /** * Name of the outputVariable on the dependency */ outputVariable: string; } interface UnitKindOutputVariableMappingTo { /** * Alias of the dependency that the inputVariable will pass its value to */ dependency: string; /** * Tells SaaS Runtime if this mapping should be used during lookup or not */ ignoreForLookup?: boolean; /** * Name of the inputVariable on the dependency */ inputVariable: string; } interface UnitMaintenance { /** * If present, it fixes the release on the unit until the given time; i.e. * changes to the release field will be rejected. 
Rollouts should and will * also respect this by not requesting an upgrade in the first place. */ pinnedUntilTime?: string; } interface UnitOperationCondition { /** * (Output) * Last time the condition transited from one status to another. */ lastTransitionTime: string; /** * (Output) * Human readable message indicating details about the last transition. */ message: string; /** * (Output) * Brief reason for the condition's last transition. */ reason: string; /** * (Output) * Status of the condition. * Possible values: * STATUS_UNKNOWN * STATUS_TRUE * STATUS_FALSE */ status: string; /** * (Output) * Type of the condition. * Possible values: * TYPE_SCHEDULED * TYPE_RUNNING * TYPE_SUCCEEDED * TYPE_CANCELLED */ type: string; } interface UnitOperationDeprovision { } interface UnitOperationProvision { /** * Set of input variables. Maximum 100. (optional) * Structure is documented below. */ inputVariables?: outputs.saasruntime.UnitOperationProvisionInputVariable[]; /** * Reference to the Release object to use for the Unit. (optional). */ release?: string; } interface UnitOperationProvisionInputVariable { /** * Name of a supported variable type. Supported types are string, int, bool. * Possible values: * STRING * INT * BOOL */ type?: string; /** * String encoded value for the variable. */ value?: string; /** * Name of the variable from actuation configs. */ variable: string; } interface UnitOperationUpgrade { /** * Set of input variables. Maximum 100. (optional) * Structure is documented below. */ inputVariables?: outputs.saasruntime.UnitOperationUpgradeInputVariable[]; /** * Reference to the Release object to use for the Unit. (optional). */ release?: string; } interface UnitOperationUpgradeInputVariable { /** * Name of a supported variable type. Supported types are string, int, bool. * Possible values: * STRING * INT * BOOL */ type?: string; /** * String encoded value for the variable. */ value?: string; /** * Name of the variable from actuation configs. 
*/ variable: string; } interface UnitOutputVariable { /** * Name of a supported variable type. Supported types are string, int, bool. * Possible values: * STRING * INT * BOOL */ type?: string; /** * String encoded value for the variable. */ value?: string; /** * Name of the variable from actuation configs. */ variable: string; } } export declare namespace secretmanager { interface GetRegionalSecretCustomerManagedEncryption { /** * The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. */ kmsKeyName: string; } interface GetRegionalSecretRotation { /** * Timestamp in UTC at which the Secret is scheduled to rotate. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine * fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ nextRotationTime: string; /** * The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) * and at most 3153600000s (100 years). If rotationPeriod is set, 'next_rotation_time' must * be set. 'next_rotation_time' will be advanced by this period when the service * automatically sends rotation notifications. */ rotationPeriod: string; } interface GetRegionalSecretTopic { /** * The resource name of the Pub/Sub topic that will be published to, in the following format: * projects/*/topics/*. For publication to succeed, the Secret Manager Service * Agent service account must have pubsub.publisher permissions on the topic. */ name: string; } interface GetRegionalSecretVersionCustomerManagedEncryption { /** * The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. */ kmsKeyVersionName: string; } interface GetRegionalSecretsSecret { /** * Custom metadata about the regional secret. */ annotations: { [key: string]: string; }; /** * The time at which the regional secret was created. */ createTime: string; /** * Customer Managed Encryption for the regional secret. * Structure is documented below. 
         */
        customerManagedEncryptions: outputs.secretmanager.GetRegionalSecretsSecretCustomerManagedEncryption[];
        /**
         * Whether Terraform will be prevented from destroying the regional secret. Defaults to false.
         * When the field is set to true in Terraform state, a 'terraform apply'
         * or 'terraform destroy' that would delete the regional secret will fail.
         */
        deletionProtection: boolean;
        /**
         * All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.
         */
        effectiveAnnotations: {
            [key: string]: string;
        };
        /**
         * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.
         */
        effectiveLabels: {
            [key: string]: string;
        };
        /**
         * Timestamp in UTC when the regional secret is scheduled to expire.
         */
        expireTime: string;
        /**
         * The labels assigned to this regional secret.
         */
        labels: {
            [key: string]: string;
        };
        /**
         * The location of the regional secret.
         */
        location: string;
        /**
         * The resource name of the regional secret.
         * (The generated doc previously said "Pub/Sub topic", copy-pasted from the
         * GetRegionalSecretsSecretTopic interface; this field names the secret itself.)
         */
        name: string;
        /**
         * The ID of the project.
         */
        project: string;
        /**
         * The combination of labels configured directly on the resource
         * and default labels configured on the provider.
         */
        pulumiLabels: {
            [key: string]: string;
        };
        /**
         * The rotation time and period for a regional secret.
         * Structure is documented below.
         */
        rotations: outputs.secretmanager.GetRegionalSecretsSecretRotation[];
        /**
         * The unique name of the resource.
         */
        secretId: string;
        /**
         * A map of resource manager tags.
         * Resource manager tag keys and values have the same definition as resource manager tags.
         * Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}.
         */
        tags: {
            [key: string]: string;
        };
        /**
         * A list of up to 10 Pub/Sub topics to which messages are published when control plane operations are called on the regional secret or its versions.
* Structure is documented below. */ topics: outputs.secretmanager.GetRegionalSecretsSecretTopic[]; /** * The TTL for the regional secret. A duration in seconds with up to nine fractional digits, * terminated by 's'. Example: "3.5s". Only one of 'ttl' or 'expire_time' can be provided. */ ttl: string; /** * Mapping from version alias to version name. */ versionAliases: { [key: string]: string; }; /** * The version destroy ttl for the regional secret version. */ versionDestroyTtl: string; } interface GetRegionalSecretsSecretCustomerManagedEncryption { /** * Describes the Cloud KMS encryption key that will be used to protect destination secret. */ kmsKeyName: string; } interface GetRegionalSecretsSecretRotation { /** * Timestamp in UTC at which the secret is scheduled to rotate. */ nextRotationTime: string; /** * The Duration between rotation notifications. */ rotationPeriod: string; } interface GetRegionalSecretsSecretTopic { /** * The resource name of the Pub/Sub topic that will be published to. */ name: string; } interface GetSecretReplication { /** * The Secret will automatically be replicated without any restrictions. */ autos: outputs.secretmanager.GetSecretReplicationAuto[]; /** * The Secret will be replicated to the regions specified by the user. */ userManageds: outputs.secretmanager.GetSecretReplicationUserManaged[]; } interface GetSecretReplicationAuto { /** * The customer-managed encryption configuration of the Secret. * If no configuration is provided, Google-managed default * encryption is used. */ customerManagedEncryptions: outputs.secretmanager.GetSecretReplicationAutoCustomerManagedEncryption[]; } interface GetSecretReplicationAutoCustomerManagedEncryption { /** * The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. */ kmsKeyName: string; } interface GetSecretReplicationUserManaged { /** * The list of Replicas for this Secret. Cannot be empty. 
*/ replicas: outputs.secretmanager.GetSecretReplicationUserManagedReplica[]; } interface GetSecretReplicationUserManagedReplica { /** * Customer Managed Encryption for the secret. */ customerManagedEncryptions: outputs.secretmanager.GetSecretReplicationUserManagedReplicaCustomerManagedEncryption[]; /** * The canonical IDs of the location to replicate data. For example: "us-east1". */ location: string; } interface GetSecretReplicationUserManagedReplicaCustomerManagedEncryption { /** * Describes the Cloud KMS encryption key that will be used to protect destination secret. */ kmsKeyName: string; } interface GetSecretRotation { /** * Timestamp in UTC at which the Secret is scheduled to rotate. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ nextRotationTime: string; /** * The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). * If rotationPeriod is set, 'next_rotation_time' must be set. 'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications. */ rotationPeriod: string; } interface GetSecretTopic { /** * The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*. * For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic. */ name: string; } interface GetSecretsSecret { /** * Custom metadata about the secret. */ annotations: { [key: string]: string; }; /** * The time at which the Secret was created. */ createTime: string; /** * Whether Terraform will be prevented from destroying the secret. Defaults to false. * When the field is set to true in Terraform state, a 'terraform apply' * or 'terraform destroy' that would delete the secret will fail. 
         */
        deletionProtection: boolean;
        /**
         * All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.
         */
        effectiveAnnotations: {
            [key: string]: string;
        };
        /**
         * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.
         */
        effectiveLabels: {
            [key: string]: string;
        };
        /**
         * Timestamp in UTC when the Secret is scheduled to expire.
         */
        expireTime: string;
        /**
         * The labels assigned to this Secret.
         */
        labels: {
            [key: string]: string;
        };
        /**
         * The resource name of the Secret.
         * (The generated doc previously said "Pub/Sub topic", copy-pasted from the
         * GetSecretsSecretTopic interface; this field names the Secret itself.)
         */
        name: string;
        /**
         * The ID of the project.
         */
        project: string;
        /**
         * The combination of labels configured directly on the resource
         * and default labels configured on the provider.
         */
        pulumiLabels: {
            [key: string]: string;
        };
        /**
         * The replication policy of the secret data attached to the Secret.
         * Structure is documented below.
         */
        replications: outputs.secretmanager.GetSecretsSecretReplication[];
        /**
         * The rotation time and period for a Secret.
         * Structure is documented below.
         */
        rotations: outputs.secretmanager.GetSecretsSecretRotation[];
        /**
         * This must be unique within the project.
         */
        secretId: string;
        /**
         * A map of resource manager tags.
         * Resource manager tag keys and values have the same definition as resource manager tags.
         * Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id}.
         */
        tags: {
            [key: string]: string;
        };
        /**
         * A list of up to 10 Pub/Sub topics to which messages are published when control plane operations are called on the secret or its versions.
         * Structure is documented below.
         */
        topics: outputs.secretmanager.GetSecretsSecretTopic[];
        /**
         * The TTL for the Secret.
         * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
         * Only one of 'ttl' or 'expire_time' can be provided.
*/ ttl: string; /** * Mapping from version alias to version name. */ versionAliases: { [key: string]: string; }; /** * The version destroy ttl for the secret version. */ versionDestroyTtl: string; } interface GetSecretsSecretReplication { /** * The Secret will automatically be replicated without any restrictions. * Structure is documented below. */ autos: outputs.secretmanager.GetSecretsSecretReplicationAuto[]; /** * The Secret will be replicated to the regions specified by the user. * Structure is documented below. */ userManageds: outputs.secretmanager.GetSecretsSecretReplicationUserManaged[]; } interface GetSecretsSecretReplicationAuto { /** * Customer Managed Encryption for the secret. * Structure is documented below. */ customerManagedEncryptions: outputs.secretmanager.GetSecretsSecretReplicationAutoCustomerManagedEncryption[]; } interface GetSecretsSecretReplicationAutoCustomerManagedEncryption { /** * Describes the Cloud KMS encryption key that will be used to protect destination secret. */ kmsKeyName: string; } interface GetSecretsSecretReplicationUserManaged { /** * The list of Replicas for this Secret. * Structure is documented below. */ replicas: outputs.secretmanager.GetSecretsSecretReplicationUserManagedReplica[]; } interface GetSecretsSecretReplicationUserManagedReplica { /** * Customer Managed Encryption for the secret. * Structure is documented below. */ customerManagedEncryptions: outputs.secretmanager.GetSecretsSecretReplicationUserManagedReplicaCustomerManagedEncryption[]; /** * The canonical IDs of the location to replicate data. */ location: string; } interface GetSecretsSecretReplicationUserManagedReplicaCustomerManagedEncryption { /** * Describes the Cloud KMS encryption key that will be used to protect destination secret. */ kmsKeyName: string; } interface GetSecretsSecretRotation { /** * Timestamp in UTC at which the Secret is scheduled to rotate. */ nextRotationTime: string; /** * The Duration between rotation notifications. 
*/ rotationPeriod: string; } interface GetSecretsSecretTopic { /** * The resource name of the Pub/Sub topic that will be published to. */ name: string; } interface RegionalSecretCustomerManagedEncryption { /** * The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. */ kmsKeyName: string; } interface RegionalSecretIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface RegionalSecretIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface RegionalSecretRotation { /** * Timestamp in UTC at which the Secret is scheduled to rotate. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine * fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ nextRotationTime?: string; /** * The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) * and at most 3153600000s (100 years). If rotationPeriod is set, `nextRotationTime` must * be set. `nextRotationTime` will be advanced by this period when the service * automatically sends rotation notifications. */ rotationPeriod?: string; } interface RegionalSecretTopic { /** * The resource name of the Pub/Sub topic that will be published to, in the following format: * projects/*/topics/*. For publication to succeed, the Secret Manager Service * Agent service account must have pubsub.publisher permissions on the topic. */ name: string; } interface RegionalSecretVersionCustomerManagedEncryption { /** * (Output) * The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. */ kmsKeyVersionName: string; } interface SecretIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SecretIamMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface SecretReplication { /** * The Secret will automatically be replicated without any restrictions. * Structure is documented below. */ auto?: outputs.secretmanager.SecretReplicationAuto; /** * The Secret will be replicated to the regions specified by the user. * Structure is documented below. */ userManaged?: outputs.secretmanager.SecretReplicationUserManaged; } interface SecretReplicationAuto { /** * The customer-managed encryption configuration of the Secret. * If no configuration is provided, Google-managed default * encryption is used. * Structure is documented below. */ customerManagedEncryption?: outputs.secretmanager.SecretReplicationAutoCustomerManagedEncryption; } interface SecretReplicationAutoCustomerManagedEncryption { /** * Describes the Cloud KMS encryption key that will be used to protect destination secret. */ kmsKeyName: string; } interface SecretReplicationUserManaged { /** * The list of Replicas for this Secret. Cannot be empty. * Structure is documented below. */ replicas: outputs.secretmanager.SecretReplicationUserManagedReplica[]; } interface SecretReplicationUserManagedReplica { /** * Customer Managed Encryption for the secret. * Structure is documented below. */ customerManagedEncryption?: outputs.secretmanager.SecretReplicationUserManagedReplicaCustomerManagedEncryption; /** * The canonical IDs of the location to replicate data. For example: "us-east1". */ location: string; } interface SecretReplicationUserManagedReplicaCustomerManagedEncryption { /** * Describes the Cloud KMS encryption key that will be used to protect destination secret. 
*/ kmsKeyName: string; } interface SecretRotation { /** * Timestamp in UTC at which the Secret is scheduled to rotate. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ nextRotationTime?: string; /** * The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). * If rotationPeriod is set, `nextRotationTime` must be set. `nextRotationTime` will be advanced by this period when the service automatically sends rotation notifications. */ rotationPeriod?: string; } interface SecretTopic { /** * The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*. * For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic. */ name: string; } } export declare namespace securesourcemanager { interface HookPushOption { /** * Trigger hook for matching branches only. * Specified as glob pattern. If empty or *, events for all branches are * reported. Examples: main, {main,release*}. * See https://pkg.go.dev/github.com/gobwas/glob documentation. */ branchFilter?: string; } interface InstanceHostConfig { /** * (Output) * API hostname. */ api: string; /** * (Output) * Git HTTP hostname. */ gitHttp: string; /** * (Output) * Git SSH hostname. */ gitSsh: string; /** * (Output) * HTML hostname. */ html: string; } interface InstanceIamBindingCondition { description?: string; expression: string; title: string; } interface InstanceIamMemberCondition { description?: string; expression: string; title: string; } interface InstancePrivateConfig { /** * CA pool resource, resource must in the format of `projects/{project}/locations/{location}/caPools/{ca_pool}`. 
*/ caPool?: string; /** * (Output) * Service Attachment for HTTP, resource is in the format of `projects/{project}/regions/{region}/serviceAttachments/{service_attachment}`. */ httpServiceAttachment: string; /** * 'Indicate if it's private instance.' */ isPrivate: boolean; /** * (Output) * Service Attachment for SSH, resource is in the format of `projects/{project}/regions/{region}/serviceAttachments/{service_attachment}`. */ sshServiceAttachment: string; } interface InstanceWorkforceIdentityFederationConfig { /** * 'Whether Workforce Identity Federation is enabled.' */ enabled: boolean; } interface RepositoryIamBindingCondition { description?: string; expression: string; title: string; } interface RepositoryIamMemberCondition { description?: string; expression: string; title: string; } interface RepositoryInitialConfig { /** * Default branch name of the repository. */ defaultBranch?: string; /** * List of gitignore template names user can choose from. * Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. */ gitignores?: string[]; /** * License template name user can choose from. * Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. */ license?: string; /** * README template name. * Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. */ readme?: string; } interface RepositoryUri { /** * (Output) * API is the URI for API access. */ api: string; /** * (Output) * gitHttps is the git HTTPS URI for git operations. */ gitHttps: string; /** * (Output) * HTML is the URI for the user to view the repository in a browser. */ html: string; } } export declare namespace securitycenter { interface FolderCustomModuleCustomConfig { /** * Custom output properties. * Structure is documented below. 
*/ customOutput?: outputs.securitycenter.FolderCustomModuleCustomConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. This explanation is returned with each finding instance to * help investigators understand the detected issue. The text must be enclosed in quotation marks. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression evaluates * to true against a resource, a finding is generated. * Structure is documented below. */ predicate: outputs.securitycenter.FolderCustomModuleCustomConfigPredicate; /** * An explanation of the recommended steps that security teams can take to resolve * the detected issue. This explanation is returned with each finding generated by * this module in the nextSteps property of the finding JSON. */ recommendation: string; /** * The resource types that the custom module operates on. Each custom module * can specify up to 5 resource types. * Structure is documented below. */ resourceSelector: outputs.securitycenter.FolderCustomModuleCustomConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity: string; } interface FolderCustomModuleCustomConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securitycenter.FolderCustomModuleCustomConfigCustomOutputProperty[]; } interface FolderCustomModuleCustomConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name?: string; /** * The CEL expression for the custom output. A resource property can be specified * to return the value of the property or a text string enclosed in quotation marks. * Structure is documented below. 
*/ valueExpression?: outputs.securitycenter.FolderCustomModuleCustomConfigCustomOutputPropertyValueExpression; } interface FolderCustomModuleCustomConfigCustomOutputPropertyValueExpression { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface FolderCustomModuleCustomConfigPredicate { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface FolderCustomModuleCustomConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface FolderNotificationConfigStreamingConfig { /** * Expression that defines the filter to apply across create/update * events of assets or findings as specified by the event type. The * expression is a list of zero or more restrictions combined via * logical operators AND and OR. Parentheses are supported, and OR * has higher precedence than AND. * Restrictions have the form and may have * a - character in front of them to indicate negation. 
The fields * map to those defined in the corresponding resource. * The supported operators are: * * = for all value types. * * >, <, >=, <= for integer values. * * :, meaning substring matching, for strings. * The supported value types are: * * string literals in quotes. * * integer literals without quotes. * * boolean literals true and false without quotes. * See * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) * for information on how to write a filter. */ filter: string; } interface InstanceIamBindingCondition { /** * An optional description of the instance. */ description?: string; expression: string; title: string; } interface InstanceIamMemberCondition { /** * An optional description of the instance. */ description?: string; expression: string; title: string; } interface ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfig { /** * Custom output properties. * Structure is documented below. */ customOutput?: outputs.securitycenter.ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. This explanation is returned with each finding instance to * help investigators understand the detected issue. The text must be enclosed in quotation marks. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression evaluates * to true against a resource, a finding is generated. * Structure is documented below. */ predicate?: outputs.securitycenter.ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigPredicate; /** * An explanation of the recommended steps that security teams can take to resolve * the detected issue. This explanation is returned with each finding generated by * this module in the nextSteps property of the finding JSON. */ recommendation?: string; /** * The resource types that the custom module operates on. 
Each custom module * can specify up to 5 resource types. * Structure is documented below. */ resourceSelector?: outputs.securitycenter.ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity?: string; } interface ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securitycenter.ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputProperty[]; } interface ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name?: string; /** * The CEL expression for the custom output. A resource property can be specified * to return the value of the property or a text string enclosed in quotation marks. * Structure is documented below. */ valueExpression?: outputs.securitycenter.ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputPropertyValueExpression; } interface ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputPropertyValueExpression { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigPredicate { /** * Description of the expression. 
This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ManagementFolderSecurityHealthAnalyticsCustomModuleCustomConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfig { /** * Custom output properties. * Structure is documented below. */ customOutput?: outputs.securitycenter.ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. This explanation is returned with each finding instance to * help investigators understand the detected issue. The text must be enclosed in quotation marks. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression evaluates * to true against a resource, a finding is generated. * Structure is documented below. */ predicate: outputs.securitycenter.ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigPredicate; /** * An explanation of the recommended steps that security teams can take to resolve * the detected issue. This explanation is returned with each finding generated by * this module in the nextSteps property of the finding JSON. */ recommendation: string; /** * The resource types that the custom module operates on. Each custom module * can specify up to 5 resource types. * Structure is documented below. 
*/ resourceSelector: outputs.securitycenter.ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity: string; } interface ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securitycenter.ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputProperty[]; } interface ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name?: string; /** * The CEL expression for the custom output. A resource property can be specified * to return the value of the property or a text string enclosed in quotation marks. * Structure is documented below. */ valueExpression?: outputs.securitycenter.ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputPropertyValueExpression; } interface ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputPropertyValueExpression { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigPredicate { /** * Description of the expression. 
This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ManagementOrganizationSecurityHealthAnalyticsCustomModuleCustomConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfig { /** * Custom output properties. * Structure is documented below. */ customOutput?: outputs.securitycenter.ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. This explanation is returned with each finding instance to * help investigators understand the detected issue. The text must be enclosed in quotation marks. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression evaluates * to true against a resource, a finding is generated. * Structure is documented below. */ predicate: outputs.securitycenter.ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigPredicate; /** * An explanation of the recommended steps that security teams can take to resolve * the detected issue. This explanation is returned with each finding generated by * this module in the nextSteps property of the finding JSON. */ recommendation: string; /** * The resource types that the custom module operates on. Each custom module * can specify up to 5 resource types. * Structure is documented below. 
*/ resourceSelector: outputs.securitycenter.ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity: string; } interface ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securitycenter.ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputProperty[]; } interface ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name?: string; /** * The CEL expression for the custom output. A resource property can be specified * to return the value of the property or a text string enclosed in quotation marks. * Structure is documented below. */ valueExpression?: outputs.securitycenter.ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputPropertyValueExpression; } interface ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigCustomOutputPropertyValueExpression { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigPredicate { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. 
*/ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ManagementProjectSecurityHealthAnalyticsCustomModuleCustomConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface NotificationConfigStreamingConfig { /** * Expression that defines the filter to apply across create/update * events of assets or findings as specified by the event type. The * expression is a list of zero or more restrictions combined via * logical operators AND and OR. Parentheses are supported, and OR * has higher precedence than AND. * Restrictions have the form and may have * a - character in front of them to indicate negation. The fields * map to those defined in the corresponding resource. * The supported operators are: * * = for all value types. * * >, <, >=, <= for integer values. * * :, meaning substring matching, for strings. * The supported value types are: * * string literals in quotes. * * integer literals without quotes. * * boolean literals true and false without quotes. * See * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) * for information on how to write a filter. */ filter: string; } interface OrganizationCustomModuleCustomConfig { /** * Custom output properties. * Structure is documented below. */ customOutput?: outputs.securitycenter.OrganizationCustomModuleCustomConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. 
This explanation is returned with each finding instance to * help investigators understand the detected issue. The text must be enclosed in quotation marks. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression evaluates * to true against a resource, a finding is generated. * Structure is documented below. */ predicate: outputs.securitycenter.OrganizationCustomModuleCustomConfigPredicate; /** * An explanation of the recommended steps that security teams can take to resolve * the detected issue. This explanation is returned with each finding generated by * this module in the nextSteps property of the finding JSON. */ recommendation: string; /** * The resource types that the custom module operates on. Each custom module * can specify up to 5 resource types. * Structure is documented below. */ resourceSelector: outputs.securitycenter.OrganizationCustomModuleCustomConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity: string; } interface OrganizationCustomModuleCustomConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securitycenter.OrganizationCustomModuleCustomConfigCustomOutputProperty[]; } interface OrganizationCustomModuleCustomConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name?: string; /** * The CEL expression for the custom output. A resource property can be specified * to return the value of the property or a text string enclosed in quotation marks. * Structure is documented below. */ valueExpression?: outputs.securitycenter.OrganizationCustomModuleCustomConfigCustomOutputPropertyValueExpression; } interface OrganizationCustomModuleCustomConfigCustomOutputPropertyValueExpression { /** * Description of the expression. This is a longer text which describes the * expression, e.g. 
when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface OrganizationCustomModuleCustomConfigPredicate { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface OrganizationCustomModuleCustomConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface ProjectCustomModuleCustomConfig { /** * Custom output properties. * Structure is documented below. */ customOutput?: outputs.securitycenter.ProjectCustomModuleCustomConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. This explanation is returned with each finding instance to * help investigators understand the detected issue. The text must be enclosed in quotation marks. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression evaluates * to true against a resource, a finding is generated. * Structure is documented below. 
*/ predicate: outputs.securitycenter.ProjectCustomModuleCustomConfigPredicate; /** * An explanation of the recommended steps that security teams can take to resolve * the detected issue. This explanation is returned with each finding generated by * this module in the nextSteps property of the finding JSON. */ recommendation: string; /** * The resource types that the custom module operates on. Each custom module * can specify up to 5 resource types. * Structure is documented below. */ resourceSelector: outputs.securitycenter.ProjectCustomModuleCustomConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity: string; } interface ProjectCustomModuleCustomConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securitycenter.ProjectCustomModuleCustomConfigCustomOutputProperty[]; } interface ProjectCustomModuleCustomConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name?: string; /** * The CEL expression for the custom output. A resource property can be specified * to return the value of the property or a text string enclosed in quotation marks. * Structure is documented below. */ valueExpression?: outputs.securitycenter.ProjectCustomModuleCustomConfigCustomOutputPropertyValueExpression; } interface ProjectCustomModuleCustomConfigCustomOutputPropertyValueExpression { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. 
This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ProjectCustomModuleCustomConfigPredicate { /** * Description of the expression. This is a longer text which describes the * expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a * file name and a position in the file. */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. This can * be used e.g. in UIs which allow to enter the expression. */ title?: string; } interface ProjectCustomModuleCustomConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface ProjectNotificationConfigStreamingConfig { /** * Expression that defines the filter to apply across create/update * events of assets or findings as specified by the event type. The * expression is a list of zero or more restrictions combined via * logical operators AND and OR. Parentheses are supported, and OR * has higher precedence than AND. * Restrictions have the form and may have * a - character in front of them to indicate negation. The fields * map to those defined in the corresponding resource. * The supported operators are: * * = for all value types. * * >, <, >=, <= for integer values. * * :, meaning substring matching, for strings. * The supported value types are: * * string literals in quotes. * * integer literals without quotes. * * boolean literals true and false without quotes. * See * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) * for information on how to write a filter. */ filter: string; } interface SourceIamBindingCondition { /** * The description of the source (max of 1024 characters). 
*/ description?: string; expression: string; title: string; } interface SourceIamMemberCondition { /** * The description of the source (max of 1024 characters). */ description?: string; expression: string; title: string; } interface V2FolderNotificationConfigStreamingConfig { /** * Expression that defines the filter to apply across create/update * events of assets or findings as specified by the event type. The * expression is a list of zero or more restrictions combined via * logical operators AND and OR. Parentheses are supported, and OR * has higher precedence than AND. * Restrictions have the form and may have * a - character in front of them to indicate negation. The fields * map to those defined in the corresponding resource. * The supported operators are: * * = for all value types. * * >, <, >=, <= for integer values. * * :, meaning substring matching, for strings. * The supported value types are: * * string literals in quotes. * * integer literals without quotes. * * boolean literals true and false without quotes. * See * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) * for information on how to write a filter. */ filter: string; } interface V2OrganizationNotificationConfigStreamingConfig { /** * Expression that defines the filter to apply across create/update * events of assets or findings as specified by the event type. The * expression is a list of zero or more restrictions combined via * logical operators AND and OR. Parentheses are supported, and OR * has higher precedence than AND. * Restrictions have the form and may have * a - character in front of them to indicate negation. The fields * map to those defined in the corresponding resource. * The supported operators are: * * = for all value types. * * >, <, >=, <= for integer values. * * :, meaning substring matching, for strings. * The supported value types are: * * string literals in quotes. * * integer literals without quotes. 
* * boolean literals true and false without quotes. * See * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) * for information on how to write a filter. */ filter: string; } interface V2OrganizationSourceIamBindingCondition { description?: string; expression: string; title: string; } interface V2OrganizationSourceIamMemberCondition { description?: string; expression: string; title: string; } interface V2ProjectNotificationConfigStreamingConfig { /** * Expression that defines the filter to apply across create/update * events of assets or findings as specified by the event type. The * expression is a list of zero or more restrictions combined via * logical operators AND and OR. Parentheses are supported, and OR * has higher precedence than AND. * Restrictions have the form and may have * a - character in front of them to indicate negation. The fields * map to those defined in the corresponding resource. * The supported operators are: * * = for all value types. * * >, <, >=, <= for integer values. * * :, meaning substring matching, for strings. * The supported value types are: * * string literals in quotes. * * integer literals without quotes. * * boolean literals true and false without quotes. * See * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) * for information on how to write a filter. */ filter: string; } } export declare namespace securityposture { interface PosturePolicySet { /** * Description of the policy set. */ description?: string; /** * List of security policy * Structure is documented below. */ policies: outputs.securityposture.PosturePolicySetPolicy[]; /** * ID of the policy set. */ policySetId: string; } interface PosturePolicySetPolicy { /** * Mapping for policy to security standards and controls. * Structure is documented below. 
*/ complianceStandards?: outputs.securityposture.PosturePolicySetPolicyComplianceStandard[]; /** * Policy constraint definition.It can have the definition of one of following constraints: orgPolicyConstraint orgPolicyConstraintCustom securityHealthAnalyticsModule securityHealthAnalyticsCustomModule * Structure is documented below. */ constraint: outputs.securityposture.PosturePolicySetPolicyConstraint; /** * Description of the policy. */ description?: string; /** * ID of the policy. */ policyId: string; } interface PosturePolicySetPolicyComplianceStandard { /** * Mapping of security controls for the policy. */ control?: string; /** * Mapping of compliance standards for the policy. */ standard?: string; } interface PosturePolicySetPolicyConstraint { /** * Organization policy canned constraint definition. * Structure is documented below. */ orgPolicyConstraint?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraint; /** * Organization policy custom constraint policy definition. * Structure is documented below. */ orgPolicyConstraintCustom?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustom; /** * Definition of Security Health Analytics Custom Module. * Structure is documented below. */ securityHealthAnalyticsCustomModule?: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModule; /** * Security Health Analytics built-in detector definition. * Structure is documented below. */ securityHealthAnalyticsModule?: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsModule; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraint { /** * Organization policy canned constraint Id */ cannedConstraintId: string; /** * Definition of policy rules * Structure is documented below. 
*/ policyRules: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRule[]; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintCustom { /** * Organization policy custom constraint definition. * Structure is documented below. */ customConstraint?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomCustomConstraint; /** * Definition of policy rules * Structure is documented below. */ policyRules: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRule[]; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomCustomConstraint { /** * The action to take if the condition is met. * Possible values are: `ALLOW`, `DENY`. */ actionType: string; /** * A CEL condition that refers to a supported service resource, for example `resource.management.autoUpgrade == false`. For details about CEL usage, see [Common Expression Language](https://docs.cloud.google.com/resource-manager/docs/organization-policy/creating-managing-custom-constraints#common_expression_language). */ condition: string; /** * A human-friendly description of the constraint to display as an error message when the policy is violated. */ description?: string; /** * A human-friendly name for the constraint. */ displayName?: string; /** * A list of RESTful methods for which to enforce the constraint. Can be `CREATE`, `UPDATE`, or both. Not all Google Cloud services support both methods. To see supported methods for each service, find the service in [Supported services](https://docs.cloud.google.com/resource-manager/docs/organization-policy/custom-constraint-supported-services). */ methodTypes: string[]; /** * Immutable. The name of the custom constraint. This is unique within the organization. */ name: string; /** * Immutable. The fully qualified name of the Google Cloud REST resource containing the object and field you want to restrict. For example, `container.googleapis.com/NodePool`. 
*/ resourceTypes: string[]; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRule { /** * Setting this to true means that all values are allowed. This field can be set only in policies for list constraints. */ allowAll?: boolean; /** * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. * This page details the objects and attributes that are used to the build the CEL expressions for * custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec. * Structure is documented below. */ condition?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleCondition; /** * Setting this to true means that all values are denied. This field can be set only in policies for list constraints. */ denyAll?: boolean; /** * If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. * This field can be set only in policies for boolean constraints. */ enforce?: boolean; /** * List of values to be used for this policy rule. This field can be set only in policies for list constraints. * Structure is documented below. */ values?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleValues; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleCondition { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. */ title?: string; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleValues { /** * List of values allowed at this resource. 
*/ allowedValues?: string[]; /** * List of values denied at this resource. */ deniedValues?: string[]; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRule { /** * Setting this to true means that all values are allowed. This field can be set only in policies for list constraints. */ allowAll?: boolean; /** * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. * This page details the objects and attributes that are used to the build the CEL expressions for * custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec. * Structure is documented below. */ condition?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleCondition; /** * Setting this to true means that all values are denied. This field can be set only in policies for list constraints. */ denyAll?: boolean; /** * If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. * This field can be set only in policies for boolean constraints. */ enforce?: boolean; /** * List of values to be used for this policy rule. This field can be set only in policies for list constraints. * Structure is documented below. */ values?: outputs.securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleValues; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleCondition { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. */ title?: string; } interface PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleValues { /** * List of values allowed at this resource. 
*/ allowedValues?: string[]; /** * List of values denied at this resource. */ deniedValues?: string[]; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModule { /** * Custom module details. * Structure is documented below. */ config: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfig; /** * The display name of the Security Health Analytics custom module. This * display name becomes the finding category for all findings that are * returned by this custom module. */ displayName?: string; /** * (Output) * A server generated id of custom module. */ id: string; /** * The state of enablement for the module at its level of the resource hierarchy. * Possible values are: `ENABLEMENT_STATE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ moduleEnablementState?: string; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfig { /** * Custom output properties. A set of optional name-value pairs that define custom source properties to * return with each finding that is generated by the custom module. The custom * source properties that are defined here are included in the finding JSON * under `sourceProperties`. * Structure is documented below. */ customOutput?: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutput; /** * Text that describes the vulnerability or misconfiguration that the custom * module detects. */ description?: string; /** * The CEL expression to evaluate to produce findings. When the expression * evaluates to true against a resource, a finding is generated. * Structure is documented below. */ predicate: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigPredicate; /** * An explanation of the recommended steps that security teams can take to * resolve the detected issue */ recommendation?: string; /** * The resource types that the custom module operates on. 
Each custom module * can specify up to 5 resource types. * Structure is documented below. */ resourceSelector: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigResourceSelector; /** * The severity to assign to findings generated by the module. * Possible values are: `SEVERITY_UNSPECIFIED`, `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. */ severity: string; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutput { /** * A list of custom output properties to add to the finding. * Structure is documented below. */ properties?: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputProperty[]; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputProperty { /** * Name of the property for the custom output. */ name: string; /** * The CEL expression for the custom output. A resource property can be * specified to return the value of the property or a text string enclosed * in quotation marks. * Structure is documented below. */ valueExpression?: outputs.securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyValueExpression; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyValueExpression { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. 
*/ title?: string; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigPredicate { /** * Description of the expression */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * String indicating the location of the expression for error reporting, e.g. a file name and a position in the file */ location?: string; /** * Title for the expression, i.e. a short string describing its purpose. */ title?: string; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigResourceSelector { /** * The resource types to run the detector on. */ resourceTypes: string[]; } interface PosturePolicySetPolicyConstraintSecurityHealthAnalyticsModule { /** * The state of enablement for the module at its level of the resource hierarchy. * Possible values are: `ENABLEMENT_STATE_UNSPECIFIED`, `ENABLED`, `DISABLED`. */ moduleEnablementState?: string; /** * The name of the module eg: BIGQUERY_TABLE_CMEK_DISABLED. */ moduleName: string; } } export declare namespace serviceaccount { interface GetSAccount { /** * The Google service account ID (the part before the `@` sign in the `email`) */ accountId: string; /** * Whether a service account is disabled or not. */ disabled: boolean; /** * The display name for the service account. */ displayName: string; /** * The e-mail address of the service account. This value * should be referenced from any `gcp.organizations.getIAMPolicy` data sources * that would grant the service account privileges. */ email: string; /** * The Identity of the service account in the form `serviceAccount:{email}`. This value is often used to refer to the service account in order to grant IAM permissions. */ member: string; /** * The fully-qualified name of the service account. */ name: string; /** * The unique id of the service account. */ uniqueId: string; } interface IAMBindingCondition { /** * An optional description of the expression. 
This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface IAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } } export declare namespace servicedirectory { interface NamespaceIamBindingCondition { description?: string; expression: string; title: string; } interface NamespaceIamMemberCondition { description?: string; expression: string; title: string; } interface ServiceIamBindingCondition { description?: string; expression: string; title: string; } interface ServiceIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace siteverification { interface WebResourceSite { /** * The site identifier. If the type is set to SITE, the identifier is a URL. 
If the type is * set to INET_DOMAIN, the identifier is a domain name. */ identifier: string; /** * The type of resource to be verified. * Possible values are: `INET_DOMAIN`, `SITE`. */ type: string; } } export declare namespace sourcerepo { interface GetRepositoryPubsubConfig { /** * The format of the Cloud Pub/Sub messages. * - PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent. * - JSON: The message payload is a JSON string of SourceRepoEvent. Possible values: ["PROTOBUF", "JSON"] */ messageFormat: string; /** * Email address of the service account used for publishing Cloud Pub/Sub messages. * This service account needs to be in the same project as the PubsubConfig. When added, * the caller needs to have iam.serviceAccounts.actAs permission on this service account. * If unspecified, it defaults to the compute engine default service account. */ serviceAccountEmail: string; topic: string; } interface RepositoryIamBindingCondition { description?: string; expression: string; title: string; } interface RepositoryIamMemberCondition { description?: string; expression: string; title: string; } interface RepositoryPubsubConfig { /** * The format of the Cloud Pub/Sub messages. * - PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent. * - JSON: The message payload is a JSON string of SourceRepoEvent. * Possible values are: `PROTOBUF`, `JSON`. */ messageFormat: string; /** * Email address of the service account used for publishing Cloud Pub/Sub messages. * This service account needs to be in the same project as the PubsubConfig. When added, * the caller needs to have iam.serviceAccounts.actAs permission on this service account. * If unspecified, it defaults to the compute engine default service account. */ serviceAccountEmail: string; /** * The identifier for this object. Format specified above. 
*/ topic: string; } } export declare namespace spanner { interface BackupScheduleEncryptionConfig { /** * The encryption type of backups created by the backup schedule. * Possible values are USE_DATABASE_ENCRYPTION, GOOGLE_DEFAULT_ENCRYPTION, or CUSTOMER_MANAGED_ENCRYPTION. * If you use CUSTOMER_MANAGED_ENCRYPTION, you must specify a kmsKeyName or kmsKeyNames. * Possible values are: `USE_DATABASE_ENCRYPTION`, `GOOGLE_DEFAULT_ENCRYPTION`, `CUSTOMER_MANAGED_ENCRYPTION`. */ encryptionType: string; /** * The resource name of the Cloud KMS key to use for encryption. * Format: 'projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}' */ kmsKeyName?: string; /** * Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist * in the same locations as the Spanner Database. */ kmsKeyNames?: string[]; } interface BackupScheduleFullBackupSpec { } interface BackupScheduleIncrementalBackupSpec { } interface BackupScheduleSpec { /** * Cron style schedule specification. * Structure is documented below. */ cronSpec?: outputs.spanner.BackupScheduleSpecCronSpec; } interface BackupScheduleSpecCronSpec { /** * Textual representation of the crontab. User can customize the * backup frequency and the backup version time using the cron * expression. The version time must be in UTC timezone. * The backup will contain an externally consistent copy of the * database at the version time. Allowed frequencies are 12 hour, 1 day, * 1 week and 1 month. Examples of valid cron specifications: * 0 2/12 * * * : every 12 hours at (2, 14) hours past midnight in UTC. * 0 2,14 * * * : every 12 hours at (2,14) hours past midnight in UTC. * 0 2 * * * : once a day at 2 past midnight in UTC. * 0 2 * * 0 : once a week every Sunday at 2 past midnight in UTC. * 0 2 8 * * : once a month on 8th day at 2 past midnight in UTC. */ text?: string; } interface DatabaseEncryptionConfig { /** * Fully qualified name of the KMS key to use to encrypt this database. 
This key must exist * in the same location as the Spanner Database. */ kmsKeyName?: string; /** * Fully qualified name of the KMS keys to use to encrypt this database. The keys must exist * in the same locations as the Spanner Database. */ kmsKeyNames: string[]; } interface DatabaseIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface DatabaseIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface GetDatabaseEncryptionConfig { /** * Fully qualified name of the KMS key to use to encrypt this database. This key must exist * in the same location as the Spanner Database. */ kmsKeyName: string; /** * Fully qualified name of the KMS keys to use to encrypt this database. 
The keys must exist * in the same locations as the Spanner Database. */ kmsKeyNames: string[]; } interface GetInstanceAutoscalingConfig { /** * Asymmetric autoscaling options for specific replicas. */ asymmetricAutoscalingOptions: outputs.spanner.GetInstanceAutoscalingConfigAsymmetricAutoscalingOption[]; /** * Defines scale in controls to reduce the risk of response latency * and outages due to abrupt scale-in events. Users can define the minimum and * maximum compute capacity allocated to the instance, and the autoscaler will * only scale within that range. Users can either use nodes or processing * units to specify the limits, but should use the same unit to set both the * minLimit and max_limit. */ autoscalingLimits: outputs.spanner.GetInstanceAutoscalingConfigAutoscalingLimit[]; /** * Defines scale in controls to reduce the risk of response latency * and outages due to abrupt scale-in events */ autoscalingTargets: outputs.spanner.GetInstanceAutoscalingConfigAutoscalingTarget[]; } interface GetInstanceAutoscalingConfigAsymmetricAutoscalingOption { /** * A nested object resource. */ overrides: outputs.spanner.GetInstanceAutoscalingConfigAsymmetricAutoscalingOptionOverride[]; /** * A nested object resource. */ replicaSelections: outputs.spanner.GetInstanceAutoscalingConfigAsymmetricAutoscalingOptionReplicaSelection[]; } interface GetInstanceAutoscalingConfigAsymmetricAutoscalingOptionOverride { /** * A nested object resource. */ autoscalingLimits: outputs.spanner.GetInstanceAutoscalingConfigAsymmetricAutoscalingOptionOverrideAutoscalingLimit[]; } interface GetInstanceAutoscalingConfigAsymmetricAutoscalingOptionOverrideAutoscalingLimit { /** * The maximum number of nodes for this specific replica. */ maxNodes: number; /** * The minimum number of nodes for this specific replica. */ minNodes: number; } interface GetInstanceAutoscalingConfigAsymmetricAutoscalingOptionReplicaSelection { /** * The location of the replica to apply asymmetric autoscaling options. 
*/ location: string; } interface GetInstanceAutoscalingConfigAutoscalingLimit { /** * Specifies maximum number of nodes allocated to the instance. If set, this number * should be greater than or equal to min_nodes. */ maxNodes: number; /** * Specifies maximum number of processing units allocated to the instance. * If set, this number should be multiples of 1000 and be greater than or equal to * min_processing_units. */ maxProcessingUnits: number; /** * Specifies number of nodes allocated to the instance. If set, this number * should be greater than or equal to 1. */ minNodes: number; /** * Specifies minimum number of processing units allocated to the instance. * If set, this number should be multiples of 1000. */ minProcessingUnits: number; } interface GetInstanceAutoscalingConfigAutoscalingTarget { /** * Specifies the target high priority cpu utilization percentage that the autoscaler * should be trying to achieve for the instance. * This number is on a scale from 0 (no utilization) to 100 (full utilization).. */ highPriorityCpuUtilizationPercent: number; /** * Specifies the target storage utilization percentage that the autoscaler * should be trying to achieve for the instance. * This number is on a scale from 0 (no utilization) to 100 (full utilization). */ storageUtilizationPercent: number; /** * The target total cpu utilization percentage that the autoscaler should be trying to achieve for the instance. * This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. * If not specified or set to 0, the autoscaler will skip scaling based on total cpu utilization. * The value should be higher than highPriorityCpuUtilizationPercent if present. */ totalCpuUtilizationPercent: number; } interface InstanceAutoscalingConfig { /** * Asymmetric autoscaling options for specific replicas. * Structure is documented below. 
*/ asymmetricAutoscalingOptions?: outputs.spanner.InstanceAutoscalingConfigAsymmetricAutoscalingOption[]; /** * Defines scale in controls to reduce the risk of response latency * and outages due to abrupt scale-in events. Users can define the minimum and * maximum compute capacity allocated to the instance, and the autoscaler will * only scale within that range. Users can either use nodes or processing * units to specify the limits, but should use the same unit to set both the * minLimit and max_limit. * Structure is documented below. */ autoscalingLimits?: outputs.spanner.InstanceAutoscalingConfigAutoscalingLimits; /** * Defines scale in controls to reduce the risk of response latency * and outages due to abrupt scale-in events * Structure is documented below. */ autoscalingTargets?: outputs.spanner.InstanceAutoscalingConfigAutoscalingTargets; } interface InstanceAutoscalingConfigAsymmetricAutoscalingOption { /** * A nested object resource. * Structure is documented below. */ overrides: outputs.spanner.InstanceAutoscalingConfigAsymmetricAutoscalingOptionOverrides; /** * A nested object resource. * Structure is documented below. */ replicaSelection: outputs.spanner.InstanceAutoscalingConfigAsymmetricAutoscalingOptionReplicaSelection; } interface InstanceAutoscalingConfigAsymmetricAutoscalingOptionOverrides { /** * A nested object resource. * Structure is documented below. */ autoscalingLimits: outputs.spanner.InstanceAutoscalingConfigAsymmetricAutoscalingOptionOverridesAutoscalingLimits; } interface InstanceAutoscalingConfigAsymmetricAutoscalingOptionOverridesAutoscalingLimits { /** * The maximum number of nodes for this specific replica. */ maxNodes: number; /** * The minimum number of nodes for this specific replica. */ minNodes: number; } interface InstanceAutoscalingConfigAsymmetricAutoscalingOptionReplicaSelection { /** * The location of the replica to apply asymmetric autoscaling options. 
*/ location: string; } interface InstanceAutoscalingConfigAutoscalingLimits { /** * The maximum number of nodes for this specific replica. */ maxNodes?: number; /** * Specifies maximum number of processing units allocated to the instance. * If set, this number should be multiples of 1000 and be greater than or equal to * min_processing_units. */ maxProcessingUnits?: number; /** * The minimum number of nodes for this specific replica. */ minNodes?: number; /** * Specifies minimum number of processing units allocated to the instance. * If set, this number should be multiples of 1000. */ minProcessingUnits?: number; } interface InstanceAutoscalingConfigAutoscalingTargets { /** * Specifies the target high priority cpu utilization percentage that the autoscaler * should be trying to achieve for the instance. * This number is on a scale from 0 (no utilization) to 100 (full utilization).. */ highPriorityCpuUtilizationPercent?: number; /** * Specifies the target storage utilization percentage that the autoscaler * should be trying to achieve for the instance. * This number is on a scale from 0 (no utilization) to 100 (full utilization). */ storageUtilizationPercent?: number; /** * The target total cpu utilization percentage that the autoscaler should be trying to achieve for the instance. * This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. * If not specified or set to 0, the autoscaler will skip scaling based on total cpu utilization. * The value should be higher than highPriorityCpuUtilizationPercent if present. */ totalCpuUtilizationPercent?: number; } interface InstanceConfigReplica { /** * If true, this location is designated as the default leader location where * leader replicas are placed. */ defaultLeaderLocation?: boolean; /** * The location of the serving resources, e.g. "us-central1". */ location?: string; /** * Indicates the type of replica. 
See the [replica types * documentation](https://cloud.google.com/spanner/docs/replication#replica_types) * for more details. * Possible values are: `READ_WRITE`, `READ_ONLY`, `WITNESS`. */ type?: string; } interface InstanceIAMBindingCondition { description?: string; expression: string; title: string; } interface InstanceIAMMemberCondition { description?: string; expression: string; title: string; } } export declare namespace sql { interface DatabaseInstanceClone { /** * The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange?: string; /** * (SQL Server only, use with `pointInTime`) Clone only the specified databases from the source instance. Clone all databases if empty. */ databaseNames?: string[]; /** * The timestamp of the point in time that should be restored. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ pointInTime?: string; /** * (Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. If no zone is specified, clone to the same zone as the source instance. [clone-unavailable-instance](https://cloud.google.com/sql/docs/postgres/clone-instance#clone-unavailable-instance) */ preferredZone?: string; /** * The timestamp of when the source instance was deleted for a clone from a deleted instance. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ sourceInstanceDeletionTime?: string; /** * Name of the source instance which will be cloned. 
*/ sourceInstanceName: string; } interface DatabaseInstanceDnsName { /** * The connection type of the DNS name. Can be either `PUBLIC`, `PRIVATE_SERVICES_ACCESS`, or `PRIVATE_SERVICE_CONNECT`. */ connectionType: string; /** * The scope that the DNS name applies to. */ dnsScope: string; /** * The name of the instance. If the name is left * blank, the provider will randomly generate one when the instance is first * created. This is done because after a name is used, it cannot be reused for * up to [one week](https://cloud.google.com/sql/docs/delete-instance). */ name: string; } interface DatabaseInstanceIpAddress { /** * The IPv4 address assigned. */ ipAddress: string; /** * The time this IP address will be retired, in RFC * 3339 format. */ timeToRetire: string; /** * The type of this IP address. */ type: string; } interface DatabaseInstancePointInTimeRestoreContext { /** * The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange?: string; /** * The Google Cloud Backup and Disaster Recovery Datasource URI. */ datasource: string; /** * The timestamp of the point in time that should be restored. * * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ pointInTime?: string; /** * Point-in-time recovery of an instance to the specified zone. If no zone is specified, then clone to the same primary zone as the source instance. */ preferredZone?: string; /** * The name of the target instance. 
*/ targetInstance?: string; } interface DatabaseInstanceReplicaConfiguration { /** * PEM representation of the trusted CA's x509 * certificate. */ caCertificate?: string; /** * Specifies if the replica is a cascadable replica. If true, instance must be in different region from primary. * * > **NOTE:** Only supported for SQL Server database. */ cascadableReplica?: boolean; /** * PEM representation of the replica's x509 * certificate. */ clientCertificate?: string; /** * PEM representation of the replica's private key. The * corresponding public key is encoded in the `clientCertificate`. */ clientKey?: string; /** * The number of seconds * between connect retries. MySQL's default is 60 seconds. */ connectRetryInterval?: number; /** * Path to a SQL file in GCS from which replica * instances are created. Format is `gs://bucket/filename`. Note, if the master * instance is a source representation instance this field must be present. */ dumpFilePath?: string; /** * Specifies if the replica is the failover target. * If the field is set to true the replica will be designated as a failover replica. * If the master instance fails, the replica instance will be promoted as * the new master instance. * > **NOTE:** Not supported for Postgres database. */ failoverTarget?: boolean; /** * Time in ms between replication * heartbeats. */ masterHeartbeatPeriod?: number; /** * Password for the replication connection. */ password?: string; /** * Permissible ciphers for use in SSL encryption. */ sslCipher?: string; /** * Username for replication connection. */ username?: string; /** * True if the master's common name * value is checked during the SSL handshake. */ verifyServerCertificate?: boolean; } interface DatabaseInstanceReplicationCluster { /** * Read-only field that indicates whether the replica is a DR replica. */ drReplica: boolean; /** * If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. 
The standard format of this field is "your-project:your-instance". You can also set this field to "your-instance", but cloud SQL backend will convert it to the aforementioned standard format. */ failoverDrReplicaName?: string; /** * Read-only field which if set, indicates this instance has a private service access (PSA) DNS endpoint that is pointing to the primary instance of the cluster. If this instance is the primary, then the DNS endpoint points to this instance. After a switchover or replica failover operation, this DNS endpoint points to the promoted instance. This is a read-only field, returned to the user as information. This field can exist even if a standalone instance doesn't have a DR replica yet or the DR replica is deleted. */ psaWriteEndpoint: string; } interface DatabaseInstanceRestoreBackupContext { /** * The ID of the backup run to restore from. */ backupRunId: number; /** * The ID of the instance that the backup was taken from. If left empty, * this instance's ID will be used. */ instanceId?: string; /** * The full project ID of the source instance. */ project?: string; } interface DatabaseInstanceServerCaCert { /** * The CA Certificate used to connect to the SQL Instance via SSL. */ cert: string; /** * The CN valid for the CA Cert. */ commonName: string; /** * Creation time of the CA Cert. */ createTime: string; /** * Expiration time of the CA Cert. */ expirationTime: string; /** * SHA Fingerprint of the CA Cert. */ sha1Fingerprint: string; } interface DatabaseInstanceSettings { /** * This specifies when the instance should be * active. Can be either `ALWAYS`, `NEVER` or `ON_DEMAND`. */ activationPolicy?: string; activeDirectoryConfig?: outputs.sql.DatabaseInstanceSettingsActiveDirectoryConfig; advancedMachineFeatures?: outputs.sql.DatabaseInstanceSettingsAdvancedMachineFeatures; /** * The availability type of the Cloud SQL * instance, high availability (`REGIONAL`) or single zone (`ZONAL`). 
For all instances, ensure that * `settings.backup_configuration.enabled` is set to `true`. * For MySQL instances, ensure that `settings.backup_configuration.binary_log_enabled` is set to `true`. * For Postgres and SQL Server instances, ensure that `settings.backup_configuration.point_in_time_recovery_enabled` * is set to `true`. Defaults to `ZONAL`. * For read pool instances, this field is read-only. The availability type is changed by specifying * the number of nodes (`nodeCount`). */ availabilityType?: string; backupConfiguration: outputs.sql.DatabaseInstanceSettingsBackupConfiguration; /** * The name of server instance collation. */ collation?: string; /** * The managed connection pool setting for a Cloud SQL instance. */ connectionPoolConfigs: outputs.sql.DatabaseInstanceSettingsConnectionPoolConfig[]; /** * Control the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections, can be `REQUIRED` or `NOT_REQUIRED`. If enabled, all the direct connections are rejected. */ connectorEnforcement: string; /** * Data cache configurations. */ dataCacheConfig: outputs.sql.DatabaseInstanceSettingsDataCacheConfig; /** * Provisioned number of I/O operations per second for the data disk. This field is only used for `HYPERDISK_BALANCED` disk types. */ dataDiskProvisionedIops: number; /** * Provisioned throughput measured in MiB per second for the data disk. This field is only used for `HYPERDISK_BALANCED` disk types. */ dataDiskProvisionedThroughput: number; databaseFlags?: outputs.sql.DatabaseInstanceSettingsDatabaseFlag[]; /** * Enables deletion protection of an instance at the GCP level. Enabling this protection will guard against accidental deletion across all surfaces (API, gcloud, Cloud Console and Terraform) by enabling the [GCP Cloud SQL instance deletion protection](https://cloud.google.com/sql/docs/postgres/deletion-protection). Terraform provider support was introduced in version 4.48.0. Defaults to `false`. 
*/ deletionProtectionEnabled?: boolean; denyMaintenancePeriod?: outputs.sql.DatabaseInstanceSettingsDenyMaintenancePeriod; /** * Enables auto-resizing of the storage size. Defaults to `true`. Note that if `diskSize` is set, future `pulumi up` calls will attempt to delete the instance in order to resize the disk to the value specified in diskSize if it has been resized. To avoid this, ensure that `lifecycle.ignore_changes` is applied to `diskSize`. */ diskAutoresize?: boolean; /** * The maximum size to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit. */ diskAutoresizeLimit?: number; /** * The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased. The minimum value is 10GB for `PD_SSD`, `PD_HDD` and 20GB for `HYPERDISK_BALANCED`. Note that this value will override the resizing from `diskAutoresize` if that feature is enabled. To avoid this, set `lifecycle.ignore_changes` on this field. */ diskSize: number; /** * The type of data disk: `PD_SSD`, `PD_HDD`, or `HYPERDISK_BALANCED`. Defaults to `PD_SSD`. `HYPERDISK_BALANCED` is preview. */ diskType: string; /** * The edition of the instance, can be `ENTERPRISE` or `ENTERPRISE_PLUS`. */ edition: string; /** * (Computed) The availability type of * the Cloud SQL instance, high availability (REGIONAL) or single zone * (ZONAL). This field always contains the value that is reported by the API (for * read pools, `settings.0.effective_availability_type` may differ from * `settings.0.availability_type`). */ effectiveAvailabilityType: string; /** * Enables [Cloud SQL instance integration with Dataplex](https://cloud.google.com/sql/docs/mysql/dataplex-catalog-integration). MySQL, Postgres and SQL Server instances are supported for this feature. Defaults to `false`. 
*/ enableDataplexIntegration?: boolean; /** * Enables [Cloud SQL instances to connect to Vertex AI](https://cloud.google.com/sql/docs/postgres/integrate-cloud-sql-with-vertex-ai) and pass requests for real-time predictions and insights. Defaults to `false`. */ enableGoogleMlIntegration?: boolean; /** * Config used to determine the final backup settings for the instance */ finalBackupConfig?: outputs.sql.DatabaseInstanceSettingsFinalBackupConfig; /** * Configuration of Query Insights. */ insightsConfig: outputs.sql.DatabaseInstanceSettingsInsightsConfig; ipConfiguration: outputs.sql.DatabaseInstanceSettingsIpConfiguration; locationPreference: outputs.sql.DatabaseInstanceSettingsLocationPreference; /** * Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. The maintenance window is specified in UTC time. */ maintenanceWindow?: outputs.sql.DatabaseInstanceSettingsMaintenanceWindow; passwordValidationPolicy?: outputs.sql.DatabaseInstanceSettingsPasswordValidationPolicy; /** * Pricing plan for this instance, can only be `PER_USE`. */ pricingPlan?: string; /** * Configuration of Read Pool Auto Scale. */ readPoolAutoScaleConfig: outputs.sql.DatabaseInstanceSettingsReadPoolAutoScaleConfig; /** * When this parameter is set to true, Cloud SQL retains backups of the instance even after the instance is deleted. The `ON_DEMAND` backup will be retained until customer deletes the backup or the project. The `AUTOMATED` backup will be retained based on the backups retention setting. */ retainBackupsOnDelete?: boolean; sqlServerAuditConfig?: outputs.sql.DatabaseInstanceSettingsSqlServerAuditConfig; /** * The machine type to use. See [tiers](https://cloud.google.com/sql/docs/admin-api/v1beta4/tiers) * for more details and supported versions. Postgres supports only shared-core machine types, * and custom machine types such as `db-custom-2-13312`. 
See the [Custom Machine Type Documentation](https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create) to learn about specifying custom machine types. */ tier: string; /** * The timeZone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format. */ timeZone?: string; /** * A set of key/value user label pairs to assign to the instance. */ userLabels: { [key: string]: string; }; /** * Used to make sure changes to the `settings` block are * atomic. */ version: number; } interface DatabaseInstanceSettingsActiveDirectoryConfig { /** * The domain name for the active directory (e.g., mydomain.com). * Can only be used with SQL Server. */ domain: string; } interface DatabaseInstanceSettingsAdvancedMachineFeatures { /** * The number of threads per core. The value of this flag can be 1 or 2. To disable SMT, set this flag to 1. Only available in Cloud SQL for SQL Server instances. See [smt](https://cloud.google.com/sql/docs/sqlserver/create-instance#smt-create-instance) for more details. */ threadsPerCore?: number; } interface DatabaseInstanceSettingsBackupConfiguration { /** * Backup retention settings. The configuration is detailed below. */ backupRetentionSettings: outputs.sql.DatabaseInstanceSettingsBackupConfigurationBackupRetentionSettings; /** * The backup tier that manages the backups for the instance. */ backupTier: string; /** * True if binary logging is enabled. * Can only be used with MySQL. */ binaryLogEnabled?: boolean; /** * True if backup configuration is enabled. */ enabled?: boolean; /** * The region where the backup will be stored */ location?: string; /** * True if Point-in-time recovery is enabled. Will restart database if enabled after instance creation. Valid only for PostgreSQL and SQL Server instances. Enabled by default for PostgreSQL Enterprise Plus and SQL Server Enterprise Plus instances. 
*/ pointInTimeRecoveryEnabled?: boolean; /** * `HH:MM` format time indicating when backup * configuration starts. */ startTime: string; /** * The number of days of transaction logs we retain for point in time restore, from 1-7. For PostgreSQL Enterprise Plus and SQL Server Enterprise Plus instances, the number of days of retained transaction logs can be set from 1 to 35. */ transactionLogRetentionDays: number; } interface DatabaseInstanceSettingsBackupConfigurationBackupRetentionSettings { /** * Depending on the value of retention_unit, this is used to determine if a backup needs to be deleted. If retentionUnit * is 'COUNT', we will retain this many backups. */ retainedBackups: number; /** * The unit that 'retained_backups' represents. Defaults to `COUNT`. */ retentionUnit?: string; } interface DatabaseInstanceSettingsConnectionPoolConfig { /** * True if the manager connection pooling configuration is enabled. */ connectionPoolingEnabled?: boolean; /** * List of connection pool configuration flags */ flags?: outputs.sql.DatabaseInstanceSettingsConnectionPoolConfigFlag[]; } interface DatabaseInstanceSettingsConnectionPoolConfigFlag { /** * Name of the flag. */ name: string; /** * Value of the flag. */ value: string; } interface DatabaseInstanceSettingsDataCacheConfig { /** * Whether data cache is enabled for the instance. Defaults to `true` for MYSQL Enterprise Plus and PostgreSQL Enterprise Plus instances only. For SQL Server Enterprise Plus instances it defaults to `false`. */ dataCacheEnabled?: boolean; } interface DatabaseInstanceSettingsDatabaseFlag { /** * Name of the flag. */ name: string; /** * Value of the flag. */ value: string; } interface DatabaseInstanceSettingsDenyMaintenancePeriod { /** * "deny maintenance period" end date. If the year of the end date is empty, the year of the start date also must be empty. In this case, it means the no maintenance interval recurs every year. 
The date is in format yyyy-m-dd (the month is without leading zeros), i.e., 2020-1-01, or 2020-11-01, or mm-dd, i.e., 11-01 */ endDate: string; /** * "deny maintenance period" start date. If the year of the start date is empty, the year of the end date also must be empty. In this case, it means the deny maintenance period recurs every year. The date is in format yyyy-m-dd (the month is without leading zeros), i.e., 2020-1-01, or 2020-11-01, or mm-dd, i.e., 11-01 */ startDate: string; /** * Time in UTC when the "deny maintenance period" starts on startDate and ends on endDate. The time is in format: HH:mm:SS, i.e., 00:00:00 */ time: string; } interface DatabaseInstanceSettingsFinalBackupConfig { /** * True if final backup is enabled. */ enabled?: boolean; /** * The number of days we retain the final backup after instance deletion. The valid range is between 1 and 365. For instances managed by BackupDR, the valid range is between 1 day and 99 years. */ retentionDays?: number; } interface DatabaseInstanceSettingsInsightsConfig { /** * True if Query Insights feature is enabled. */ queryInsightsEnabled?: boolean; /** * Number of query execution plans captured by Insights per minute for all queries combined. Between 0 and 20. Default to 5. */ queryPlansPerMinute: number; /** * Maximum query length stored in bytes. Between 256 and 4500. Default to 1024. Higher query lengths are more useful for analytical queries, but they also require more memory. Changing the query length requires you to restart the instance. You can still add tags to queries that exceed the length limit. */ queryStringLength?: number; /** * True if Query Insights will record application tags from query when enabled. */ recordApplicationTags?: boolean; /** * True if Query Insights will record client address when enabled. */ recordClientAddress?: boolean; } interface DatabaseInstanceSettingsIpConfiguration { /** * The name of the allocated ip range for the private ip CloudSQL instance. 
For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://datatracker.ietf.org/doc/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange?: string; authorizedNetworks?: outputs.sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetwork[]; /** * The custom subject alternative names for an instance with `CUSTOMER_MANAGED_CAS_CA` as the `serverCaMode`. */ customSubjectAlternativeNames?: string[]; /** * Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported. */ enablePrivatePathForGoogleCloudServices?: boolean; /** * Whether this Cloud SQL instance should be assigned * a public IPV4 address. At least `ipv4Enabled` must be enabled or a * `privateNetwork` must be configured. */ ipv4Enabled?: boolean; /** * The VPC network from which the Cloud SQL * instance is accessible for private IP. For example, projects/myProject/global/networks/default. * Specifying a network enables private IP. * At least `ipv4Enabled` must be enabled or a `privateNetwork` must be configured. * This setting can be updated, but it cannot be removed after it is set. */ privateNetwork?: string; /** * PSC settings for a Cloud SQL instance. */ pscConfigs?: outputs.sql.DatabaseInstanceSettingsIpConfigurationPscConfig[]; /** * Specify how the server certificate's Certificate Authority is hosted. Supported values are `GOOGLE_MANAGED_INTERNAL_CA` and `GOOGLE_MANAGED_CAS_CA`. */ serverCaMode: string; /** * The resource name of the server CA pool for an instance with `CUSTOMER_MANAGED_CAS_CA` as the `serverCaMode`. */ serverCaPool?: string; /** * Specify how SSL connection should be enforced in DB connections. 
Supported values are `ALLOW_UNENCRYPTED_AND_ENCRYPTED`, `ENCRYPTED_ONLY`, and `TRUSTED_CLIENT_CERTIFICATE_REQUIRED` (not supported for SQL Server). See [API reference doc](https://cloud.google.com/sql/docs/postgres/admin-api/rest/v1/instances#ipconfiguration) for details. */ sslMode: string; } interface DatabaseInstanceSettingsIpConfigurationAuthorizedNetwork { /** * The [RFC 3339](https://tools.ietf.org/html/rfc3339) * formatted date time string indicating when this whitelist expires. */ expirationTime?: string; /** * A name for this whitelist entry. */ name?: string; /** * A CIDR notation IPv4 or IPv6 address that is allowed to * access this instance. Must be set even if other two attributes are not for * the whitelist to become active. */ value: string; } interface DatabaseInstanceSettingsIpConfigurationPscConfig { /** * List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric). */ allowedConsumerProjects?: string[]; /** * Name of network attachment resource used to authorize a producer service to connect a PSC interface to the consumer's VPC. For example: "projects/myProject/regions/myRegion/networkAttachments/myNetworkAttachment". This is required to enable outbound connection on a PSC instance. */ networkAttachmentUri?: string; /** * A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. */ pscAutoConnections?: outputs.sql.DatabaseInstanceSettingsIpConfigurationPscConfigPscAutoConnection[]; /** * Whether PSC connectivity is enabled for this instance. 
*/ pscEnabled?: boolean; } interface DatabaseInstanceSettingsIpConfigurationPscConfigPscAutoConnection { /** * "The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. For example, `projects/project1/global/networks/network1`. The consumer host project of this network might be different from the consumer service project." */ consumerNetwork: string; /** * (Output) The connection policy status of the consumer network. */ consumerNetworkStatus: string; /** * The project ID of consumer service project of this consumer endpoint. */ consumerServiceProjectId?: string; /** * (Output) The IP address of the consumer endpoint. */ ipAddress: string; /** * (Output) The connection status of the consumer endpoint. */ status: string; } interface DatabaseInstanceSettingsLocationPreference { /** * A GAE application whose zone to remain * in. Must be in the same region as this instance. */ followGaeApplication?: string; /** * The preferred Compute Engine zone for the secondary/failover. */ secondaryZone?: string; /** * The preferred compute engine * [zone](https://cloud.google.com/compute/docs/zones?hl=en). */ zone?: string; } interface DatabaseInstanceSettingsMaintenanceWindow { /** * Day of week (`1-7`), starting on Monday */ day?: number; /** * Hour of day (`0-23`), ignored if `day` not set */ hour?: number; /** * Receive updates after one week (`canary`) or after two weeks (`stable`) or after five weeks (`week5`) of notification. */ updateTrack?: string; } interface DatabaseInstanceSettingsPasswordValidationPolicy { /** * Checks if the password is a combination of lowercase, uppercase, numeric, and non-alphanumeric characters. */ complexity?: string; /** * Prevents the use of the username in the password. */ disallowUsernameSubstring?: boolean; /** * Enables or disable the password validation policy. 
*/ enablePasswordPolicy: boolean; /** * Specifies the minimum number of characters that the password must have. */ minLength?: number; /** * Specifies the minimum duration after which you can change the password. */ passwordChangeInterval?: string; /** * Specifies the number of previous passwords that you can't reuse. */ reuseInterval?: number; } interface DatabaseInstanceSettingsReadPoolAutoScaleConfig { /** * True if auto scale in is disabled. */ disableScaleIn?: boolean; /** * True if Read Pool Auto Scale is enabled. */ enabled?: boolean; /** * Maximum number of nodes in the read pool. If set to lower than current node count, node count will be updated. */ maxNodeCount?: number; /** * Minimum number of nodes in the read pool. If set to higher than current node count, node count will be updated. */ minNodeCount?: number; /** * The cooldown period for scale in operations. */ scaleInCooldownSeconds?: number; /** * The cooldown period for scale out operations. */ scaleOutCooldownSeconds?: number; /** * Target metrics for Read Pool Auto Scale. Must specify `target_metrics.metric` and `target_metrics.target_value` in subblock. */ targetMetrics?: outputs.sql.DatabaseInstanceSettingsReadPoolAutoScaleConfigTargetMetric[]; } interface DatabaseInstanceSettingsReadPoolAutoScaleConfigTargetMetric { /** * Metric name for Read Pool Auto Scale. */ metric?: string; /** * Target value for Read Pool Auto Scale. */ targetValue?: number; } interface DatabaseInstanceSettingsSqlServerAuditConfig { /** * The name of the destination bucket (e.g., gs://mybucket). */ bucket?: string; /** * How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ retentionInterval?: string; /** * How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". 
*/ uploadInterval?: string; } interface GetCaCertsCert { /** * The CA certificate used to connect to the SQL instance via SSL. */ cert: string; /** * The CN valid for the CA cert. */ commonName: string; /** * Creation time of the CA cert. */ createTime: string; /** * Expiration time of the CA cert. */ expirationTime: string; /** * SHA1 fingerprint of the CA cert. */ sha1Fingerprint: string; } interface GetDatabaseInstanceClone { /** * The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange: string; /** * (SQL Server only, use with point_in_time) clone only the specified databases from the source instance. Clone all databases if empty. */ databaseNames: string[]; /** * The timestamp of the point in time that should be restored. */ pointInTime: string; /** * (Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. If no zone is specified, clone to the same zone as the source instance. */ preferredZone: string; /** * The timestamp of when the source instance was deleted for a clone from a deleted instance. */ sourceInstanceDeletionTime: string; /** * The name of the instance from which the point in time should be restored. */ sourceInstanceName: string; } interface GetDatabaseInstanceDnsName { connectionType: string; dnsScope: string; /** * The name of the instance. */ name: string; } interface GetDatabaseInstanceIpAddress { ipAddress: string; timeToRetire: string; type: string; } interface GetDatabaseInstancePointInTimeRestoreContext { /** * The name of the allocated IP range for the internal IP Cloud SQL instance. For example: "google-managed-services-default". 
If you set this, then Cloud SQL creates the IP address for the cloned instance in the allocated range. This range must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035) standards. Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange: string; /** * The Google Cloud Backup and Disaster Recovery Datasource URI. For example: "projects/my-project/locations/us-central1/datasources/my-datasource". */ datasource: string; /** * The date and time to which you want to restore the instance. */ pointInTime: string; /** * Point-in-time recovery of an instance to the specified zone. If no zone is specified, then clone to the same primary zone as the source instance. */ preferredZone: string; /** * The name of the target instance to restore to. */ targetInstance: string; } interface GetDatabaseInstanceReplicaConfiguration { /** * PEM representation of the trusted CA's x509 certificate. */ caCertificate: string; /** * Specifies if a SQL Server replica is a cascadable replica. A cascadable replica is a SQL Server cross region replica that supports replica(s) under it. */ cascadableReplica: boolean; /** * PEM representation of the replica's x509 certificate. */ clientCertificate: string; /** * PEM representation of the replica's private key. The corresponding public key is encoded in the client_certificate. */ clientKey: string; /** * The number of seconds between connect retries. MySQL's default is 60 seconds. */ connectRetryInterval: number; /** * Path to a SQL file in Google Cloud Storage from which replica instances are created. Format is gs://bucket/filename. */ dumpFilePath: string; /** * Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. If the master instance fails, the replica instance will be promoted as the new master instance. 
Not supported for Postgres */ failoverTarget: boolean; /** * Time in ms between replication heartbeats. */ masterHeartbeatPeriod: number; /** * Password for the replication connection. */ password: string; /** * Permissible ciphers for use in SSL encryption. */ sslCipher: string; /** * Username for replication connection. */ username: string; /** * True if the master's common name value is checked during the SSL handshake. */ verifyServerCertificate: boolean; } interface GetDatabaseInstanceReplicationCluster { /** * Read-only field that indicates whether the replica is a DR replica. */ drReplica: boolean; /** * If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. The standard format of this field is "your-project:your-instance". You can also set this field to "your-instance", but cloud SQL backend will convert it to the aforementioned standard format. */ failoverDrReplicaName: string; /** * If set, this field indicates this instance has a private service access (PSA) DNS endpoint that is pointing to the primary instance of the cluster. If this instance is the primary, then the DNS endpoint points to this instance. After a switchover or replica failover operation, this DNS endpoint points to the promoted instance. This is a read-only field, returned to the user as information. This field can exist even if a standalone instance doesn't have a DR replica yet or the DR replica is deleted. */ psaWriteEndpoint: string; } interface GetDatabaseInstanceRestoreBackupContext { /** * The ID of the backup run to restore from. */ backupRunId: number; /** * The ID of the instance that the backup was taken from. */ instanceId: string; /** * The ID of the project in which the resource belongs. */ project: string; } interface GetDatabaseInstanceServerCaCert { /** * The CA Certificate used to connect to the SQL Instance via SSL. */ cert: string; /** * The CN valid for the CA Cert. 
*/ commonName: string; /** * Creation time of the CA Cert. */ createTime: string; /** * Expiration time of the CA Cert. */ expirationTime: string; /** * SHA Fingerprint of the CA Cert. */ sha1Fingerprint: string; } interface GetDatabaseInstanceSetting { /** * This specifies when the instance should be active. Can be either ALWAYS, NEVER or ON_DEMAND. */ activationPolicy: string; activeDirectoryConfigs: outputs.sql.GetDatabaseInstanceSettingActiveDirectoryConfig[]; advancedMachineFeatures: outputs.sql.GetDatabaseInstanceSettingAdvancedMachineFeature[]; /** * The availability type of the Cloud SQL instance, high availability * (REGIONAL) or single zone (ZONAL). For all instances, ensure that * settings.backup_configuration.enabled is set to true. * For MySQL instances, ensure that settings.backup_configuration.binary_log_enabled is set to true. * For Postgres instances, ensure that settings.backup_configuration.point_in_time_recovery_enabled * is set to true. Defaults to ZONAL. * For read pool instances, this field is read-only. The availability type is changed by specifying * the number of nodes (node_count). */ availabilityType: string; backupConfigurations: outputs.sql.GetDatabaseInstanceSettingBackupConfiguration[]; /** * The name of server instance collation. */ collation: string; /** * The managed connection pool setting for a Cloud SQL instance. */ connectionPoolConfigs: outputs.sql.GetDatabaseInstanceSettingConnectionPoolConfig[]; /** * Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected. */ connectorEnforcement: string; /** * Data cache configurations. */ dataCacheConfigs: outputs.sql.GetDatabaseInstanceSettingDataCacheConfig[]; /** * Provisioned number of I/O operations per second for the data disk. This field is only used for HYPERDISK_BALANCED disk types. 
*/ dataDiskProvisionedIops: number; /** * Provisioned throughput measured in MiB per second for the data disk. This field is only used for HYPERDISK_BALANCED disk types. */ dataDiskProvisionedThroughput: number; databaseFlags: outputs.sql.GetDatabaseInstanceSettingDatabaseFlag[]; /** * Configuration to protect against accidental instance deletion. */ deletionProtectionEnabled: boolean; denyMaintenancePeriods: outputs.sql.GetDatabaseInstanceSettingDenyMaintenancePeriod[]; /** * Enables auto-resizing of the storage size. Defaults to true. */ diskAutoresize: boolean; /** * The maximum size, in GB, to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit. */ diskAutoresizeLimit: number; /** * The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased. The minimum value is 10GB for PD_SSD, PD_HDD and 20GB for HYPERDISK_BALANCED. */ diskSize: number; /** * The type of supported data disk is tier dependent and can be PD_SSD or PD_HDD or HYPERDISK_BALANCED. */ diskType: string; /** * The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS. */ edition: string; /** * The availability type of the Cloud SQL instance, high availability * (REGIONAL) or single zone (ZONAL). This field always contains the value that is reported by the * API (for read pools, effectiveAvailabilityType may differ from availability_type). */ effectiveAvailabilityType: string; /** * Enables Dataplex Integration. */ enableDataplexIntegration: boolean; /** * Enables Vertex AI Integration. */ enableGoogleMlIntegration: boolean; /** * Config used to determine the final backup settings for the instance */ finalBackupConfigs: outputs.sql.GetDatabaseInstanceSettingFinalBackupConfig[]; /** * Configuration of Query Insights. 
*/ insightsConfigs: outputs.sql.GetDatabaseInstanceSettingInsightsConfig[]; ipConfigurations: outputs.sql.GetDatabaseInstanceSettingIpConfiguration[]; locationPreferences: outputs.sql.GetDatabaseInstanceSettingLocationPreference[]; /** * Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. The maintenance window is specified in UTC time. */ maintenanceWindows: outputs.sql.GetDatabaseInstanceSettingMaintenanceWindow[]; passwordValidationPolicies: outputs.sql.GetDatabaseInstanceSettingPasswordValidationPolicy[]; /** * Pricing plan for this instance, can only be PER_USE. */ pricingPlan: string; /** * Configuration of Read Pool Auto Scale. */ readPoolAutoScaleConfigs: outputs.sql.GetDatabaseInstanceSettingReadPoolAutoScaleConfig[]; /** * When this parameter is set to true, Cloud SQL retains backups of the instance even after the instance is deleted. The ON_DEMAND backup will be retained until customer deletes the backup or the project. The AUTOMATED backup will be retained based on the backups retention setting. */ retainBackupsOnDelete: boolean; sqlServerAuditConfigs: outputs.sql.GetDatabaseInstanceSettingSqlServerAuditConfig[]; /** * The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. See the Custom Machine Type Documentation to learn about specifying custom machine types. */ tier: string; /** * The timeZone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format. */ timeZone: string; /** * A set of key/value user label pairs to assign to the instance. */ userLabels: { [key: string]: string; }; /** * Used to make sure changes to the settings block are atomic. */ version: number; } interface GetDatabaseInstanceSettingActiveDirectoryConfig { /** * Domain name of the Active Directory for SQL Server (e.g., mydomain.com). 
*/ domain: string; } interface GetDatabaseInstanceSettingAdvancedMachineFeature { /** * The number of threads per physical core. Can be 1 or 2. */ threadsPerCore: number; } interface GetDatabaseInstanceSettingBackupConfiguration { backupRetentionSettings: outputs.sql.GetDatabaseInstanceSettingBackupConfigurationBackupRetentionSetting[]; /** * Backup tier that manages the backups for the instance. */ backupTier: string; /** * True if binary logging is enabled. If settings.backup_configuration.enabled is false, this must be as well. Can only be used with MySQL. */ binaryLogEnabled: boolean; /** * True if backup configuration is enabled. */ enabled: boolean; /** * Location of the backup configuration. */ location: string; /** * True if Point-in-time recovery is enabled. */ pointInTimeRecoveryEnabled: boolean; /** * HH:MM format time indicating when backup configuration starts. */ startTime: string; /** * The number of days of transaction logs we retain for point in time restore, from 1-7. (For PostgreSQL Enterprise Plus instances, from 1 to 35.) */ transactionLogRetentionDays: number; } interface GetDatabaseInstanceSettingBackupConfigurationBackupRetentionSetting { /** * Number of backups to retain. */ retainedBackups: number; /** * The unit that 'retainedBackups' represents. Defaults to COUNT */ retentionUnit: string; } interface GetDatabaseInstanceSettingConnectionPoolConfig { /** * Whether Managed Connection Pool is enabled for this instance. */ connectionPoolingEnabled: boolean; /** * List of connection pool configuration flags */ flags: outputs.sql.GetDatabaseInstanceSettingConnectionPoolConfigFlag[]; } interface GetDatabaseInstanceSettingConnectionPoolConfigFlag { /** * The name of the instance. */ name: string; /** * Value of the flag. */ value: string; } interface GetDatabaseInstanceSettingDataCacheConfig { /** * Whether data cache is enabled for the instance. 
*/ dataCacheEnabled: boolean; } interface GetDatabaseInstanceSettingDatabaseFlag { /** * Name of the flag. */ name: string; /** * Value of the flag. */ value: string; } interface GetDatabaseInstanceSettingDenyMaintenancePeriod { /** * End date before which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01 */ endDate: string; /** * Start date after which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01 */ startDate: string; /** * Time in UTC when the "deny maintenance period" starts on startDate and ends on end_date. The time is in format: HH:mm:SS, i.e., 00:00:00 */ time: string; } interface GetDatabaseInstanceSettingFinalBackupConfig { /** * When this parameter is set to true, the final backup is enabled for the instance */ enabled: boolean; /** * The number of days to retain the final backup after the instance deletion. The valid range is between 1 and 365. For instances managed by BackupDR, the valid range is between 1 day and 99 years. The final backup will be purged at (time_of_instance_deletion + retention_days). */ retentionDays: number; } interface GetDatabaseInstanceSettingInsightsConfig { /** * True if Query Insights feature is enabled. */ queryInsightsEnabled: boolean; /** * Number of query execution plans captured by Insights per minute for all queries combined. Between 0 and 20. Default to 5. For Enterprise Plus instances, from 0 to 200. */ queryPlansPerMinute: number; /** * Maximum query length stored in bytes. Between 256 and 4500. Default to 1024. For Enterprise Plus instances, from 1 to 1048576. */ queryStringLength: number; /** * True if Query Insights will record application tags from query when enabled. */ recordApplicationTags: boolean; /** * True if Query Insights will record client address when enabled. 
*/ recordClientAddress: boolean; } interface GetDatabaseInstanceSettingIpConfiguration { /** * The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with RFC 1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange: string; authorizedNetworks: outputs.sql.GetDatabaseInstanceSettingIpConfigurationAuthorizedNetwork[]; /** * The custom subject alternative names for an instance with "CUSTOMER_MANAGED_CAS_CA" as the "serverCaMode". */ customSubjectAlternativeNames: string[]; /** * Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported. */ enablePrivatePathForGoogleCloudServices: boolean; /** * Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4Enabled must be enabled or a privateNetwork must be configured. */ ipv4Enabled: boolean; /** * The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4Enabled must be enabled or a privateNetwork must be configured. This setting can be updated, but it cannot be removed after it is set. */ privateNetwork: string; /** * PSC settings for a Cloud SQL instance. */ pscConfigs: outputs.sql.GetDatabaseInstanceSettingIpConfigurationPscConfig[]; /** * Specify how the server certificate's Certificate Authority is hosted. */ serverCaMode: string; /** * The resource name of the server CA pool for an instance with "CUSTOMER_MANAGED_CAS_CA" as the "serverCaMode". */ serverCaPool: string; /** * Specify how SSL connection should be enforced in DB connections. 
*/ sslMode: string; } interface GetDatabaseInstanceSettingIpConfigurationAuthorizedNetwork { expirationTime: string; /** * A name for this authorized network entry. */ name: string; value: string; } interface GetDatabaseInstanceSettingIpConfigurationPscConfig { /** * List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric). */ allowedConsumerProjects: string[]; /** * Name of network attachment resource used to authorize a producer service to connect a PSC interface to the consumer's VPC. For example: "projects/myProject/regions/myRegion/networkAttachments/myNetworkAttachment". This is required to enable outbound connection on a PSC instance. */ networkAttachmentUri: string; /** * A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. */ pscAutoConnections: outputs.sql.GetDatabaseInstanceSettingIpConfigurationPscConfigPscAutoConnection[]; /** * Whether PSC connectivity is enabled for this instance. */ pscEnabled: boolean; } interface GetDatabaseInstanceSettingIpConfigurationPscConfigPscAutoConnection { /** * The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. The consumer host project of this network might be different from the consumer service project. */ consumerNetwork: string; /** * The connection policy status of the consumer network. */ consumerNetworkStatus: string; /** * The project ID of consumer service project of this consumer endpoint. */ consumerServiceProjectId: string; /** * The IP address of the consumer endpoint. 
*/ ipAddress: string; /** * The connection status of the consumer endpoint. */ status: string; } interface GetDatabaseInstanceSettingLocationPreference { /** * A Google App Engine application whose zone to remain in. Must be in the same region as this instance. */ followGaeApplication: string; /** * The preferred Compute Engine zone for the secondary/failover */ secondaryZone: string; /** * The preferred compute engine zone. */ zone: string; } interface GetDatabaseInstanceSettingMaintenanceWindow { /** * Day of week (1-7), starting on Monday */ day: number; /** * Hour of day (0-23), ignored if day not set */ hour: number; /** * Receive updates after one week (canary) or after two weeks (stable) or after five weeks (week5) of notification. */ updateTrack: string; } interface GetDatabaseInstanceSettingPasswordValidationPolicy { /** * Password complexity. */ complexity: string; /** * Disallow username as a part of the password. */ disallowUsernameSubstring: boolean; /** * Whether the password policy is enabled or not. */ enablePasswordPolicy: boolean; /** * Minimum number of characters allowed. */ minLength: number; /** * Minimum interval after which the password can be changed. This flag is only supported for PostgresSQL. */ passwordChangeInterval: string; /** * Number of previous passwords that cannot be reused. */ reuseInterval: number; } interface GetDatabaseInstanceSettingReadPoolAutoScaleConfig { /** * True if auto scale in is disabled. */ disableScaleIn: boolean; /** * True if Read Pool Auto Scale is enabled. */ enabled: boolean; /** * Maximum number of nodes in the read pool. If set to lower than current node count, node count will be updated. */ maxNodeCount: number; /** * Minimum number of nodes in the read pool. If set to higher than current node count, node count will be updated. */ minNodeCount: number; /** * The cooldown period for scale in operations. */ scaleInCooldownSeconds: number; /** * The cooldown period for scale out operations. 
*/ scaleOutCooldownSeconds: number; /** * Target metrics for Read Pool Auto Scale. */ targetMetrics: outputs.sql.GetDatabaseInstanceSettingReadPoolAutoScaleConfigTargetMetric[]; } interface GetDatabaseInstanceSettingReadPoolAutoScaleConfigTargetMetric { /** * Metric name for Read Pool Auto Scale. */ metric: string; /** * Target value for Read Pool Auto Scale. */ targetValue: number; } interface GetDatabaseInstanceSettingSqlServerAuditConfig { /** * The name of the destination bucket (e.g., gs://mybucket). */ bucket: string; /** * How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".. */ retentionInterval: string; /** * How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ uploadInterval: string; } interface GetDatabaseInstancesInstance { /** * Available Maintenance versions. */ availableMaintenanceVersions: string[]; /** * The name of the BackupDR backup to restore from. */ backupdrBackup: string; /** * Configuration for creating a new instance as a clone of another instance. */ clones: outputs.sql.GetDatabaseInstancesInstanceClone[]; /** * The connection name of the instance to be used in connection strings. For example, when connecting with Cloud SQL Proxy. */ connectionName: string; /** * To filter out the Cloud SQL instances which are of the specified database version. */ databaseVersion: string; /** * Used to block Terraform from deleting a SQL Instance. Defaults to true. */ deletionProtection: boolean; /** * The instance-level dns name of the instance for PSC instances or public IP CAS instances. */ dnsName: string; /** * The list of DNS names used by this instance. Different connection types for an instance may have different DNS names. DNS names can apply to an individual instance or a cluster of instances. 
*/ dnsNames: outputs.sql.GetDatabaseInstancesInstanceDnsName[]; encryptionKeyName: string; /** * The description of final backup if instance enable create final backup during instance deletion. */ finalBackupDescription: string; /** * The first IPv4 address of any type assigned. This is to support accessing the first address in the list in a terraform output when the resource is configured with a count. */ firstIpAddress: string; /** * The type of the instance. See https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1/instances#SqlInstanceType for supported values. */ instanceType: string; ipAddresses: outputs.sql.GetDatabaseInstancesInstanceIpAddress[]; /** * Maintenance version. */ maintenanceVersion: string; /** * The name of the instance that will act as the master in the replication setup. Note, this requires the master to have binaryLogEnabled set, as well as existing backups. */ masterInstanceName: string; /** * The name of the instance. If the name is left blank, Terraform will randomly generate one when the instance is first created. This is done because after a name is used, it cannot be reused for up to one week. */ name: string; /** * For a read pool instance, the number of nodes in the read pool. For read pools with auto scaling enabled, this field is read only. */ nodeCount: number; /** * Configuration for creating a new instance using point-in-time-restore from backupdr backup. */ pointInTimeRestoreContexts: outputs.sql.GetDatabaseInstancesInstancePointInTimeRestoreContext[]; /** * IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config. */ privateIpAddress: string; /** * The ID of the project in which the resources belong. If it is not provided, the provider project is used. */ project: string; /** * The link to service attachment of PSC instance. 
*/ pscServiceAttachmentLink: string; /** * IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config. */ publicIpAddress: string; /** * To filter out the Cloud SQL instances which are located in the specified region. */ region: string; /** * The configuration for replication. */ replicaConfigurations: outputs.sql.GetDatabaseInstancesInstanceReplicaConfiguration[]; /** * The replicas of the instance. */ replicaNames: string[]; /** * A primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. This field can be set if the primary has psaWriteEndpoint set or both the primary and replica are created. */ replicationClusters: outputs.sql.GetDatabaseInstancesInstanceReplicationCluster[]; restoreBackupContexts: outputs.sql.GetDatabaseInstancesInstanceRestoreBackupContext[]; /** * Initial root password. Required for MS SQL Server. */ rootPassword: string; /** * Initial root password. Required for MS SQL Server. * Note: This property is write-only and will not be read from the API. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ rootPasswordWo: string; /** * Triggers update of rootPasswordWo write-only. For more info see [updating write-only arguments](https://www.terraform.io/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments) */ rootPasswordWoVersion: string; /** * The URI of the created resource. */ selfLink: string; serverCaCerts: outputs.sql.GetDatabaseInstancesInstanceServerCaCert[]; /** * The service account email address assigned to the instance. */ serviceAccountEmailAddress: string; /** * The settings to use for the database. The configuration is detailed below. 
*/ settings: outputs.sql.GetDatabaseInstancesInstanceSetting[]; } interface GetDatabaseInstancesInstanceClone { /** * The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange: string; /** * (SQL Server only, use with point_in_time) clone only the specified databases from the source instance. Clone all databases if empty. */ databaseNames: string[]; /** * The timestamp of the point in time that should be restored. */ pointInTime: string; /** * (Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. If no zone is specified, clone to the same zone as the source instance. */ preferredZone: string; /** * The timestamp of when the source instance was deleted for a clone from a deleted instance. */ sourceInstanceDeletionTime: string; /** * The name of the instance from which the point in time should be restored. */ sourceInstanceName: string; } interface GetDatabaseInstancesInstanceDnsName { connectionType: string; dnsScope: string; name: string; } interface GetDatabaseInstancesInstanceIpAddress { ipAddress: string; timeToRetire: string; type: string; } interface GetDatabaseInstancesInstancePointInTimeRestoreContext { /** * The name of the allocated IP range for the internal IP Cloud SQL instance. For example: "google-managed-services-default". If you set this, then Cloud SQL creates the IP address for the cloned instance in the allocated range. This range must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035) standards. Specifically, the name must be 1-63 characters long and match the regular expression a-z?. */ allocatedIpRange: string; /** * The Google Cloud Backup and Disaster Recovery Datasource URI. 
For example: "projects/my-project/locations/us-central1/datasources/my-datasource". */ datasource: string; /** * The date and time to which you want to restore the instance. */ pointInTime: string; /** * Point-in-time recovery of an instance to the specified zone. If no zone is specified, then clone to the same primary zone as the source instance. */ preferredZone: string; /** * The name of the target instance to restore to. */ targetInstance: string; } interface GetDatabaseInstancesInstanceReplicaConfiguration { /** * PEM representation of the trusted CA's x509 certificate. */ caCertificate: string; /** * Specifies if a SQL Server replica is a cascadable replica. A cascadable replica is a SQL Server cross region replica that supports replica(s) under it. */ cascadableReplica: boolean; /** * PEM representation of the replica's x509 certificate. */ clientCertificate: string; /** * PEM representation of the replica's private key. The corresponding public key in encoded in the client_certificate. */ clientKey: string; /** * The number of seconds between connect retries. MySQL's default is 60 seconds. */ connectRetryInterval: number; /** * Path to a SQL file in Google Cloud Storage from which replica instances are created. Format is gs://bucket/filename. */ dumpFilePath: string; /** * Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. If the master instance fails, the replica instance will be promoted as the new master instance. Not supported for Postgres */ failoverTarget: boolean; /** * Time in ms between replication heartbeats. */ masterHeartbeatPeriod: number; /** * Password for the replication connection. */ password: string; /** * Permissible ciphers for use in SSL encryption. */ sslCipher: string; /** * Username for replication connection. */ username: string; /** * True if the master's common name value is checked during the SSL handshake. 
*/ verifyServerCertificate: boolean; } interface GetDatabaseInstancesInstanceReplicationCluster { /** * Read-only field that indicates whether the replica is a DR replica. */ drReplica: boolean; /** * If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. The standard format of this field is "your-project:your-instance". You can also set this field to "your-instance", but cloud SQL backend will convert it to the aforementioned standard format. */ failoverDrReplicaName: string; /** * If set, this field indicates this instance has a private service access (PSA) DNS endpoint that is pointing to the primary instance of the cluster. If this instance is the primary, then the DNS endpoint points to this instance. After a switchover or replica failover operation, this DNS endpoint points to the promoted instance. This is a read-only field, returned to the user as information. This field can exist even if a standalone instance doesn't have a DR replica yet or the DR replica is deleted. */ psaWriteEndpoint: string; } interface GetDatabaseInstancesInstanceRestoreBackupContext { /** * The ID of the backup run to restore from. */ backupRunId: number; /** * The ID of the instance that the backup was taken from. */ instanceId: string; /** * The ID of the project in which the resources belong. If it is not provided, the provider project is used. */ project: string; } interface GetDatabaseInstancesInstanceServerCaCert { /** * The CA Certificate used to connect to the SQL Instance via SSL. */ cert: string; /** * The CN valid for the CA Cert. */ commonName: string; /** * Creation time of the CA Cert. */ createTime: string; /** * Expiration time of the CA Cert. */ expirationTime: string; /** * SHA Fingerprint of the CA Cert. */ sha1Fingerprint: string; } interface GetDatabaseInstancesInstanceSetting { /** * This specifies when the instance should be active. Can be either ALWAYS, NEVER or ON_DEMAND. 
*/ activationPolicy: string; activeDirectoryConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingActiveDirectoryConfig[]; advancedMachineFeatures: outputs.sql.GetDatabaseInstancesInstanceSettingAdvancedMachineFeature[]; /** * The availability type of the Cloud SQL instance, high availability * (REGIONAL) or single zone (ZONAL). For all instances, ensure that * settings.backup_configuration.enabled is set to true. * For MySQL instances, ensure that settings.backup_configuration.binary_log_enabled is set to true. * For Postgres instances, ensure that settings.backup_configuration.point_in_time_recovery_enabled * is set to true. Defaults to ZONAL. * For read pool instances, this field is read-only. The availability type is changed by specifying * the number of nodes (node_count). */ availabilityType: string; backupConfigurations: outputs.sql.GetDatabaseInstancesInstanceSettingBackupConfiguration[]; /** * The name of server instance collation. */ collation: string; /** * The managed connection pool setting for a Cloud SQL instance. */ connectionPoolConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingConnectionPoolConfig[]; /** * Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected. */ connectorEnforcement: string; /** * Data cache configurations. */ dataCacheConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingDataCacheConfig[]; /** * Provisioned number of I/O operations per second for the data disk. This field is only used for HYPERDISK_BALANCED disk types. */ dataDiskProvisionedIops: number; /** * Provisioned throughput measured in MiB per second for the data disk. This field is only used for HYPERDISK_BALANCED disk types. */ dataDiskProvisionedThroughput: number; databaseFlags: outputs.sql.GetDatabaseInstancesInstanceSettingDatabaseFlag[]; /** * Configuration to protect against accidental instance deletion. 
*/ deletionProtectionEnabled: boolean; denyMaintenancePeriods: outputs.sql.GetDatabaseInstancesInstanceSettingDenyMaintenancePeriod[]; /** * Enables auto-resizing of the storage size. Defaults to true. */ diskAutoresize: boolean; /** * The maximum size, in GB, to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit. */ diskAutoresizeLimit: number; /** * The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased. The minimum value is 10GB for PD_SSD, PD_HDD and 20GB for HYPERDISK_BALANCED. */ diskSize: number; /** * The type of supported data disk is tier dependent and can be PD_SSD or PD_HDD or HYPERDISK_BALANCED. */ diskType: string; /** * The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS. */ edition: string; /** * The availability type of the Cloud SQL instance, high availability * (REGIONAL) or single zone (ZONAL). This field always contains the value that is reported by the * API (for read pools, effectiveAvailabilityType may differ from availability_type). */ effectiveAvailabilityType: string; /** * Enables Dataplex Integration. */ enableDataplexIntegration: boolean; /** * Enables Vertex AI Integration. */ enableGoogleMlIntegration: boolean; /** * Config used to determine the final backup settings for the instance */ finalBackupConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingFinalBackupConfig[]; /** * Configuration of Query Insights. */ insightsConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingInsightsConfig[]; ipConfigurations: outputs.sql.GetDatabaseInstancesInstanceSettingIpConfiguration[]; locationPreferences: outputs.sql.GetDatabaseInstancesInstanceSettingLocationPreference[]; /** * Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. The maintenance window is specified in UTC time. 
*/ maintenanceWindows: outputs.sql.GetDatabaseInstancesInstanceSettingMaintenanceWindow[]; passwordValidationPolicies: outputs.sql.GetDatabaseInstancesInstanceSettingPasswordValidationPolicy[]; /** * Pricing plan for this instance, can only be PER_USE. */ pricingPlan: string; /** * Configuration of Read Pool Auto Scale. */ readPoolAutoScaleConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingReadPoolAutoScaleConfig[]; /** * When this parameter is set to true, Cloud SQL retains backups of the instance even after the instance is deleted. The ON_DEMAND backup will be retained until customer deletes the backup or the project. The AUTOMATED backup will be retained based on the backups retention setting. */ retainBackupsOnDelete: boolean; sqlServerAuditConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingSqlServerAuditConfig[]; /** * To filter out the Cloud SQL instances based on the tier(or machine type) of the database instances. */ tier: string; /** * The timeZone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format. */ timeZone: string; /** * A set of key/value user label pairs to assign to the instance. */ userLabels: { [key: string]: string; }; /** * Used to make sure changes to the settings block are atomic. */ version: number; } interface GetDatabaseInstancesInstanceSettingActiveDirectoryConfig { /** * Domain name of the Active Directory for SQL Server (e.g., mydomain.com). */ domain: string; } interface GetDatabaseInstancesInstanceSettingAdvancedMachineFeature { /** * The number of threads per physical core. Can be 1 or 2. */ threadsPerCore: number; } interface GetDatabaseInstancesInstanceSettingBackupConfiguration { backupRetentionSettings: outputs.sql.GetDatabaseInstancesInstanceSettingBackupConfigurationBackupRetentionSetting[]; /** * Backup tier that manages the backups for the instance. */ backupTier: string; /** * True if binary logging is enabled. 
If settings.backup_configuration.enabled is false, this must be as well. Can only be used with MySQL. */ binaryLogEnabled: boolean; /** * True if backup configuration is enabled. */ enabled: boolean; /** * Location of the backup configuration. */ location: string; /** * True if Point-in-time recovery is enabled. */ pointInTimeRecoveryEnabled: boolean; /** * HH:MM format time indicating when backup configuration starts. */ startTime: string; /** * The number of days of transaction logs we retain for point in time restore, from 1-7. (For PostgreSQL Enterprise Plus instances, from 1 to 35.) */ transactionLogRetentionDays: number; } interface GetDatabaseInstancesInstanceSettingBackupConfigurationBackupRetentionSetting { /** * Number of backups to retain. */ retainedBackups: number; /** * The unit that 'retainedBackups' represents. Defaults to COUNT */ retentionUnit: string; } interface GetDatabaseInstancesInstanceSettingConnectionPoolConfig { /** * Whether Managed Connection Pool is enabled for this instance. */ connectionPoolingEnabled: boolean; /** * List of connection pool configuration flags */ flags: outputs.sql.GetDatabaseInstancesInstanceSettingConnectionPoolConfigFlag[]; } interface GetDatabaseInstancesInstanceSettingConnectionPoolConfigFlag { /** * Name of the flag. */ name: string; /** * Value of the flag. */ value: string; } interface GetDatabaseInstancesInstanceSettingDataCacheConfig { /** * Whether data cache is enabled for the instance. */ dataCacheEnabled: boolean; } interface GetDatabaseInstancesInstanceSettingDatabaseFlag { /** * Name of the flag. */ name: string; /** * Value of the flag. */ value: string; } interface GetDatabaseInstancesInstanceSettingDenyMaintenancePeriod { /** * End date before which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01 */ endDate: string; /** * Start date after which maintenance will not take place. 
The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01 */ startDate: string; /** * Time in UTC when the "deny maintenance period" starts on startDate and ends on end_date. The time is in format: HH:mm:SS, i.e., 00:00:00 */ time: string; } interface GetDatabaseInstancesInstanceSettingFinalBackupConfig { /** * When this parameter is set to true, the final backup is enabled for the instance */ enabled: boolean; /** * The number of days to retain the final backup after the instance deletion. The valid range is between 1 and 365. For instances managed by BackupDR, the valid range is between 1 day and 99 years. The final backup will be purged at (time_of_instance_deletion + retention_days). */ retentionDays: number; } interface GetDatabaseInstancesInstanceSettingInsightsConfig { /** * True if Query Insights feature is enabled. */ queryInsightsEnabled: boolean; /** * Number of query execution plans captured by Insights per minute for all queries combined. Between 0 and 20. Default to 5. For Enterprise Plus instances, from 0 to 200. */ queryPlansPerMinute: number; /** * Maximum query length stored in bytes. Between 256 and 4500. Default to 1024. For Enterprise Plus instances, from 1 to 1048576. */ queryStringLength: number; /** * True if Query Insights will record application tags from query when enabled. */ recordApplicationTags: boolean; /** * True if Query Insights will record client address when enabled. */ recordClientAddress: boolean; } interface GetDatabaseInstancesInstanceSettingIpConfiguration { /** * The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with RFC 1035. Specifically, the name must be 1-63 characters long and match the regular expression a-z?. 
*/ allocatedIpRange: string; authorizedNetworks: outputs.sql.GetDatabaseInstancesInstanceSettingIpConfigurationAuthorizedNetwork[]; /** * The custom subject alternative names for an instance with "CUSTOMER_MANAGED_CAS_CA" as the "serverCaMode". */ customSubjectAlternativeNames: string[]; /** * Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported. */ enablePrivatePathForGoogleCloudServices: boolean; /** * Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4Enabled must be enabled or a privateNetwork must be configured. */ ipv4Enabled: boolean; /** * The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4Enabled must be enabled or a privateNetwork must be configured. This setting can be updated, but it cannot be removed after it is set. */ privateNetwork: string; /** * PSC settings for a Cloud SQL instance. */ pscConfigs: outputs.sql.GetDatabaseInstancesInstanceSettingIpConfigurationPscConfig[]; /** * Specify how the server certificate's Certificate Authority is hosted. */ serverCaMode: string; /** * The resource name of the server CA pool for an instance with "CUSTOMER_MANAGED_CAS_CA" as the "serverCaMode". */ serverCaPool: string; /** * Specify how SSL connection should be enforced in DB connections. */ sslMode: string; } interface GetDatabaseInstancesInstanceSettingIpConfigurationAuthorizedNetwork { expirationTime: string; name: string; value: string; } interface GetDatabaseInstancesInstanceSettingIpConfigurationPscConfig { /** * List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. 
Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric). */ allowedConsumerProjects: string[]; /** * Name of network attachment resource used to authorize a producer service to connect a PSC interface to the consumer's VPC. For example: "projects/myProject/regions/myRegion/networkAttachments/myNetworkAttachment". This is required to enable outbound connection on a PSC instance. */ networkAttachmentUri: string; /** * A comma-separated list of networks or a comma-separated list of network-project pairs. Each project in this list is represented by a project number (numeric) or by a project ID (alphanumeric). This allows Private Service Connect connections to be created automatically for the specified networks. */ pscAutoConnections: outputs.sql.GetDatabaseInstancesInstanceSettingIpConfigurationPscConfigPscAutoConnection[]; /** * Whether PSC connectivity is enabled for this instance. */ pscEnabled: boolean; } interface GetDatabaseInstancesInstanceSettingIpConfigurationPscConfigPscAutoConnection { /** * The consumer network of this consumer endpoint. This must be a resource path that includes both the host project and the network name. The consumer host project of this network might be different from the consumer service project. */ consumerNetwork: string; /** * The connection policy status of the consumer network. */ consumerNetworkStatus: string; /** * The project ID of consumer service project of this consumer endpoint. */ consumerServiceProjectId: string; /** * The IP address of the consumer endpoint. */ ipAddress: string; /** * The connection status of the consumer endpoint. */ status: string; } interface GetDatabaseInstancesInstanceSettingLocationPreference { /** * A Google App Engine application whose zone to remain in. Must be in the same region as this instance. 
*/ followGaeApplication: string; /** * The preferred Compute Engine zone for the secondary/failover */ secondaryZone: string; /** * To filter out the Cloud SQL instances which are located in the specified zone. This zone refers to the Compute Engine zone that the instance is currently serving from. */ zone: string; } interface GetDatabaseInstancesInstanceSettingMaintenanceWindow { /** * Day of week (1-7), starting on Monday */ day: number; /** * Hour of day (0-23), ignored if day not set */ hour: number; /** * Receive updates after one week (canary) or after two weeks (stable) or after five weeks (week5) of notification. */ updateTrack: string; } interface GetDatabaseInstancesInstanceSettingPasswordValidationPolicy { /** * Password complexity. */ complexity: string; /** * Disallow username as a part of the password. */ disallowUsernameSubstring: boolean; /** * Whether the password policy is enabled or not. */ enablePasswordPolicy: boolean; /** * Minimum number of characters allowed. */ minLength: number; /** * Minimum interval after which the password can be changed. This flag is only supported for PostgresSQL. */ passwordChangeInterval: string; /** * Number of previous passwords that cannot be reused. */ reuseInterval: number; } interface GetDatabaseInstancesInstanceSettingReadPoolAutoScaleConfig { /** * True if auto scale in is disabled. */ disableScaleIn: boolean; /** * True if Read Pool Auto Scale is enabled. */ enabled: boolean; /** * Maximum number of nodes in the read pool. If set to lower than current node count, node count will be updated. */ maxNodeCount: number; /** * Minimum number of nodes in the read pool. If set to higher than current node count, node count will be updated. */ minNodeCount: number; /** * The cooldown period for scale in operations. */ scaleInCooldownSeconds: number; /** * The cooldown period for scale out operations. */ scaleOutCooldownSeconds: number; /** * Target metrics for Read Pool Auto Scale. 
*/ targetMetrics: outputs.sql.GetDatabaseInstancesInstanceSettingReadPoolAutoScaleConfigTargetMetric[]; } interface GetDatabaseInstancesInstanceSettingReadPoolAutoScaleConfigTargetMetric { /** * Metric name for Read Pool Auto Scale. */ metric: string; /** * Target value for Read Pool Auto Scale. */ targetValue: number; } interface GetDatabaseInstancesInstanceSettingSqlServerAuditConfig { /** * The name of the destination bucket (e.g., gs://mybucket). */ bucket: string; /** * How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".. */ retentionInterval: string; /** * How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ uploadInterval: string; } interface GetDatabasesDatabase { /** * The charset value. See MySQL's * [Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) * and Postgres' [Character Set Support](https://www.postgresql.org/docs/9.6/static/multibyte.html) * for more details and supported values. Postgres databases only support * a value of 'UTF8' at creation time. */ charset: string; /** * The collation value. See MySQL's * [Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) * and Postgres' [Collation Support](https://www.postgresql.org/docs/9.6/static/collation.html) * for more details and supported values. Postgres databases only support * a value of 'en_US.UTF8' at creation time. */ collation: string; /** * The deletion policy for the database. Setting ABANDON allows the resource * to be abandoned rather than deleted. This is useful for Postgres, where databases cannot be * deleted from the API if there are users other than cloudsqlsuperuser with access. Possible * values are: "ABANDON", "DELETE". Defaults to "DELETE". 
*/ deletionPolicy: string; /** * The name of the Cloud SQL database instance in which the database belongs. */ instance: string; /** * The name of the database in the Cloud SQL instance. * This does not include the project ID or instance name. */ name: string; /** * The ID of the project in which the instance belongs. * * > **Note** This datasource performs client-side sorting to provide consistent ordering of the databases. */ project: string; selfLink: string; } interface GetTiersTier { /** * The maximum disk size of this tier in bytes. */ diskQuota: number; /** * The maximum ram usage of this tier in bytes. */ ram: number; /** * The applicable regions for this tier. */ regions: string[]; /** * An identifier for the machine type, for example, db-custom-1-3840. */ tier: string; } interface UserPasswordPolicy { /** * Number of failed attempts allowed before the user get locked. */ allowedFailedAttempts?: number; /** * If true, the check that will lock user after too many failed login attempts will be enabled. */ enableFailedAttemptsCheck?: boolean; /** * If true, the user must specify the current password before changing the password. This flag is supported only for MySQL. */ enablePasswordVerification?: boolean; /** * Password expiration duration with one week grace period. */ passwordExpirationDuration?: string; statuses: outputs.sql.UserPasswordPolicyStatus[]; } interface UserPasswordPolicyStatus { /** * If true, user does not have login privileges. */ locked: boolean; /** * Password expiration duration with one week grace period. */ passwordExpirationTime: string; } interface UserSqlServerUserDetail { /** * If the user has been disabled. */ disabled: boolean; /** * The server roles for this user in the database. */ serverRoles: string[]; } } export declare namespace storage { interface BatchOperationsJobBucketList { /** * List of buckets and their objects to be transformed. * Structure is documented below. 
*/ buckets: outputs.storage.BatchOperationsJobBucketListBuckets; } interface BatchOperationsJobBucketListBuckets { /** * Bucket name for the objects to be transformed. */ bucket: string; /** * contain the manifest source file that is a CSV file in a Google Cloud Storage bucket. * Structure is documented below. */ manifest?: outputs.storage.BatchOperationsJobBucketListBucketsManifest; /** * Specifies objects matching a prefix set. * Structure is documented below. */ prefixList?: outputs.storage.BatchOperationsJobBucketListBucketsPrefixList; } interface BatchOperationsJobBucketListBucketsManifest { /** * Specifies objects in a manifest file. */ manifestLocation?: string; } interface BatchOperationsJobBucketListBucketsPrefixList { /** * (Optional) */ includedObjectPrefixes?: string[]; } interface BatchOperationsJobDeleteObject { /** * enable flag to permanently delete object and all object versions if versioning is enabled on bucket. */ permanentObjectDeletionEnabled: boolean; } interface BatchOperationsJobPutMetadata { /** * Cache-Control directive to specify caching behavior of object data. If omitted and object is accessible to all anonymous users, the default will be public, max-age=3600 */ cacheControl?: string; /** * Content-Disposition of the object data. */ contentDisposition?: string; /** * Content Encoding of the object data. */ contentEncoding?: string; /** * Content-Language of the object data. */ contentLanguage?: string; /** * Content-Type of the object data. */ contentType?: string; /** * User-provided metadata, in key/value pairs. */ customMetadata?: { [key: string]: string; }; /** * Updates the objects fixed custom time metadata. */ customTime?: string; } interface BatchOperationsJobPutObjectHold { /** * set/unset to update event based hold for objects. */ eventBasedHold?: string; /** * set/unset to update temporary based hold for objects. 
*/ temporaryHold?: string; } interface BatchOperationsJobRewriteObject { /** * valid kms key */ kmsKey: string; } interface BucketAutoclass { /** * While set to `true`, autoclass automatically transitions objects in your bucket to appropriate storage classes based on each object's access pattern. */ enabled: boolean; /** * The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Supported values include: `NEARLINE`, `ARCHIVE`. */ terminalStorageClass: string; } interface BucketCor { /** * The value, in seconds, to return in the [Access-Control-Max-Age header](https://www.w3.org/TR/cors/#access-control-max-age-response-header) used in preflight responses. */ maxAgeSeconds?: number; /** * The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method". */ methods?: string[]; /** * The list of [Origins](https://tools.ietf.org/html/rfc6454) eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin". */ origins?: string[]; /** * The list of HTTP headers other than the [simple response headers](https://www.w3.org/TR/cors/#simple-response-header) to give permission for the user-agent to share across domains. */ responseHeaders?: string[]; } interface BucketCustomPlacementConfig { /** * The list of individual regions that comprise a dual-region bucket. See [Cloud Storage bucket locations](https://cloud.google.com/storage/docs/dual-regions#availability) for a list of acceptable regions. **Note**: If any of the dataLocations changes, it will [recreate the bucket](https://cloud.google.com/storage/docs/locations#key-concepts). */ dataLocations: string[]; } interface BucketEncryption { /** * The `id` of a Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. 
* You must pay attention to whether the crypto key is available in the location that this bucket is created in. * See [the docs](https://cloud.google.com/storage/docs/encryption/using-customer-managed-keys) for more details. * * > As per [the docs](https://cloud.google.com/storage/docs/encryption/using-customer-managed-keys) for customer-managed encryption keys, the IAM policy for the * specified key must permit the [automatic Google Cloud Storage service account](https://cloud.google.com/storage/docs/projects#service-accounts) for the bucket's * project to use the specified key for encryption and decryption operations. * Although the service account email address follows a well-known format, the service account is created on-demand and may not necessarily exist for your project * until a relevant action has occurred which triggers its creation. * You should use the [`gcp.storage.getProjectServiceAccount`](https://www.terraform.io/docs/providers/google/d/storage_project_service_account.html) data source to obtain the email * address for the service account when configuring IAM policy on the Cloud KMS key. * This data source calls an API which creates the account if required, ensuring your provider applies cleanly and repeatedly irrespective of the * state of the project. * You should take care for race conditions when the same provider manages IAM policy on the Cloud KMS crypto key. See the data source page for more details. */ defaultKmsKeyName: string; } interface BucketHierarchicalNamespace { /** * Enables hierarchical namespace for the bucket. */ enabled: boolean; } interface BucketIAMBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. 
This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface BucketIAMMemberCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. * * > **Warning:** This provider considers the `role` and condition contents (`title`+`description`+`expression`) as the * identifier for the binding. This means that if any part of the condition is changed out-of-band, the provider will * consider it to be an entirely different resource and will treat it as such. */ description?: string; /** * Textual representation of an expression in Common Expression Language syntax. */ expression: string; /** * A title for the expression, i.e. a short string describing its purpose. */ title: string; } interface BucketIpFilter { /** * While set `true`, allows all service agents to access the bucket regardless of the IP filter configuration. */ allowAllServiceAgentAccess?: boolean; /** * While set `true`, allows cross-org VPCs in the bucket's IP filter configuration. */ allowCrossOrgVpcs?: boolean; /** * The state of the IP filter configuration. Valid values are `Enabled` and `Disabled`. When set to `Enabled`, IP filtering rules are applied to a bucket and all incoming requests to the bucket are evaluated against these rules. When set to `Disabled`, IP filtering rules are not applied to a bucket. * * **Note**: Once ipFilter is setup, it can either be `Enabled` or `Disabled` and cannot be removed from config. * * **Note**: `allowAllServiceAgentAccess` must be supplied when `mode` is set to `Enabled`, it can be ommited for other values. 
*/ mode: string; /** * The public network IP address ranges that can access the bucket and its data. Structure is documented below. */ publicNetworkSource?: outputs.storage.BucketIpFilterPublicNetworkSource; /** * The list of VPC networks that can access the bucket. Structure is documented below. */ vpcNetworkSources?: outputs.storage.BucketIpFilterVpcNetworkSource[]; } interface BucketIpFilterPublicNetworkSource { /** * The list of public IPv4 and IPv6 CIDR ranges that can access the bucket and its data. */ allowedIpCidrRanges: string[]; } interface BucketIpFilterVpcNetworkSource { /** * The list of public or private IPv4 and IPv6 CIDR ranges that can access the bucket. */ allowedIpCidrRanges: string[]; /** * Name of the network. Format: `projects/PROJECT_ID/global/networks/NETWORK_NAME` */ network: string; } interface BucketLifecycleRule { /** * The Lifecycle Rule's action configuration. A single block of this type is supported. Structure is documented below. */ action: outputs.storage.BucketLifecycleRuleAction; /** * The Lifecycle Rule's condition configuration. A single block of this type is supported. Structure is documented below. */ condition: outputs.storage.BucketLifecycleRuleCondition; } interface BucketLifecycleRuleAction { /** * The target [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of objects affected by this Lifecycle Rule. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`, `ARCHIVE`. */ storageClass?: string; /** * The type of the action of this Lifecycle Rule. Supported values include: `Delete`, `SetStorageClass` and `AbortIncompleteMultipartUpload`. */ type: string; } interface BucketLifecycleRuleCondition { /** * Minimum age of an object in days to satisfy this condition. **Note** To set `0` value of `age`, `sendAgeIfZero` should be set `true` otherwise `0` value of `age` field will be ignored. */ age?: number; /** * A date in the RFC 3339 format YYYY-MM-DD. 
This condition is satisfied when an object is created before midnight of the specified date in UTC. */ createdBefore?: string; /** * A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when the customTime metadata for the object is set to an earlier date than the date used in this lifecycle condition. */ customTimeBefore?: string; /** * Days since the date set in the `customTime` metadata for the object. This condition is satisfied when the current date and time is at least the specified number of days after the `customTime`. Due to a current bug you are unable to set this value to `0` within Terraform. When set to `0` it will be ignored, and your state will treat it as though you supplied no `daysSinceCustomTime` condition. */ daysSinceCustomTime?: number; /** * Relevant only for versioned objects. Number of days elapsed since the noncurrent timestamp of an object. Due to a current bug you are unable to set this value to `0` within Terraform. When set to `0` it will be ignored, and your state will treat it as though you supplied no `daysSinceNoncurrentTime` condition. */ daysSinceNoncurrentTime?: number; /** * One or more matching name prefixes to satisfy this condition. */ matchesPrefixes?: string[]; /** * [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of objects to satisfy this condition. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`, `ARCHIVE`, `DURABLE_REDUCED_AVAILABILITY`. */ matchesStorageClasses?: string[]; /** * One or more matching name suffixes to satisfy this condition. */ matchesSuffixes?: string[]; /** * Relevant only for versioned objects. The date in RFC 3339 (e.g. `2017-06-13`) when the object became nonconcurrent. Due to a current bug you are unable to set this value to `0` within Terraform. When set to `0` it will be ignored, and your state will treat it as though you supplied no `noncurrentTimeBefore` condition. 
*/ noncurrentTimeBefore?: string; /** * Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition. Due to a current bug you are unable to set this value to `0` within Terraform. When set to `0` it will be ignored and your state will treat it as though you supplied no `numNewerVersions` condition. */ numNewerVersions?: number; /** * While set true, `age` value will be sent in the request even for zero value of the field. This field is only useful and required for setting 0 value to the `age` field. It can be used alone or together with `age` attribute. **NOTE** `age` attibute with `0` value will be ommitted from the API request if `sendAgeIfZero` field is having `false` value. */ sendAgeIfZero?: boolean; /** * While set true, `daysSinceCustomTime` value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the `daysSinceCustomTime` field. It can be used alone or together with `daysSinceCustomTime`. */ sendDaysSinceCustomTimeIfZero?: boolean; /** * While set true, `daysSinceNoncurrentTime` value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the `daysSinceNoncurrentTime` field. It can be used alone or together with `daysSinceNoncurrentTime`. */ sendDaysSinceNoncurrentTimeIfZero?: boolean; /** * While set true, `numNewerVersions` value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the `numNewerVersions` field. It can be used alone or together with `numNewerVersions`. */ sendNumNewerVersionsIfZero?: boolean; /** * Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: `"LIVE"`, `"ARCHIVED"`, `"ANY"`. */ withState: string; } interface BucketLogging { /** * The bucket that will receive log objects. */ logBucket: string; /** * The object prefix for log objects. 
If it's not provided, * by default GCS sets this to this bucket's name. */ logObjectPrefix: string; } interface BucketObjectContexts { /** * A list of custom context key-value pairs. */ customs: outputs.storage.BucketObjectContextsCustom[]; } interface BucketObjectContextsCustom { /** * The time when context was first added to the storage object in RFC 3399 format. */ createTime: string; /** * An individual object context. Context keys and their corresponding values must start with an alphanumeric character. */ key: string; /** * The time when context was last updated in RFC 3399 format. * * */ updateTime: string; /** * The value associated with this context. This field holds the primary information for the given context key. */ value: string; } interface BucketObjectCustomerEncryption { /** * Encryption algorithm. Default: AES256 */ encryptionAlgorithm?: string; /** * Base64 encoded Customer-Supplied Encryption Key. */ encryptionKey: string; } interface BucketObjectRetention { /** * The retention policy mode. Either `Locked` or `Unlocked`. */ mode: string; /** * The time to retain the object until in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. * * The `contexts` block supports - */ retainUntilTime: string; } interface BucketRetentionPolicy { /** * If set to `true`, the bucket will be [locked](https://cloud.google.com/storage/docs/using-bucket-lock#lock-bucket) and permanently restrict edits to the bucket's retention policy. Caution: Locking a bucket is an irreversible action. */ isLocked?: boolean; /** * The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds. */ retentionPeriod: string; } interface BucketSoftDeletePolicy { /** * Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format. 
*/ effectiveTime: string; /** * The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. Default value is 604800. The value must be in between 604800(7 days) and 7776000(90 days). **Note**: To disable the soft delete policy on a bucket, This field must be set to 0. */ retentionDurationSeconds?: number; } interface BucketVersioning { /** * While set to `true`, versioning is fully enabled for this bucket. */ enabled: boolean; } interface BucketWebsite { /** * Behaves as the bucket's directory index where * missing objects are treated as potential directories. */ mainPageSuffix?: string; /** * The custom object to return when a requested * resource is not found. */ notFoundPage?: string; } interface ControlFolderIntelligenceConfigEffectiveIntelligenceConfig { /** * (Output) * The `StorageIntelligence` edition that is applicable for the resource. */ effectiveEdition: string; /** * (Output) * The Intelligence config resource that is applied for the target resource. */ intelligenceConfig: string; } interface ControlFolderIntelligenceConfigFilter { /** * Buckets to exclude from the Storage Intelligence plan. * Structure is documented below. */ excludedCloudStorageBuckets?: outputs.storage.ControlFolderIntelligenceConfigFilterExcludedCloudStorageBuckets; /** * Locations to exclude from the Storage Intelligence plan. * Structure is documented below. */ excludedCloudStorageLocations?: outputs.storage.ControlFolderIntelligenceConfigFilterExcludedCloudStorageLocations; /** * Buckets to include in the Storage Intelligence plan. * Structure is documented below. */ includedCloudStorageBuckets?: outputs.storage.ControlFolderIntelligenceConfigFilterIncludedCloudStorageBuckets; /** * Locations to include in the Storage Intelligence plan. * Structure is documented below. 
*/ includedCloudStorageLocations?: outputs.storage.ControlFolderIntelligenceConfigFilterIncludedCloudStorageLocations; } interface ControlFolderIntelligenceConfigFilterExcludedCloudStorageBuckets { /** * List of bucket id regexes to exclude in the storage intelligence plan. */ bucketIdRegexes: string[]; } interface ControlFolderIntelligenceConfigFilterExcludedCloudStorageLocations { /** * List of locations. */ locations: string[]; } interface ControlFolderIntelligenceConfigFilterIncludedCloudStorageBuckets { /** * List of bucket id regexes to exclude in the storage intelligence plan. */ bucketIdRegexes: string[]; } interface ControlFolderIntelligenceConfigFilterIncludedCloudStorageLocations { /** * List of locations. */ locations: string[]; } interface ControlFolderIntelligenceConfigTrialConfig { /** * (Output) * The time at which the trial expires. */ expireTime: string; } interface ControlOrganizationIntelligenceConfigEffectiveIntelligenceConfig { /** * (Output) * The `StorageIntelligence` edition that is applicable for the resource. */ effectiveEdition: string; /** * (Output) * The Intelligence config resource that is applied for the target resource. */ intelligenceConfig: string; } interface ControlOrganizationIntelligenceConfigFilter { /** * Buckets to exclude from the Storage Intelligence plan. * Structure is documented below. */ excludedCloudStorageBuckets?: outputs.storage.ControlOrganizationIntelligenceConfigFilterExcludedCloudStorageBuckets; /** * Locations to exclude from the Storage Intelligence plan. * Structure is documented below. */ excludedCloudStorageLocations?: outputs.storage.ControlOrganizationIntelligenceConfigFilterExcludedCloudStorageLocations; /** * Buckets to include in the Storage Intelligence plan. * Structure is documented below. */ includedCloudStorageBuckets?: outputs.storage.ControlOrganizationIntelligenceConfigFilterIncludedCloudStorageBuckets; /** * Locations to include in the Storage Intelligence plan. 
* Structure is documented below. */ includedCloudStorageLocations?: outputs.storage.ControlOrganizationIntelligenceConfigFilterIncludedCloudStorageLocations; } interface ControlOrganizationIntelligenceConfigFilterExcludedCloudStorageBuckets { /** * List of bucket id regexes to exclude in the storage intelligence plan. */ bucketIdRegexes: string[]; } interface ControlOrganizationIntelligenceConfigFilterExcludedCloudStorageLocations { /** * List of locations. */ locations: string[]; } interface ControlOrganizationIntelligenceConfigFilterIncludedCloudStorageBuckets { /** * List of bucket id regexes to exclude in the storage intelligence plan. */ bucketIdRegexes: string[]; } interface ControlOrganizationIntelligenceConfigFilterIncludedCloudStorageLocations { /** * List of locations. */ locations: string[]; } interface ControlOrganizationIntelligenceConfigTrialConfig { /** * (Output) * The time at which the trial expires. */ expireTime: string; } interface ControlProjectIntelligenceConfigEffectiveIntelligenceConfig { /** * (Output) * The `StorageIntelligence` edition that is applicable for the resource. */ effectiveEdition: string; /** * (Output) * The Intelligence config resource that is applied for the target resource. */ intelligenceConfig: string; } interface ControlProjectIntelligenceConfigFilter { /** * Buckets to exclude from the Storage Intelligence plan. * Structure is documented below. */ excludedCloudStorageBuckets?: outputs.storage.ControlProjectIntelligenceConfigFilterExcludedCloudStorageBuckets; /** * Locations to exclude from the Storage Intelligence plan. * Structure is documented below. */ excludedCloudStorageLocations?: outputs.storage.ControlProjectIntelligenceConfigFilterExcludedCloudStorageLocations; /** * Buckets to include in the Storage Intelligence plan. * Structure is documented below. 
*/ includedCloudStorageBuckets?: outputs.storage.ControlProjectIntelligenceConfigFilterIncludedCloudStorageBuckets; /** * Locations to include in the Storage Intelligence plan. * Structure is documented below. */ includedCloudStorageLocations?: outputs.storage.ControlProjectIntelligenceConfigFilterIncludedCloudStorageLocations; } interface ControlProjectIntelligenceConfigFilterExcludedCloudStorageBuckets { /** * List of bucket id regexes to exclude in the storage intelligence plan. */ bucketIdRegexes: string[]; } interface ControlProjectIntelligenceConfigFilterExcludedCloudStorageLocations { /** * List of locations. */ locations: string[]; } interface ControlProjectIntelligenceConfigFilterIncludedCloudStorageBuckets { /** * List of bucket id regexes to exclude in the storage intelligence plan. */ bucketIdRegexes: string[]; } interface ControlProjectIntelligenceConfigFilterIncludedCloudStorageLocations { /** * List of locations. */ locations: string[]; } interface ControlProjectIntelligenceConfigTrialConfig { /** * (Output) * The time at which the trial expires. */ expireTime: string; } interface DefaultObjectAccessControlProjectTeam { /** * The project team associated with the entity */ projectNumber?: string; /** * The team. * Possible values are: `editors`, `owners`, `viewers`. */ team?: string; } interface GetBucketAutoclass { /** * While set to true, autoclass automatically transitions objects in your bucket to appropriate storage classes based on each object's access pattern. */ enabled: boolean; /** * The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Supported values include: NEARLINE, ARCHIVE. */ terminalStorageClass: string; } interface GetBucketCor { /** * The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses. 
*/ maxAgeSeconds: number; /** * The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method". */ methods: string[]; /** * The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin". */ origins: string[]; /** * The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains. */ responseHeaders: string[]; } interface GetBucketCustomPlacementConfig { /** * The list of individual regions that comprise a dual-region bucket. See the docs for a list of acceptable regions. Note: If any of the dataLocations changes, it will recreate the bucket. */ dataLocations: string[]; } interface GetBucketEncryption { /** * A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. You must pay attention to whether the crypto key is available in the location that this bucket is created in. See the docs for more details. */ defaultKmsKeyName: string; } interface GetBucketHierarchicalNamespace { /** * Set this field true to organize bucket with logical file system structure. */ enabled: boolean; } interface GetBucketIpFilter { /** * Whether to allow all service agents to access the bucket regardless of the IP filter configuration. */ allowAllServiceAgentAccess: boolean; /** * Whether to allow cross-org VPCs in the bucket's IP filter configuration. */ allowCrossOrgVpcs: boolean; /** * The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. */ mode: string; /** * The public network IP address ranges that can access the bucket and its data. */ publicNetworkSources: outputs.storage.GetBucketIpFilterPublicNetworkSource[]; /** * The list of VPC networks that can access the bucket. 
*/ vpcNetworkSources: outputs.storage.GetBucketIpFilterVpcNetworkSource[]; } interface GetBucketIpFilterPublicNetworkSource { /** * The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket. */ allowedIpCidrRanges: string[]; } interface GetBucketIpFilterVpcNetworkSource { /** * The list of public or private IPv4 and IPv6 CIDR ranges that can access the bucket. */ allowedIpCidrRanges: string[]; /** * Name of the network. Format: projects/{PROJECT_ID}/global/networks/{NETWORK_NAME} */ network: string; } interface GetBucketLifecycleRule { /** * The Lifecycle Rule's action configuration. A single block of this type is supported. */ actions: outputs.storage.GetBucketLifecycleRuleAction[]; /** * The Lifecycle Rule's condition configuration. */ conditions: outputs.storage.GetBucketLifecycleRuleCondition[]; } interface GetBucketLifecycleRuleAction { /** * The target Storage Class of objects affected by this Lifecycle Rule. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE. */ storageClass: string; /** * The type of the action of this Lifecycle Rule. Supported values include: Delete, SetStorageClass and AbortIncompleteMultipartUpload. */ type: string; } interface GetBucketLifecycleRuleCondition { /** * Minimum age of an object in days to satisfy this condition. */ age: number; /** * Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. */ createdBefore: string; /** * Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. */ customTimeBefore: string; /** * Number of days elapsed since the user-specified timestamp set on an object. */ daysSinceCustomTime: number; /** * Number of days elapsed since the noncurrent timestamp of an object. This * condition is relevant only for versioned objects. */ daysSinceNoncurrentTime: number; /** * One or more matching name prefixes to satisfy this condition. 
*/ matchesPrefixes: string[]; /** * Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, DURABLE_REDUCED_AVAILABILITY. */ matchesStorageClasses: string[]; /** * One or more matching name suffixes to satisfy this condition. */ matchesSuffixes: string[]; /** * Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. */ noncurrentTimeBefore: string; /** * Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition. */ numNewerVersions: number; /** * While set true, age value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the age field. It can be used alone or together with age. */ sendAgeIfZero: boolean; /** * While set true, daysSinceCustomTime value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the daysSinceCustomTime field. It can be used alone or together with days_since_custom_time. */ sendDaysSinceCustomTimeIfZero: boolean; /** * While set true, daysSinceNoncurrentTime value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the daysSinceNoncurrentTime field. It can be used alone or together with days_since_noncurrent_time. */ sendDaysSinceNoncurrentTimeIfZero: boolean; /** * While set true, numNewerVersions value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the numNewerVersions field. It can be used alone or together with num_newer_versions. */ sendNumNewerVersionsIfZero: boolean; /** * Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: "LIVE", "ARCHIVED", "ANY". */ withState: string; } interface GetBucketLogging { /** * The bucket that will receive log objects. 
*/ logBucket: string; /** * The object prefix for log objects. If it's not provided, by default Google Cloud Storage sets this to this bucket's name. */ logObjectPrefix: string; } interface GetBucketObjectContentContext { /** * A list of custom context key-value pairs. */ customs: outputs.storage.GetBucketObjectContentContextCustom[]; } interface GetBucketObjectContentContextCustom { /** * The time when context was first added to the storage#object in RFC 3339 format. */ createTime: string; /** * An individual object context. Context keys and their corresponding values must start with an alphanumeric character. */ key: string; /** * The time when context was last updated in RFC 3339 format. */ updateTime: string; /** * The value associated with this context. This field holds the primary information for the given context key. */ value: string; } interface GetBucketObjectContentCustomerEncryption { /** * The encryption algorithm. Default: AES256 */ encryptionAlgorithm: string; /** * Base64 encoded customer supplied encryption key. */ encryptionKey: string; } interface GetBucketObjectContentRetention { /** * The object retention mode. Supported values include: "Unlocked", "Locked". */ mode: string; /** * Time in RFC 3339 (e.g. 2030-01-01T02:03:04Z) until which object retention protects this object. */ retainUntilTime: string; } interface GetBucketObjectContentsBucketObject { /** * The content of the object. */ content: string; /** * Base64 encoded version of the object content. * Use this when dealing with binary data. */ contentBase64: string; /** * Base64 encoded SHA512 checksum of file content. */ contentBase64sha512: string; /** * Hex encoded SHA512 checksum of file content. */ contentHexsha512: string; /** * [Content-Type](https://tools.ietf.org/html/rfc7231#section-3.1.1.5) of the object data. */ contentType: string; /** * A url reference to download this object. */ mediaLink: string; /** * The name of the object. 
*/ name: string; /** * A url reference to this object. */ selfLink: string; } interface GetBucketObjectContext { /** * A list of custom context key-value pairs. */ customs: outputs.storage.GetBucketObjectContextCustom[]; } interface GetBucketObjectContextCustom { /** * The time when context was first added to the storage#object in RFC 3339 format. */ createTime: string; /** * An individual object context. Context keys and their corresponding values must start with an alphanumeric character. */ key: string; /** * The time when context was last updated in RFC 3339 format. */ updateTime: string; /** * The value associated with this context. This field holds the primary information for the given context key. */ value: string; } interface GetBucketObjectCustomerEncryption { /** * The encryption algorithm. Default: AES256 */ encryptionAlgorithm: string; /** * Base64 encoded customer supplied encryption key. */ encryptionKey: string; } interface GetBucketObjectRetention { /** * The object retention mode. Supported values include: "Unlocked", "Locked". */ mode: string; /** * Time in RFC 3339 (e.g. 2030-01-01T02:03:04Z) until which object retention protects this object. */ retainUntilTime: string; } interface GetBucketObjectsBucketObject { /** * [Content-Type](https://tools.ietf.org/html/rfc7231#section-3.1.1.5) of the object data. */ contentType: string; /** * A url reference to download this object. */ mediaLink: string; /** * The name of the object. */ name: string; /** * A url reference to this object. */ selfLink: string; /** * The [StorageClass](https://cloud.google.com/storage/docs/storage-classes) of the bucket object. */ storageClass: string; } interface GetBucketRetentionPolicy { /** * If set to true, the bucket will be locked and permanently restrict edits to the bucket's retention policy. Caution: Locking a bucket is an irreversible action. 
*/ isLocked: boolean; /** * The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds. */ retentionPeriod: string; } interface GetBucketSoftDeletePolicy { /** * Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format. */ effectiveTime: string; /** * The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. Default value is 604800. */ retentionDurationSeconds: number; } interface GetBucketVersioning { /** * While set to true, versioning is fully enabled for this bucket. */ enabled: boolean; } interface GetBucketWebsite { /** * Behaves as the bucket's directory index where missing objects are treated as potential directories. */ mainPageSuffix: string; /** * The custom object to return when a requested resource is not found. */ notFoundPage: string; } interface GetBucketsBucket { /** * User-provided bucket labels, in key/value pairs. */ labels: { [key: string]: string; }; /** * The location of the bucket. */ location: string; /** * The name of the bucket. */ name: string; /** * A url reference to the bucket. */ selfLink: string; /** * The [StorageClass](https://cloud.google.com/storage/docs/storage-classes) of the bucket. */ storageClass: string; } interface GetControlFolderIntelligenceConfigEffectiveIntelligenceConfig { /** * The 'StorageIntelligence' edition that is applicable for the resource. */ effectiveEdition: string; /** * The Intelligence config resource that is applied for the target resource. */ intelligenceConfig: string; } interface GetControlFolderIntelligenceConfigFilter { /** * Buckets to exclude from the Storage Intelligence plan. 
*/
    excludedCloudStorageBuckets: outputs.storage.GetControlFolderIntelligenceConfigFilterExcludedCloudStorageBucket[];
    /**
     * Locations to exclude from the Storage Intelligence plan.
     */
    excludedCloudStorageLocations: outputs.storage.GetControlFolderIntelligenceConfigFilterExcludedCloudStorageLocation[];
    /**
     * Buckets to include in the Storage Intelligence plan.
     */
    includedCloudStorageBuckets: outputs.storage.GetControlFolderIntelligenceConfigFilterIncludedCloudStorageBucket[];
    /**
     * Locations to include in the Storage Intelligence plan.
     */
    includedCloudStorageLocations: outputs.storage.GetControlFolderIntelligenceConfigFilterIncludedCloudStorageLocation[];
}
interface GetControlFolderIntelligenceConfigFilterExcludedCloudStorageBucket {
    /**
     * List of bucket id regexes to exclude in the storage intelligence plan.
     */
    bucketIdRegexes: string[];
}
interface GetControlFolderIntelligenceConfigFilterExcludedCloudStorageLocation {
    /**
     * List of locations.
     */
    locations: string[];
}
interface GetControlFolderIntelligenceConfigFilterIncludedCloudStorageBucket {
    /**
     * List of bucket id regexes to include in the storage intelligence plan.
     */
    bucketIdRegexes: string[];
}
interface GetControlFolderIntelligenceConfigFilterIncludedCloudStorageLocation {
    /**
     * List of locations.
     */
    locations: string[];
}
interface GetControlFolderIntelligenceConfigTrialConfig {
    /**
     * The time at which the trial expires.
     */
    expireTime: string;
}
interface GetControlOrganizationIntelligenceConfigEffectiveIntelligenceConfig {
    /**
     * The 'StorageIntelligence' edition that is applicable for the resource.
     */
    effectiveEdition: string;
    /**
     * The Intelligence config resource that is applied for the target resource.
     */
    intelligenceConfig: string;
}
interface GetControlOrganizationIntelligenceConfigFilter {
    /**
     * Buckets to exclude from the Storage Intelligence plan.
*/
    excludedCloudStorageBuckets: outputs.storage.GetControlOrganizationIntelligenceConfigFilterExcludedCloudStorageBucket[];
    /**
     * Locations to exclude from the Storage Intelligence plan.
     */
    excludedCloudStorageLocations: outputs.storage.GetControlOrganizationIntelligenceConfigFilterExcludedCloudStorageLocation[];
    /**
     * Buckets to include in the Storage Intelligence plan.
     */
    includedCloudStorageBuckets: outputs.storage.GetControlOrganizationIntelligenceConfigFilterIncludedCloudStorageBucket[];
    /**
     * Locations to include in the Storage Intelligence plan.
     */
    includedCloudStorageLocations: outputs.storage.GetControlOrganizationIntelligenceConfigFilterIncludedCloudStorageLocation[];
}
interface GetControlOrganizationIntelligenceConfigFilterExcludedCloudStorageBucket {
    /**
     * List of bucket id regexes to exclude in the storage intelligence plan.
     */
    bucketIdRegexes: string[];
}
interface GetControlOrganizationIntelligenceConfigFilterExcludedCloudStorageLocation {
    /**
     * List of locations.
     */
    locations: string[];
}
interface GetControlOrganizationIntelligenceConfigFilterIncludedCloudStorageBucket {
    /**
     * List of bucket id regexes to include in the storage intelligence plan.
     */
    bucketIdRegexes: string[];
}
interface GetControlOrganizationIntelligenceConfigFilterIncludedCloudStorageLocation {
    /**
     * List of locations.
     */
    locations: string[];
}
interface GetControlOrganizationIntelligenceConfigTrialConfig {
    /**
     * The time at which the trial expires.
     */
    expireTime: string;
}
interface GetControlProjectIntelligenceConfigEffectiveIntelligenceConfig {
    /**
     * The 'StorageIntelligence' edition that is applicable for the resource.
     */
    effectiveEdition: string;
    /**
     * The Intelligence config resource that is applied for the target resource.
     */
    intelligenceConfig: string;
}
interface GetControlProjectIntelligenceConfigFilter {
    /**
     * Buckets to exclude from the Storage Intelligence plan.
*/
    excludedCloudStorageBuckets: outputs.storage.GetControlProjectIntelligenceConfigFilterExcludedCloudStorageBucket[];
    /**
     * Locations to exclude from the Storage Intelligence plan.
     */
    excludedCloudStorageLocations: outputs.storage.GetControlProjectIntelligenceConfigFilterExcludedCloudStorageLocation[];
    /**
     * Buckets to include in the Storage Intelligence plan.
     */
    includedCloudStorageBuckets: outputs.storage.GetControlProjectIntelligenceConfigFilterIncludedCloudStorageBucket[];
    /**
     * Locations to include in the Storage Intelligence plan.
     */
    includedCloudStorageLocations: outputs.storage.GetControlProjectIntelligenceConfigFilterIncludedCloudStorageLocation[];
}
interface GetControlProjectIntelligenceConfigFilterExcludedCloudStorageBucket {
    /**
     * List of bucket id regexes to exclude in the storage intelligence plan.
     */
    bucketIdRegexes: string[];
}
interface GetControlProjectIntelligenceConfigFilterExcludedCloudStorageLocation {
    /**
     * List of locations.
     */
    locations: string[];
}
interface GetControlProjectIntelligenceConfigFilterIncludedCloudStorageBucket {
    /**
     * List of bucket id regexes to include in the storage intelligence plan.
     */
    bucketIdRegexes: string[];
}
interface GetControlProjectIntelligenceConfigFilterIncludedCloudStorageLocation {
    /**
     * List of locations.
     */
    locations: string[];
}
interface GetControlProjectIntelligenceConfigTrialConfig {
    /**
     * The time at which the trial expires.
     */
    expireTime: string;
}
interface GetInsightsDatasetConfigExcludeCloudStorageBucket {
    /**
     * The list of cloud storage buckets/bucket prefix regexes to exclude in the DatasetConfig.
     */
    cloudStorageBuckets: outputs.storage.GetInsightsDatasetConfigExcludeCloudStorageBucketCloudStorageBucket[];
}
interface GetInsightsDatasetConfigExcludeCloudStorageBucketCloudStorageBucket {
    /**
     * The list of cloud storage bucket names to exclude in the DatasetConfig.
     * Exactly one of the bucketName and bucketPrefixRegex should be specified.
*/ bucketName: string; /** * The list of regex patterns for bucket names matching the regex. * Regex should follow the syntax specified in google/re2 on GitHub. * Exactly one of the bucketName and bucketPrefixRegex should be specified. */ bucketPrefixRegex: string; } interface GetInsightsDatasetConfigExcludeCloudStorageLocation { /** * The list of cloud storage locations to exclude in the DatasetConfig. */ locations: string[]; } interface GetInsightsDatasetConfigIdentity { /** * Name of the identity. */ name: string; /** * Type of identity to use for the DatasetConfig. Possible values: ["IDENTITY_TYPE_PER_CONFIG", "IDENTITY_TYPE_PER_PROJECT"] */ type: string; } interface GetInsightsDatasetConfigIncludeCloudStorageBucket { /** * The list of cloud storage buckets/bucket prefix regexes to include in the DatasetConfig. */ cloudStorageBuckets: outputs.storage.GetInsightsDatasetConfigIncludeCloudStorageBucketCloudStorageBucket[]; } interface GetInsightsDatasetConfigIncludeCloudStorageBucketCloudStorageBucket { /** * The list of cloud storage bucket names to include in the DatasetConfig. * Exactly one of the bucketName and bucketPrefixRegex should be specified. */ bucketName: string; /** * The list of regex patterns for bucket names matching the regex. * Regex should follow the syntax specified in google/re2 on GitHub. * Exactly one of the bucketName and bucketPrefixRegex should be specified. */ bucketPrefixRegex: string; } interface GetInsightsDatasetConfigIncludeCloudStorageLocation { /** * The list of cloud storage locations to include in the DatasetConfig. */ locations: string[]; } interface GetInsightsDatasetConfigLink { /** * Dataset name for the linked DatasetConfig. */ dataset: string; /** * State of the linked DatasetConfig. */ linked: boolean; } interface GetInsightsDatasetConfigSourceFolder { /** * The list of folder numbers to include in the DatasetConfig. 
*/
    folderNumbers: string[];
}
interface GetInsightsDatasetConfigSourceProject {
    /**
     * The list of project numbers to include in the DatasetConfig.
     */
    projectNumbers: string[];
}
interface InsightsDatasetConfigExcludeCloudStorageBuckets {
    /**
     * The list of cloud storage buckets/bucket prefix regexes to exclude in the DatasetConfig.
     * Structure is documented below.
     */
    cloudStorageBuckets: outputs.storage.InsightsDatasetConfigExcludeCloudStorageBucketsCloudStorageBucket[];
}
interface InsightsDatasetConfigExcludeCloudStorageBucketsCloudStorageBucket {
    /**
     * The list of cloud storage bucket names to exclude in the DatasetConfig.
     * Exactly one of the bucketName and bucketPrefixRegex should be specified.
     */
    bucketName?: string;
    /**
     * The list of regex patterns for bucket names matching the regex.
     * Regex should follow the syntax specified in google/re2 on GitHub.
     * Exactly one of the bucketName and bucketPrefixRegex should be specified.
     */
    bucketPrefixRegex?: string;
}
interface InsightsDatasetConfigExcludeCloudStorageLocations {
    /**
     * The list of cloud storage locations to exclude in the DatasetConfig.
     */
    locations: string[];
}
interface InsightsDatasetConfigIdentity {
    /**
     * (Output)
     * Name of the identity.
     */
    name: string;
    /**
     * Type of identity to use for the DatasetConfig.
     * Possible values are: `IDENTITY_TYPE_PER_CONFIG`, `IDENTITY_TYPE_PER_PROJECT`.
     */
    type: string;
}
interface InsightsDatasetConfigIncludeCloudStorageBuckets {
    /**
     * The list of cloud storage buckets/bucket prefix regexes to include in the DatasetConfig.
     * Structure is documented below.
     */
    cloudStorageBuckets: outputs.storage.InsightsDatasetConfigIncludeCloudStorageBucketsCloudStorageBucket[];
}
interface InsightsDatasetConfigIncludeCloudStorageBucketsCloudStorageBucket {
    /**
     * The list of cloud storage bucket names to include in the DatasetConfig.
     * Exactly one of the bucketName and bucketPrefixRegex should be specified.
*/ bucketName?: string; /** * The list of regex patterns for bucket names matching the regex. * Regex should follow the syntax specified in google/re2 on GitHub. * Exactly one of the bucketName and bucketPrefixRegex should be specified. */ bucketPrefixRegex?: string; } interface InsightsDatasetConfigIncludeCloudStorageLocations { /** * The list of cloud storage locations to include in the DatasetConfig. */ locations: string[]; } interface InsightsDatasetConfigLink { /** * (Output) * Dataset name for the linked DatasetConfig. */ dataset: string; /** * (Output) * State of the linked DatasetConfig. */ linked: boolean; } interface InsightsDatasetConfigSourceFolders { /** * The list of folder numbers to include in the DatasetConfig. */ folderNumbers?: string[]; } interface InsightsDatasetConfigSourceProjects { /** * The list of project numbers to include in the DatasetConfig. */ projectNumbers?: string[]; } interface InsightsReportConfigCsvOptions { /** * The delimiter used to separate the fields in the inventory report CSV file. */ delimiter?: string; /** * The boolean that indicates whether or not headers are included in the inventory report CSV file. */ headerRequired?: boolean; /** * The character used to separate the records in the inventory report CSV file. */ recordSeparator?: string; } interface InsightsReportConfigFrequencyOptions { /** * The date to stop generating inventory reports. For example, {"day": 15, "month": 9, "year": 2022}. * Structure is documented below. */ endDate: outputs.storage.InsightsReportConfigFrequencyOptionsEndDate; /** * The frequency in which inventory reports are generated. Values are DAILY or WEEKLY. * Possible values are: `DAILY`, `WEEKLY`. */ frequency: string; /** * The date to start generating inventory reports. For example, {"day": 15, "month": 8, "year": 2022}. * Structure is documented below. 
*/ startDate: outputs.storage.InsightsReportConfigFrequencyOptionsStartDate; } interface InsightsReportConfigFrequencyOptionsEndDate { /** * The day of the month to stop generating inventory reports. */ day: number; /** * The month to stop generating inventory reports. */ month: number; /** * The year to stop generating inventory reports */ year: number; } interface InsightsReportConfigFrequencyOptionsStartDate { /** * The day of the month to start generating inventory reports. */ day: number; /** * The month to start generating inventory reports. */ month: number; /** * The year to start generating inventory reports */ year: number; } interface InsightsReportConfigObjectMetadataReportOptions { /** * The metadata fields included in an inventory report. */ metadataFields: string[]; /** * Options for where the inventory reports are stored. * Structure is documented below. */ storageDestinationOptions: outputs.storage.InsightsReportConfigObjectMetadataReportOptionsStorageDestinationOptions; /** * A nested object resource. * Structure is documented below. */ storageFilters?: outputs.storage.InsightsReportConfigObjectMetadataReportOptionsStorageFilters; } interface InsightsReportConfigObjectMetadataReportOptionsStorageDestinationOptions { /** * The destination bucket that stores the generated inventory reports. */ bucket: string; /** * The path within the destination bucket to store generated inventory reports. */ destinationPath?: string; } interface InsightsReportConfigObjectMetadataReportOptionsStorageFilters { /** * The filter to use when specifying which bucket to generate inventory reports for. */ bucket?: string; } interface InsightsReportConfigParquetOptions { } interface ManagedFolderIamBindingCondition { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
*
 * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the
 * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will
 * consider it to be an entirely different resource and will treat it as such.
 */
    description?: string;
    /**
     * Textual representation of an expression in Common Expression Language syntax.
     */
    expression: string;
    /**
     * A title for the expression, i.e. a short string describing its purpose.
     */
    title: string;
}
interface ManagedFolderIamMemberCondition {
    /**
     * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
     *
     * > **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the
     * identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will
     * consider it to be an entirely different resource and will treat it as such.
     */
    description?: string;
    /**
     * Textual representation of an expression in Common Expression Language syntax.
     */
    expression: string;
    /**
     * A title for the expression, i.e. a short string describing its purpose.
     */
    title: string;
}
interface ObjectAccessControlProjectTeam {
    /**
     * The project team associated with the entity
     */
    projectNumber?: string;
    /**
     * The team.
     * Possible values are: `editors`, `owners`, `viewers`.
     */
    team?: string;
}
interface TransferAgentPoolBandwidthLimit {
    /**
     * Bandwidth rate in megabytes per second, distributed across all the agents in the pool.
     */
    limitMbps: string;
}
interface TransferJobEventStream {
    /**
     * Specifies the date and time at which Storage Transfer Service stops listening for events from this stream. After this time, any transfers in progress will complete, but no new transfers are initiated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
     */
    eventStreamExpirationTime?: string;
    /**
     * Specifies the date and time that Storage Transfer Service starts listening for events from this stream. If no start time is specified or start time is in the past, Storage Transfer Service starts listening immediately. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
     */
    eventStreamStartTime?: string;
    /**
     * Specifies a unique name of the resource such as AWS SQS ARN in the form 'arn:aws:sqs:region:account_id:queue_name', or Pub/Sub subscription resource name in the form 'projects/{project}/subscriptions/{sub}'.
     */
    name: string;
}
interface TransferJobLoggingConfig {
    /**
     * For transfers with a PosixFilesystem source, this option enables the Cloud Storage transfer logs for this transfer.
     */
    enableOnPremGcsTransferLogs?: boolean;
    /**
     * States in which logActions are logged. Not supported for transfers with PosixFilesystem data sources; use enableOnPremGcsTransferLogs instead.
     */
    logActionStates?: string[];
    /**
     * Specifies the actions to be logged. Not supported for transfers with PosixFilesystem data sources; use enableOnPremGcsTransferLogs instead.
     */
    logActions?: string[];
}
interface TransferJobNotificationConfig {
    /**
     * Event types for which a notification is desired. If empty, send notifications for all event types. The valid types are "TRANSFER_OPERATION_SUCCESS", "TRANSFER_OPERATION_FAILED", "TRANSFER_OPERATION_ABORTED".
     */
    eventTypes?: string[];
    /**
     * The desired format of the notification message payloads. One of "NONE" or "JSON".
     */
    payloadFormat: string;
    /**
     * The Topic.name of the Pub/Sub topic to which to publish notifications. Must be of the format: projects/{project}/topics/{topic}. Not matching this format results in an INVALID_ARGUMENT error.
*/ pubsubTopic: string; } interface TransferJobReplicationSpec { /** * A Google Cloud Storage data sink. Structure documented below. */ gcsDataSink?: outputs.storage.TransferJobReplicationSpecGcsDataSink; /** * A Google Cloud Storage data source. Structure documented below. */ gcsDataSource?: outputs.storage.TransferJobReplicationSpecGcsDataSource; /** * Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' `lastModificationTime` do not exclude objects in a data sink. Structure documented below. */ objectConditions?: outputs.storage.TransferJobReplicationSpecObjectConditions; /** * Characteristics of how to treat files from datasource and sink during job. If the option `deleteObjectsUniqueInSink` is true, object conditions based on objects' `lastModificationTime` are ignored and do not exclude objects in a data source or a data sink. Structure documented below. */ transferOptions?: outputs.storage.TransferJobReplicationSpecTransferOptions; } interface TransferJobReplicationSpecGcsDataSink { /** * Google Cloud Storage bucket name. */ bucketName: string; /** * Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. */ path?: string; } interface TransferJobReplicationSpecGcsDataSource { /** * Google Cloud Storage bucket name. */ bucketName: string; /** * Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. */ path?: string; } interface TransferJobReplicationSpecObjectConditions { /** * `excludePrefixes` must follow the requirements described for `includePrefixes`. See [Requirements](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions). 
*/ excludePrefixes?: string[]; /** * If `includePrefixes` is specified, objects that satisfy the object conditions must have names that start with one of the `includePrefixes` and that do not start with any of the `excludePrefixes`. If `includePrefixes` is not specified, all objects except those that have names starting with one of the `excludePrefixes` must satisfy the object conditions. See [Requirements](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions). */ includePrefixes?: string[]; /** * If specified, only objects with a "last modification time" before this timestamp and objects that don't have a "last modification time" are transferred. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastModifiedBefore?: string; /** * If specified, only objects with a "last modification time" on or after this timestamp and objects that don't have a "last modification time" are transferred. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastModifiedSince?: string; /** * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ maxTimeElapsedSinceLastModification?: string; /** * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ minTimeElapsedSinceLastModification?: string; } interface TransferJobReplicationSpecTransferOptions { /** * Whether objects should be deleted from the source after they are transferred to the sink. Note that this option and `deleteObjectsUniqueInSink` are mutually exclusive. */ deleteObjectsFromSourceAfterTransfer?: boolean; /** * Whether objects that exist only in the sink should be deleted. 
Note that this option and * `deleteObjectsFromSourceAfterTransfer` are mutually exclusive. */ deleteObjectsUniqueInSink?: boolean; /** * Specifies the metadata options for running a transfer. Structure documented below. */ metadataOptions?: outputs.storage.TransferJobReplicationSpecTransferOptionsMetadataOptions; /** * Whether overwriting objects that already exist in the sink is allowed. */ overwriteObjectsAlreadyExistingInSink?: boolean; /** * When to overwrite objects that already exist in the sink. If not set, overwrite behavior is determined by `overwriteObjectsAlreadyExistingInSink`. Possible values: ALWAYS, DIFFERENT, NEVER. */ overwriteWhen?: string; } interface TransferJobReplicationSpecTransferOptionsMetadataOptions { /** * Specifies how each object's ACLs should be preserved for transfers between Google Cloud Storage buckets. */ acl?: string; /** * Specifies how each file's POSIX group ID (GID) attribute should be handled by the transfer. */ gid?: string; /** * Specifies how each object's Cloud KMS customer-managed encryption key (CMEK) is preserved for transfers between Google Cloud Storage buckets. */ kmsKey?: string; /** * Specifies how each file's mode attribute should be handled by the transfer. */ mode?: string; /** * Specifies the storage class to set on objects being transferred to Google Cloud Storage buckets. */ storageClass?: string; /** * Specifies how symlinks should be handled by the transfer. */ symlink?: string; /** * Specifies how each object's temporary hold status should be preserved for transfers between Google Cloud Storage buckets. */ temporaryHold?: string; /** * Specifies how each object's timeCreated metadata is preserved for transfers. */ timeCreated?: string; /** * Specifies how each file's POSIX user ID (UID) attribute should be handled by the transfer. */ uid?: string; } interface TransferJobSchedule { /** * Interval between the start of each scheduled transfer. If unspecified, the default value is 24 hours. 
This value may not be less than 1 hour. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ repeatInterval?: string; /** * The last day the recurring transfer will be run. If `scheduleEndDate` is the same as `scheduleStartDate`, the transfer will be executed only once. Structure documented below. */ scheduleEndDate?: outputs.storage.TransferJobScheduleScheduleEndDate; /** * The first day the recurring transfer is scheduled to run. If `scheduleStartDate` is in the past, the transfer will run for the first time on the following day. Structure documented below. */ scheduleStartDate: outputs.storage.TransferJobScheduleScheduleStartDate; /** * The time in UTC at which the transfer will be scheduled to start in a day. Transfers may start later than this time. If not specified, recurring and one-time transfers that are scheduled to run today will run immediately; recurring transfers that are scheduled to run on a future date will start at approximately midnight UTC on that date. Note that when configuring a transfer with the Cloud Platform Console, the transfer's start time in a day is specified in your local timezone. Structure documented below. */ startTimeOfDay?: outputs.storage.TransferJobScheduleStartTimeOfDay; } interface TransferJobScheduleScheduleEndDate { /** * Day of month. Must be from 1 to 31 and valid for the year and month. */ day: number; /** * Month of year. Must be from 1 to 12. */ month: number; /** * Year of date. Must be from 1 to 9999. */ year: number; } interface TransferJobScheduleScheduleStartDate { /** * Day of month. Must be from 1 to 31 and valid for the year and month. */ day: number; /** * Month of year. Must be from 1 to 12. */ month: number; /** * Year of date. Must be from 1 to 9999. */ year: number; } interface TransferJobScheduleStartTimeOfDay { /** * Hours of day in 24 hour format. Should be from 0 to 23. */ hours: number; /** * Minutes of hour of day. Must be from 0 to 59. 
*/ minutes: number; /** * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. */ nanos: number; /** * Seconds of minutes of the time. Must normally be from 0 to 59. */ seconds: number; } interface TransferJobTransferSpec { /** * An AWS S3 Compatible data source. Structure documented below. */ awsS3CompatibleDataSource?: outputs.storage.TransferJobTransferSpecAwsS3CompatibleDataSource; /** * An AWS S3 data source. Structure documented below. */ awsS3DataSource?: outputs.storage.TransferJobTransferSpecAwsS3DataSource; /** * An Azure Blob Storage data source. Structure documented below. */ azureBlobStorageDataSource?: outputs.storage.TransferJobTransferSpecAzureBlobStorageDataSource; /** * A Google Cloud Storage data sink. Structure documented below. */ gcsDataSink?: outputs.storage.TransferJobTransferSpecGcsDataSink; /** * A Google Cloud Storage data source. Structure documented below. */ gcsDataSource?: outputs.storage.TransferJobTransferSpecGcsDataSource; /** * An HDFS data source. Structure documented below. */ hdfsDataSource?: outputs.storage.TransferJobTransferSpecHdfsDataSource; /** * A HTTP URL data source. Structure documented below. */ httpDataSource?: outputs.storage.TransferJobTransferSpecHttpDataSource; /** * Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' `lastModificationTime` do not exclude objects in a data sink. Structure documented below. */ objectConditions?: outputs.storage.TransferJobTransferSpecObjectConditions; /** * A POSIX data sink. Structure documented below. */ posixDataSink?: outputs.storage.TransferJobTransferSpecPosixDataSink; /** * A POSIX filesystem data source. Structure documented below. */ posixDataSource?: outputs.storage.TransferJobTransferSpecPosixDataSource; /** * Specifies the agent pool name associated with the posix data sink. When unspecified, the default name is used. 
*/ sinkAgentPoolName: string; /** * Specifies the agent pool name associated with the posix data source. When unspecified, the default name is used. */ sourceAgentPoolName: string; /** * Use a manifest file to limit which object are transferred. See [Storage Transfer Service manifest file format](https://cloud.google.com/storage-transfer/docs/manifest). Structure documented below. */ transferManifest?: outputs.storage.TransferJobTransferSpecTransferManifest; /** * Characteristics of how to treat files from datasource and sink during job. If the option `deleteObjectsUniqueInSink` is true, object conditions based on objects' `lastModificationTime` are ignored and do not exclude objects in a data source or a data sink. Structure documented below. */ transferOptions?: outputs.storage.TransferJobTransferSpecTransferOptions; } interface TransferJobTransferSpecAwsS3CompatibleDataSource { /** * Name of the bucket. */ bucketName: string; /** * Endpoint of the storage service. */ endpoint: string; /** * Specifies the path to transfer objects. */ path?: string; /** * Specifies the region to sign requests with. This can be left blank if requests should be signed with an empty region. */ region?: string; /** * S3 compatible metadata. */ s3Metadata?: outputs.storage.TransferJobTransferSpecAwsS3CompatibleDataSourceS3Metadata; } interface TransferJobTransferSpecAwsS3CompatibleDataSourceS3Metadata { /** * Authentication and authorization method used by the storage service. When not specified, Transfer Service will attempt to determine right auth method to use. */ authMethod?: string; /** * The Listing API to use for discovering objects. When not specified, Transfer Service will attempt to determine the right API to use. */ listApi?: string; /** * The network protocol of the agent. When not specified, the default value of NetworkProtocol NETWORK_PROTOCOL_HTTPS is used. */ protocol?: string; /** * API request model used to call the storage service. 
When not specified, the default value of RequestModel REQUEST_MODEL_VIRTUAL_HOSTED_STYLE is used. */ requestModel?: string; } interface TransferJobTransferSpecAwsS3DataSource { /** * AWS credentials block. */ awsAccessKey?: outputs.storage.TransferJobTransferSpecAwsS3DataSourceAwsAccessKey; /** * S3 Bucket name. */ bucketName: string; /** * The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: https://{id}.cloudfront.net or any valid custom domain. Must begin with https://. */ cloudfrontDomain?: string; /** * The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format. If credentialsSecret is specified, do not specify roleArn or aws_access_key. Format: projects/{projectNumber}/secrets/{secret_name}. */ credentialsSecret?: string; /** * Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. */ managedPrivateNetwork?: boolean; /** * S3 Bucket path in bucket to transfer. */ path?: string; /** * The Amazon Resource Name (ARN) of the role to support temporary credentials via 'AssumeRoleWithWebIdentity'. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a 'AssumeRoleWithWebIdentity' call for the provided role using the [GoogleServiceAccount][] for this project. */ roleArn?: string; } interface TransferJobTransferSpecAwsS3DataSourceAwsAccessKey { /** * AWS Key ID. */ accessKeyId: string; /** * AWS Secret Access Key. */ secretAccessKey: string; } interface TransferJobTransferSpecAzureBlobStorageDataSource { /** * ) Credentials used to authenticate API requests to Azure block. 
*/ azureCredentials?: outputs.storage.TransferJobTransferSpecAzureBlobStorageDataSourceAzureCredentials; /** * The container to transfer from the Azure Storage account.` */ container: string; /** * ) Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%20with%20a%20%27/%27.-,credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentialsSecret is specified, do not specify azure_credentials.`, */ credentialsSecret?: string; /** * Federated identity config of a user registered Azure application. Structure documented below. */ federatedIdentityConfig?: outputs.storage.TransferJobTransferSpecAzureBlobStorageDataSourceFederatedIdentityConfig; /** * Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. */ path: string; /** * The name of the Azure Storage account. */ storageAccount: string; } interface TransferJobTransferSpecAzureBlobStorageDataSourceAzureCredentials { /** * Azure shared access signature. See [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). */ sasToken: string; } interface TransferJobTransferSpecAzureBlobStorageDataSourceFederatedIdentityConfig { /** * The client (application) ID of the application with federated credentials. */ clientId: string; /** * The client (directory) ID of the application with federated credentials. * * The `scheduleStartDate` and `scheduleEndDate` blocks support: */ tenantId: string; } interface TransferJobTransferSpecGcsDataSink { /** * Google Cloud Storage bucket name. */ bucketName: string; /** * Root path to transfer objects. 
Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. */ path?: string; } interface TransferJobTransferSpecGcsDataSource { /** * Google Cloud Storage bucket name. */ bucketName: string; /** * Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. */ path?: string; } interface TransferJobTransferSpecHdfsDataSource { /** * Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. */ path: string; } interface TransferJobTransferSpecHttpDataSource { /** * The URL that points to the file that stores the object list entries. This file must allow public access. Currently, only URLs with HTTP and HTTPS schemes are supported. */ listUrl: string; } interface TransferJobTransferSpecObjectConditions { /** * `excludePrefixes` must follow the requirements described for `includePrefixes`. See [Requirements](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions). */ excludePrefixes?: string[]; /** * If `includePrefixes` is specified, objects that satisfy the object conditions must have names that start with one of the `includePrefixes` and that do not start with any of the `excludePrefixes`. If `includePrefixes` is not specified, all objects except those that have names starting with one of the `excludePrefixes` must satisfy the object conditions. See [Requirements](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions). */ includePrefixes?: string[]; /** * If specified, only objects with a "last modification time" before this timestamp and objects that don't have a "last modification time" are transferred. 
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastModifiedBefore?: string; /** * If specified, only objects with a "last modification time" on or after this timestamp and objects that don't have a "last modification time" are transferred. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ lastModifiedSince?: string; /** * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ maxTimeElapsedSinceLastModification?: string; /** * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ minTimeElapsedSinceLastModification?: string; } interface TransferJobTransferSpecPosixDataSink { /** * Root directory path to the filesystem. */ rootDirectory: string; } interface TransferJobTransferSpecPosixDataSource { /** * Root directory path to the filesystem. */ rootDirectory: string; } interface TransferJobTransferSpecTransferManifest { /** * The **GCS URI** to the manifest file (CSV or line-delimited). Example: `gs://my-bucket/manifest.csv` */ location: string; } interface TransferJobTransferSpecTransferOptions { /** * Whether objects should be deleted from the source after they are transferred to the sink. Note that this option and `deleteObjectsUniqueInSink` are mutually exclusive. */ deleteObjectsFromSourceAfterTransfer?: boolean; /** * Whether objects that exist only in the sink should be deleted. Note that this option and * `deleteObjectsFromSourceAfterTransfer` are mutually exclusive. */ deleteObjectsUniqueInSink?: boolean; /** * Specifies the metadata options for running a transfer. Structure documented below. 
*/ metadataOptions?: outputs.storage.TransferJobTransferSpecTransferOptionsMetadataOptions; /** * Whether overwriting objects that already exist in the sink is allowed. */ overwriteObjectsAlreadyExistingInSink?: boolean; /** * When to overwrite objects that already exist in the sink. If not set, overwrite behavior is determined by `overwriteObjectsAlreadyExistingInSink`. Possible values: ALWAYS, DIFFERENT, NEVER. */ overwriteWhen?: string; } interface TransferJobTransferSpecTransferOptionsMetadataOptions { /** * Specifies how each object's ACLs should be preserved for transfers between Google Cloud Storage buckets. */ acl?: string; /** * Specifies how each file's POSIX group ID (GID) attribute should be handled by the transfer. */ gid?: string; /** * Specifies how each object's Cloud KMS customer-managed encryption key (CMEK) is preserved for transfers between Google Cloud Storage buckets. */ kmsKey?: string; /** * Specifies how each file's mode attribute should be handled by the transfer. */ mode?: string; /** * Specifies the storage class to set on objects being transferred to Google Cloud Storage buckets. */ storageClass?: string; /** * Specifies how symlinks should be handled by the transfer. */ symlink?: string; /** * Specifies how each object's temporary hold status should be preserved for transfers between Google Cloud Storage buckets. */ temporaryHold?: string; /** * Specifies how each object's timeCreated metadata is preserved for transfers. */ timeCreated?: string; /** * Specifies how each file's POSIX user ID (UID) attribute should be handled by the transfer. */ uid?: string; } } export declare namespace tags { interface GetTagKeysKey { /** * Regular expression constraint for dynamic tag values, follows RE2 syntax. If present, it implicitly allows dynamic values (constrained by the regex). */ allowedValuesRegex: string; /** * Creation time. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ createTime: string; /** * User-assigned description of the TagKey. */ description: string; /** * an identifier for the resource with format `tagKeys/{{name}}` */ name: string; /** * Namespaced name of the TagKey which is in the format `{parentNamespace}/{shortName}`. */ namespacedName: string; /** * The resource name of the parent organization or project. It can be in format `organizations/{org_id}` or `projects/{project_id_or_number}`. */ parent: string; /** * A purpose denotes that this Tag is intended for use in policies of a specific policy engine, and will involve that policy engine in management operations involving this Tag. A purpose does not grant a policy engine exclusive rights to the Tag, and it may be referenced by other policy engines. */ purpose: string; /** * Purpose data corresponds to the policy system that the tag is intended for. See documentation for Purpose for formatting of this field. */ purposeData: { [key: string]: string; }; /** * The user friendly name for a TagKey. The short name should be unique for TagKeys wihting the same tag namespace. */ shortName: string; /** * Update time. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ updateTime: string; } interface GetTagValuesValue { /** * Creation time. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ createTime: string; /** * User-assigned description of the TagValue. */ description: string; /** * an identifier for the resource with format `tagValues/{{name}}` */ name: string; /** * Namespaced name of the TagValue. */ namespacedName: string; /** * The resource name of the parent tagKey in format `tagKey/{name}`. 
*/ parent: string; /** * User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. */ shortName: string; /** * Update time. * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". */ updateTime: string; } interface TagKeyIamBindingCondition { description?: string; expression: string; title: string; } interface TagKeyIamMemberCondition { description?: string; expression: string; title: string; } interface TagValueIamBindingCondition { description?: string; expression: string; title: string; } interface TagValueIamMemberCondition { description?: string; expression: string; title: string; } } export declare namespace tpu { interface V2QueuedResourceTpu { /** * The TPU node(s) being requested. * Structure is documented below. */ nodeSpecs?: outputs.tpu.V2QueuedResourceTpuNodeSpec[]; } interface V2QueuedResourceTpuNodeSpec { /** * The node. * Structure is documented below. */ node: outputs.tpu.V2QueuedResourceTpuNodeSpecNode; /** * Unqualified node identifier used to identify the node in the project once provisioned. */ nodeId?: string; /** * The parent resource name. */ parent: string; } interface V2QueuedResourceTpuNodeSpecNode { /** * TPU accelerator type for the TPU. If not specified, this defaults to 'v2-8'. */ acceleratorType: string; /** * Text description of the TPU. */ description?: string; /** * Network configurations for the TPU node. * Structure is documented below. */ networkConfig: outputs.tpu.V2QueuedResourceTpuNodeSpecNodeNetworkConfig; /** * Runtime version for the TPU. */ runtimeVersion: string; } interface V2QueuedResourceTpuNodeSpecNodeNetworkConfig { /** * Allows the TPU node to send and receive packets with non-matching destination or source * IPs. This is required if you plan to use the TPU workers to forward routes. 
*/ canIpForward?: boolean; /** * Indicates that external IP addresses would be associated with the TPU workers. If set to * false, the specified subnetwork or network should have Private Google Access enabled. */ enableExternalIps?: boolean; /** * The name of the network for the TPU node. It must be a preexisting Google Compute Engine * network. If none is provided, "default" will be used. */ network: string; /** * Specifies networking queue count for TPU VM instance's network interface. */ queueCount?: number; /** * The name of the subnetwork for the TPU node. It must be a preexisting Google Compute * Engine subnetwork. If none is provided, "default" will be used. */ subnetwork: string; } interface V2VmAcceleratorConfig { /** * Topology of TPU in chips. */ topology: string; /** * Type of TPU. Please select one of the allowed types: https://cloud.google.com/tpu/docs/reference/rest/v2/AcceleratorConfig#Type */ type: string; } interface V2VmDataDisk { /** * The mode in which to attach this disk. If not specified, the default is READ_WRITE * mode. Only applicable to dataDisks. * Default value is `READ_WRITE`. * Possible values are: `READ_WRITE`, `READ_ONLY`. */ mode?: string; /** * Specifies the full path to an existing disk. For example: * "projects/my-project/zones/us-central1-c/disks/my-disk". */ sourceDisk: string; } interface V2VmNetworkConfig { /** * Allows the TPU node to send and receive packets with non-matching destination or source * IPs. This is required if you plan to use the TPU workers to forward routes. */ canIpForward?: boolean; /** * Indicates that external IP addresses would be associated with the TPU workers. If set to * false, the specified subnetwork or network should have Private Google Access enabled. */ enableExternalIps?: boolean; /** * The name of the network for the TPU node. It must be a preexisting Google Compute Engine * network. If none is provided, "default" will be used. 
*/ network: string; /** * Specifies networking queue count for TPU VM instance's network interface. */ queueCount?: number; /** * The name of the subnetwork for the TPU node. It must be a preexisting Google Compute * Engine subnetwork. If none is provided, "default" will be used. */ subnetwork: string; } interface V2VmNetworkEndpoint { /** * (Output) * The access config for the TPU worker. * Structure is documented below. */ accessConfigs: outputs.tpu.V2VmNetworkEndpointAccessConfig[]; /** * (Output) * The internal IP address of this network endpoint. */ ipAddress: string; /** * (Output) * The port of this network endpoint. */ port: number; } interface V2VmNetworkEndpointAccessConfig { /** * (Output) * An external IP address associated with the TPU worker. */ externalIp: string; } interface V2VmSchedulingConfig { /** * Defines whether the node is preemptible. */ preemptible?: boolean; /** * Whether the node is created under a reservation. */ reserved?: boolean; /** * Optional. Defines whether the node is Spot VM. */ spot?: boolean; } interface V2VmServiceAccount { /** * Email address of the service account. If empty, default Compute service account will be used. */ email: string; /** * The list of scopes to be made available for this service account. If empty, access to all * Cloud APIs will be allowed. */ scopes: string[]; } interface V2VmShieldedInstanceConfig { /** * Defines whether the instance has Secure Boot enabled. */ enableSecureBoot: boolean; } interface V2VmSymptom { /** * (Output) * Timestamp when the Symptom is created. */ createTime: string; /** * (Output) * Detailed information of the current Symptom. */ details: string; /** * (Output) * Type of the Symptom. */ symptomType: string; /** * (Output) * A string used to uniquely distinguish a worker within a TPU node. */ workerId: string; } } export declare namespace transcoder { interface JobConfig { /** * Ad break. * Structure is documented below. 
*/ adBreaks: outputs.transcoder.JobConfigAdBreak[]; /** * List of input assets stored in Cloud Storage. * Structure is documented below. */ editLists: outputs.transcoder.JobConfigEditList[]; /** * List of input assets stored in Cloud Storage. * Structure is documented below. */ elementaryStreams: outputs.transcoder.JobConfigElementaryStream[]; /** * List of encryption configurations for the content. * Structure is documented below. */ encryptions: outputs.transcoder.JobConfigEncryption[]; /** * List of input assets stored in Cloud Storage. * Structure is documented below. */ inputs: outputs.transcoder.JobConfigInput[]; /** * Manifest configuration. * Structure is documented below. */ manifests: outputs.transcoder.JobConfigManifest[]; /** * Multiplexing settings for output stream. * Structure is documented below. */ muxStreams: outputs.transcoder.JobConfigMuxStream[]; /** * Location of output file(s) in a Cloud Storage bucket. * Structure is documented below. */ output: outputs.transcoder.JobConfigOutput; /** * List of overlays on the output video, in descending Z-order. * Structure is documented below. */ overlays: outputs.transcoder.JobConfigOverlay[]; /** * Pub/Sub destination. * Structure is documented below. */ pubsubDestination: outputs.transcoder.JobConfigPubsubDestination; } interface JobConfigAdBreak { /** * Start time in seconds for the ad break, relative to the output file timeline */ startTimeOffset: string; } interface JobConfigEditList { /** * List of values identifying files that should be used in this atom. */ inputs: string[]; /** * A unique key for this atom. */ key: string; /** * Start time in seconds for the atom, relative to the input file timeline. The default is `0s`. */ startTimeOffset: string; } interface JobConfigElementaryStream { /** * Encoding of an audio stream. * Structure is documented below. */ audioStream: outputs.transcoder.JobConfigElementaryStreamAudioStream; /** * A unique key for this atom. 
*/ key: string; /** * Encoding of a video stream. * Structure is documented below. */ videoStream: outputs.transcoder.JobConfigElementaryStreamVideoStream; } interface JobConfigElementaryStreamAudioStream { /** * Audio bitrate in bits per second. */ bitrateBps: number; /** * Number of audio channels. The default is `2`. */ channelCount: number; /** * A list of channel names specifying layout of the audio channels. The default is ["fl", "fr"]. */ channelLayouts: string[]; /** * The codec for this audio stream. The default is `aac`. */ codec: string; /** * The audio sample rate in Hertz. The default is `48000`. */ sampleRateHertz: number; } interface JobConfigElementaryStreamVideoStream { /** * H264 codec settings * Structure is documented below. * * * The `h264` block supports: */ h264: outputs.transcoder.JobConfigElementaryStreamVideoStreamH264; } interface JobConfigElementaryStreamVideoStreamH264 { /** * The video bitrate in bits per second. */ bitrateBps: number; /** * Target CRF level. The default is '21'. */ crfLevel: number; /** * The entropy coder to use. The default is 'cabac'. */ entropyCoder: string; /** * The target video frame rate in frames per second (FPS). */ frameRate: number; /** * Select the GOP size based on the specified duration. The default is '3s'. */ gopDuration: string; /** * The height of the video in pixels. */ heightPixels: number; /** * HLG color format setting for H264. */ hlg?: outputs.transcoder.JobConfigElementaryStreamVideoStreamH264Hlg; /** * Pixel format to use. The default is 'yuv420p'. */ pixelFormat: string; /** * Enforces the specified codec preset. The default is 'veryfast'. */ preset: string; /** * Enforces the specified codec profile. */ profile: string; /** * Specify the mode. The default is 'vbr'. */ rateControlMode: string; /** * SDR color format setting for H264. */ sdr?: outputs.transcoder.JobConfigElementaryStreamVideoStreamH264Sdr; /** * Initial fullness of the Video Buffering Verifier (VBV) buffer in bits. 
*/ vbvFullnessBits: number; /** * Size of the Video Buffering Verifier (VBV) buffer in bits. */ vbvSizeBits: number; /** * The width of the video in pixels. */ widthPixels: number; } interface JobConfigElementaryStreamVideoStreamH264Hlg { } interface JobConfigElementaryStreamVideoStreamH264Sdr { } interface JobConfigEncryption { /** * Configuration for AES-128 encryption. */ aes128?: outputs.transcoder.JobConfigEncryptionAes128; /** * DRM system(s) to use; at least one must be specified. If a DRM system is omitted, it is considered disabled. * Structure is documented below. */ drmSystems: outputs.transcoder.JobConfigEncryptionDrmSystems; /** * Identifier for this set of encryption options. */ id: string; /** * Configuration for MPEG Common Encryption (MPEG-CENC). * Structure is documented below. */ mpegCenc: outputs.transcoder.JobConfigEncryptionMpegCenc; /** * Configuration for SAMPLE-AES encryption. */ sampleAes?: outputs.transcoder.JobConfigEncryptionSampleAes; /** * Configuration for secrets stored in Google Secret Manager. * Structure is documented below. */ secretManagerKeySource: outputs.transcoder.JobConfigEncryptionSecretManagerKeySource; } interface JobConfigEncryptionAes128 { } interface JobConfigEncryptionDrmSystems { /** * Clearkey configuration. */ clearkey?: outputs.transcoder.JobConfigEncryptionDrmSystemsClearkey; /** * Fairplay configuration. */ fairplay?: outputs.transcoder.JobConfigEncryptionDrmSystemsFairplay; /** * Playready configuration. */ playready?: outputs.transcoder.JobConfigEncryptionDrmSystemsPlayready; /** * Widevine configuration. */ widevine?: outputs.transcoder.JobConfigEncryptionDrmSystemsWidevine; } interface JobConfigEncryptionDrmSystemsClearkey { } interface JobConfigEncryptionDrmSystemsFairplay { } interface JobConfigEncryptionDrmSystemsPlayready { } interface JobConfigEncryptionDrmSystemsWidevine { } interface JobConfigEncryptionMpegCenc { /** * Specify the encryption scheme. 
*/ scheme: string; } interface JobConfigEncryptionSampleAes { } interface JobConfigEncryptionSecretManagerKeySource { /** * The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}. */ secretVersion: string; } interface JobConfigInput { /** * A unique key for this input. Must be specified when using advanced mapping and edit lists. */ key: string; /** * URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4). * If empty, the value is populated from Job.input_uri. */ uri: string; } interface JobConfigManifest { /** * The name of the generated file. The default is `manifest`. */ fileName: string; /** * List of user supplied MuxStream.key values that should appear in this manifest. */ muxStreams: string[]; /** * Type of the manifest. * Possible values are: `MANIFEST_TYPE_UNSPECIFIED`, `HLS`, `DASH`. */ type: string; } interface JobConfigMuxStream { /** * The container format. The default is `mp4`. */ container: string; /** * List of ElementaryStream.key values multiplexed in this stream. */ elementaryStreams: string[]; /** * Identifier of the encryption configuration to use. */ encryptionId: string; /** * The name of the generated file. */ fileName: string; /** * A unique key for this multiplexed stream. */ key: string; /** * Segment settings for ts, fmp4 and vtt. * Structure is documented below. */ segmentSettings: outputs.transcoder.JobConfigMuxStreamSegmentSettings; } interface JobConfigMuxStreamSegmentSettings { /** * Duration of the segments in seconds. The default is `6.0s`. */ segmentDuration: string; } interface JobConfigOutput { /** * URI for the output file(s). For example, gs://my-bucket/outputs/. */ uri: string; } interface JobConfigOverlay { /** * List of animations. The list should be chronological, without any time overlap. * Structure is documented below. 
*/ animations: outputs.transcoder.JobConfigOverlayAnimation[]; /** * Image overlay. * Structure is documented below. */ image: outputs.transcoder.JobConfigOverlayImage; } interface JobConfigOverlayAnimation { /** * Display overlay object with fade animation. * Structure is documented below. */ animationFade: outputs.transcoder.JobConfigOverlayAnimationAnimationFade; } interface JobConfigOverlayAnimationAnimationFade { /** * The time to end the fade animation, in seconds. */ endTimeOffset: string; /** * Required. Type of fade animation: `FADE_IN` or `FADE_OUT`. * The possible values are: * * `FADE_TYPE_UNSPECIFIED`: The fade type is not specified. * * `FADE_IN`: Fade the overlay object into view. * * `FADE_OUT`: Fade the overlay object out of view. * Possible values are: `FADE_TYPE_UNSPECIFIED`, `FADE_IN`, `FADE_OUT`. */ fadeType: string; /** * The time to start the fade animation, in seconds. */ startTimeOffset: string; /** * Normalized coordinates based on output video resolution. * Structure is documented below. */ xy: outputs.transcoder.JobConfigOverlayAnimationAnimationFadeXy; } interface JobConfigOverlayAnimationAnimationFadeXy { /** * Normalized x coordinate. */ x: number; /** * Normalized y coordinate. */ y: number; } interface JobConfigOverlayImage { /** * URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png. */ uri: string; } interface JobConfigPubsubDestination { /** * The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}. */ topic?: string; } interface JobTemplateConfig { /** * Ad break. * Structure is documented below. */ adBreaks: outputs.transcoder.JobTemplateConfigAdBreak[]; /** * List of input assets stored in Cloud Storage. * Structure is documented below. */ editLists: outputs.transcoder.JobTemplateConfigEditList[]; /** * List of input assets stored in Cloud Storage. * Structure is documented below. 
*/ elementaryStreams: outputs.transcoder.JobTemplateConfigElementaryStream[]; /** * List of encryption configurations for the content. * Structure is documented below. */ encryptions: outputs.transcoder.JobTemplateConfigEncryption[]; /** * List of input assets stored in Cloud Storage. * Structure is documented below. */ inputs: outputs.transcoder.JobTemplateConfigInput[]; /** * Manifest configuration. * Structure is documented below. */ manifests: outputs.transcoder.JobTemplateConfigManifest[]; /** * Multiplexing settings for output stream. * Structure is documented below. */ muxStreams: outputs.transcoder.JobTemplateConfigMuxStream[]; /** * Location of output file(s) in a Cloud Storage bucket. * Structure is documented below. */ output: outputs.transcoder.JobTemplateConfigOutput; /** * List of overlays on the output video, in descending Z-order. * Structure is documented below. */ overlays: outputs.transcoder.JobTemplateConfigOverlay[]; /** * Pub/Sub destination. * Structure is documented below. */ pubsubDestination: outputs.transcoder.JobTemplateConfigPubsubDestination; } interface JobTemplateConfigAdBreak { /** * Start time in seconds for the ad break, relative to the output file timeline */ startTimeOffset: string; } interface JobTemplateConfigEditList { /** * List of values identifying files that should be used in this atom. */ inputs: string[]; /** * A unique key for this atom. */ key: string; /** * Start time in seconds for the atom, relative to the input file timeline. The default is `0s`. */ startTimeOffset: string; } interface JobTemplateConfigElementaryStream { /** * Encoding of an audio stream. * Structure is documented below. */ audioStream: outputs.transcoder.JobTemplateConfigElementaryStreamAudioStream; /** * A unique key for this atom. */ key: string; /** * Encoding of a video stream. * Structure is documented below. 
*/ videoStream: outputs.transcoder.JobTemplateConfigElementaryStreamVideoStream; } interface JobTemplateConfigElementaryStreamAudioStream { /** * Audio bitrate in bits per second. */ bitrateBps: number; /** * Number of audio channels. The default is `2`. */ channelCount: number; /** * A list of channel names specifying layout of the audio channels. The default is ["fl", "fr"]. */ channelLayouts: string[]; /** * The codec for this audio stream. The default is `aac`. */ codec: string; /** * The audio sample rate in Hertz. The default is `48000`. */ sampleRateHertz: number; } interface JobTemplateConfigElementaryStreamVideoStream { /** * H264 codec settings * Structure is documented below. * * * The `h264` block supports: */ h264: outputs.transcoder.JobTemplateConfigElementaryStreamVideoStreamH264; } interface JobTemplateConfigElementaryStreamVideoStreamH264 { /** * The video bitrate in bits per second. */ bitrateBps: number; /** * Target CRF level. The default is '21'. */ crfLevel: number; /** * The entropy coder to use. The default is 'cabac'. */ entropyCoder: string; /** * The target video frame rate in frames per second (FPS). */ frameRate: number; /** * Select the GOP size based on the specified duration. The default is '3s'. */ gopDuration: string; /** * The height of the video in pixels. */ heightPixels: number; /** * HLG color format setting for H264. */ hlg?: outputs.transcoder.JobTemplateConfigElementaryStreamVideoStreamH264Hlg; /** * Pixel format to use. The default is 'yuv420p'. */ pixelFormat: string; /** * Enforces the specified codec preset. The default is 'veryfast'. */ preset: string; /** * Enforces the specified codec profile. */ profile: string; /** * Specify the mode. The default is 'vbr'. */ rateControlMode: string; /** * SDR color format setting for H264. */ sdr?: outputs.transcoder.JobTemplateConfigElementaryStreamVideoStreamH264Sdr; /** * Initial fullness of the Video Buffering Verifier (VBV) buffer in bits. 
*/ vbvFullnessBits: number; /** * Size of the Video Buffering Verifier (VBV) buffer in bits. */ vbvSizeBits: number; /** * The width of the video in pixels. */ widthPixels: number; } interface JobTemplateConfigElementaryStreamVideoStreamH264Hlg { } interface JobTemplateConfigElementaryStreamVideoStreamH264Sdr { } interface JobTemplateConfigEncryption { /** * Configuration for AES-128 encryption. */ aes128?: outputs.transcoder.JobTemplateConfigEncryptionAes128; /** * DRM system(s) to use; at least one must be specified. If a DRM system is omitted, it is considered disabled. * Structure is documented below. */ drmSystems: outputs.transcoder.JobTemplateConfigEncryptionDrmSystems; /** * Identifier for this set of encryption options. */ id: string; /** * Configuration for MPEG Common Encryption (MPEG-CENC). * Structure is documented below. */ mpegCenc: outputs.transcoder.JobTemplateConfigEncryptionMpegCenc; /** * Configuration for SAMPLE-AES encryption. */ sampleAes?: outputs.transcoder.JobTemplateConfigEncryptionSampleAes; /** * Configuration for secrets stored in Google Secret Manager. * Structure is documented below. */ secretManagerKeySource: outputs.transcoder.JobTemplateConfigEncryptionSecretManagerKeySource; } interface JobTemplateConfigEncryptionAes128 { } interface JobTemplateConfigEncryptionDrmSystems { /** * Clearkey configuration. */ clearkey?: outputs.transcoder.JobTemplateConfigEncryptionDrmSystemsClearkey; /** * Fairplay configuration. */ fairplay?: outputs.transcoder.JobTemplateConfigEncryptionDrmSystemsFairplay; /** * Playready configuration. */ playready?: outputs.transcoder.JobTemplateConfigEncryptionDrmSystemsPlayready; /** * Widevine configuration. 
*/ widevine?: outputs.transcoder.JobTemplateConfigEncryptionDrmSystemsWidevine; } interface JobTemplateConfigEncryptionDrmSystemsClearkey { } interface JobTemplateConfigEncryptionDrmSystemsFairplay { } interface JobTemplateConfigEncryptionDrmSystemsPlayready { } interface JobTemplateConfigEncryptionDrmSystemsWidevine { } interface JobTemplateConfigEncryptionMpegCenc { /** * Specify the encryption scheme. */ scheme: string; } interface JobTemplateConfigEncryptionSampleAes { } interface JobTemplateConfigEncryptionSecretManagerKeySource { /** * The name of the Secret Version containing the encryption key in the following format: projects/{project}/secrets/{secret_id}/versions/{version_number}. */ secretVersion: string; } interface JobTemplateConfigInput { /** * A unique key for this input. Must be specified when using advanced mapping and edit lists. */ key: string; /** * URI of the media. Input files must be at least 5 seconds in duration and stored in Cloud Storage (for example, gs://bucket/inputs/file.mp4). * If empty, the value is populated from Job.input_uri. */ uri: string; } interface JobTemplateConfigManifest { /** * The name of the generated file. The default is `manifest`. */ fileName: string; /** * List of user supplied MuxStream.key values that should appear in this manifest. */ muxStreams: string[]; /** * Type of the manifest. * Possible values are: `MANIFEST_TYPE_UNSPECIFIED`, `HLS`, `DASH`. */ type: string; } interface JobTemplateConfigMuxStream { /** * The container format. The default is `mp4`. */ container: string; /** * List of ElementaryStream.key values multiplexed in this stream. */ elementaryStreams: string[]; /** * Identifier of the encryption configuration to use. */ encryptionId: string; /** * The name of the generated file. */ fileName: string; /** * A unique key for this multiplexed stream. */ key: string; /** * Segment settings for ts, fmp4 and vtt. * Structure is documented below. 
*/ segmentSettings: outputs.transcoder.JobTemplateConfigMuxStreamSegmentSettings; } interface JobTemplateConfigMuxStreamSegmentSettings { /** * Duration of the segments in seconds. The default is `6.0s`. */ segmentDuration: string; } interface JobTemplateConfigOutput { /** * URI for the output file(s). For example, gs://my-bucket/outputs/. */ uri: string; } interface JobTemplateConfigOverlay { /** * List of animations. The list should be chronological, without any time overlap. * Structure is documented below. */ animations: outputs.transcoder.JobTemplateConfigOverlayAnimation[]; /** * Image overlay. * Structure is documented below. */ image: outputs.transcoder.JobTemplateConfigOverlayImage; } interface JobTemplateConfigOverlayAnimation { /** * Display overlay object with fade animation. * Structure is documented below. */ animationFade: outputs.transcoder.JobTemplateConfigOverlayAnimationAnimationFade; } interface JobTemplateConfigOverlayAnimationAnimationFade { /** * The time to end the fade animation, in seconds. */ endTimeOffset: string; /** * Required. Type of fade animation: `FADE_IN` or `FADE_OUT`. * The possible values are: * * `FADE_TYPE_UNSPECIFIED`: The fade type is not specified. * * `FADE_IN`: Fade the overlay object into view. * * `FADE_OUT`: Fade the overlay object out of view. * Possible values are: `FADE_TYPE_UNSPECIFIED`, `FADE_IN`, `FADE_OUT`. */ fadeType: string; /** * The time to start the fade animation, in seconds. */ startTimeOffset: string; /** * Normalized coordinates based on output video resolution. * Structure is documented below. */ xy: outputs.transcoder.JobTemplateConfigOverlayAnimationAnimationFadeXy; } interface JobTemplateConfigOverlayAnimationAnimationFadeXy { /** * Normalized x coordinate. */ x: number; /** * Normalized y coordinate. */ y: number; } interface JobTemplateConfigOverlayImage { /** * URI of the image in Cloud Storage. For example, gs://bucket/inputs/image.png. 
*/ uri: string; } interface JobTemplateConfigPubsubDestination { /** * The name of the Pub/Sub topic to publish job completion notification to. For example: projects/{project}/topics/{topic}. */ topic?: string; } } export declare namespace vectorsearch { interface CollectionVectorSchema { /** * Message describing a dense vector field. * Structure is documented below. */ denseVector?: outputs.vectorsearch.CollectionVectorSchemaDenseVector; /** * The identifier for this object. Format specified above. */ fieldName: string; /** * Message describing a sparse vector field. */ sparseVector?: outputs.vectorsearch.CollectionVectorSchemaSparseVector; } interface CollectionVectorSchemaDenseVector { /** * Dimensionality of the vector field. */ dimensions?: number; /** * Message describing the configuration for generating embeddings for a vector * field using Vertex AI embeddings API. * Structure is documented below. */ vertexEmbeddingConfig?: outputs.vectorsearch.CollectionVectorSchemaDenseVectorVertexEmbeddingConfig; } interface CollectionVectorSchemaDenseVectorVertexEmbeddingConfig { /** * Required: ID of the embedding model to use. See * https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#embeddings-models * for the list of supported models. */ modelId: string; /** * Possible values: * RETRIEVAL_QUERY * RETRIEVAL_DOCUMENT * SEMANTIC_SIMILARITY * CLASSIFICATION * CLUSTERING * QUESTION_ANSWERING * FACT_VERIFICATION * CODE_RETRIEVAL_QUERY */ taskType: string; /** * Required: Text template for the input to the model. The template must * contain one or more references to fields in the DataObject, e.g.: * "Movie Title: {title} ---- Movie Plot: {plot}". */ textTemplate: string; } interface CollectionVectorSchemaSparseVector { } } export declare namespace vertex { interface AiDatasetEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. 
* Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created. */ kmsKeyName?: string; } interface AiDeploymentResourcePoolDedicatedResources { /** * A list of the metric specifications that overrides a resource utilization metric. * Structure is documented below. */ autoscalingMetricSpecs?: outputs.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec[]; /** * The specification of a single machine used by the prediction * Structure is documented below. */ machineSpec: outputs.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpec; /** * The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use minReplicaCount as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (maxReplicaCount * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). */ maxReplicaCount?: number; /** * The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. */ minReplicaCount: number; } interface AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec { /** * The resource metric name. 
Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ metricName: string; /** * The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. */ target?: number; } interface AiDeploymentResourcePoolDedicatedResourcesMachineSpec { /** * The number of accelerators to attach to the machine. */ acceleratorCount?: number; /** * The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType). */ acceleratorType?: string; /** * The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types). */ machineType?: string; } interface AiEndpointDeployedModel { /** * (Output) * A description of resources that to a large degree are decided by Vertex AI, and require only a modest additional configuration. * Structure is documented below. */ automaticResources: outputs.vertex.AiEndpointDeployedModelAutomaticResource[]; /** * (Output) * Output only. Timestamp when the DeployedModel was created. */ createTime: string; /** * (Output) * A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. * Structure is documented below. */ dedicatedResources: outputs.vertex.AiEndpointDeployedModelDedicatedResource[]; /** * Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. 
*/ displayName: string; /** * (Output) * These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that Stackdriver logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. */ enableAccessLogging: boolean; /** * (Output) * If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Stackdriver Logging. Only supported for custom-trained Models and AutoML Tabular Models. */ enableContainerLogging: boolean; /** * (Output) * The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/. */ id: string; /** * (Output) * The name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. */ model: string; /** * (Output) * Output only. The version ID of the model that is deployed. */ modelVersionId: string; /** * (Output) * Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured. * Structure is documented below. */ privateEndpoints: outputs.vertex.AiEndpointDeployedModelPrivateEndpoint[]; /** * (Output) * The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. */ serviceAccount: string; /** * (Output) * The resource name of the shared DeploymentResourcePool to deploy on. 
Format: projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool} */ sharedResources: string; } interface AiEndpointDeployedModelAutomaticResource { /** * (Output) * The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. */ maxReplicaCount: number; /** * (Output) * The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */ minReplicaCount: number; } interface AiEndpointDeployedModelDedicatedResource { /** * (Output) * The metric specifications that overrides a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on) target value (default to 60 if not set). At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale down if both metrics are under their target value. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. 
For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. * Structure is documented below. */ autoscalingMetricSpecs: outputs.vertex.AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec[]; /** * (Output) * The specification of a single machine used by the prediction. * Structure is documented below. */ machineSpecs: outputs.vertex.AiEndpointDeployedModelDedicatedResourceMachineSpec[]; /** * (Output) * The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. */ maxReplicaCount: number; /** * (Output) * The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */ minReplicaCount: number; } interface AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec { /** * (Output) * The resource metric name. 
Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ metricName: string; /** * (Output) * The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. */ target: number; } interface AiEndpointDeployedModelDedicatedResourceMachineSpec { /** * (Output) * The number of accelerators to attach to the machine. */ acceleratorCount: number; /** * (Output) * The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType). */ acceleratorType: string; /** * (Output) * The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO: Try to better unify the required vs optional. */ machineType: string; } interface AiEndpointDeployedModelPrivateEndpoint { /** * (Output) * Output only. Http(s) path to send explain requests. */ explainHttpUri: string; /** * (Output) * Output only. Http(s) path to send health check requests. */ healthHttpUri: string; /** * (Output) * Output only. Http(s) path to send prediction requests. */ predictHttpUri: string; /** * (Output) * Output only. The name of the service attachment resource. Populated if private service connect is enabled. 
*/ serviceAttachment: string; } interface AiEndpointEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. */ kmsKeyName: string; } interface AiEndpointIamBindingCondition { description?: string; expression: string; title: string; } interface AiEndpointIamMemberCondition { description?: string; expression: string; title: string; } interface AiEndpointPredictRequestResponseLoggingConfig { /** * BigQuery table for logging. If only given a project, a new dataset will be created with name `logging_<endpoint-display-name>_<endpoint-id>` where `<endpoint-display-name>` will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores). If no table name is given, a new table will be created with name `requestResponseLogging` * Structure is documented below. */ bigqueryDestination?: outputs.vertex.AiEndpointPredictRequestResponseLoggingConfigBigqueryDestination; /** * If logging is enabled or not. */ enabled?: boolean; /** * Percentage of requests to be logged, expressed as a fraction in range(0,1] */ samplingRate?: number; } interface AiEndpointPredictRequestResponseLoggingConfigBigqueryDestination { /** * BigQuery URI to a project or table, up to 2000 characters long. When only the project is specified, the Dataset and Table is created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: - BigQuery path. For example: `bq://projectId` or `bq://projectId.bqDatasetId` or `bq://projectId.bqDatasetId.bqTableId`. */ outputUri?: string; } interface AiEndpointPrivateServiceConnectConfig { /** * Required. If true, expose the IndexEndpoint via private service connect. 
*/ enablePrivateServiceConnect: boolean; /** * (Optional, Beta) * If set to true, enable secure private service connect with IAM authorization. Otherwise, private service connect will be done without authorization. Note latency will be slightly increased if authorization is enabled. */ enableSecurePrivateServiceConnect?: boolean; /** * A list of Projects from which the forwarding rule will target the service attachment. */ projectAllowlists?: string[]; /** * List of projects and networks where the PSC endpoints will be created. This field is used by Online Inference(Prediction) only. * Structure is documented below. */ pscAutomationConfigs?: outputs.vertex.AiEndpointPrivateServiceConnectConfigPscAutomationConfig[]; } interface AiEndpointPrivateServiceConnectConfigPscAutomationConfig { /** * (Output) * Error message if the PSC service automation failed. */ errorMessage: string; /** * (Output) * Forwarding rule created by the PSC service automation. */ forwardingRule: string; /** * (Output) * IP address rule created by the PSC service automation. */ ipAddress: string; /** * The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/get): projects/{project}/global/networks/{network}. */ network: string; /** * Project id used to create forwarding rule. */ projectId: string; /** * (Output) * The state of the PSC service automation. */ state: string; } interface AiEndpointWithModelGardenDeploymentDeployConfig { /** * A description of resources that are dedicated to a DeployedModel or * DeployedIndex, and that need a higher degree of manual configuration. * Structure is documented below. */ dedicatedResources?: outputs.vertex.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources; /** * If true, enable the QMT fast tryout feature for this model if possible. 
*/ fastTryoutEnabled?: boolean; /** * System labels for Model Garden deployments. * These labels are managed by Google and for tracking purposes only. */ systemLabels?: { [key: string]: string; }; } interface AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources { /** * The metric specifications that overrides a resource * utilization metric (CPU utilization, accelerator's duty cycle, and so on) * target value (default to 60 if not set). At most one entry is allowed per * metric. * If machine_spec.accelerator_count is * above 0, the autoscaling will be based on both CPU utilization and * accelerator's duty cycle metrics and scale up when either metrics exceeds * its target value while scale down if both metrics are under their target * value. The default target value is 60 for both metrics. * If machine_spec.accelerator_count is * 0, the autoscaling will be based on CPU utilization metric only with * default target value 60 if not explicitly set. * For example, in the case of Online Prediction, if you want to override * target CPU utilization to 80, you should set * autoscaling_metric_specs.metric_name * to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and * autoscaling_metric_specs.target to `80`. * Structure is documented below. */ autoscalingMetricSpecs?: outputs.vertex.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec[]; /** * Specification of a single machine. * Structure is documented below. */ machineSpec: outputs.vertex.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec; /** * The maximum number of replicas that may be deployed on when the traffic * against it increases. If the requested value is too large, the deployment * will error, but if deployment succeeds then the ability to scale to that * many replicas is guaranteed (barring service outages). If traffic increases * beyond what its replicas at maximum may handle, a portion of the traffic * will be dropped. 
If this value is not provided, will use * minReplicaCount as the default value. * The value of this field impacts the charge against Vertex CPU and GPU * quotas. Specifically, you will be charged for (max_replica_count * * number of cores in the selected machine type) and (max_replica_count * * number of GPUs per replica in the selected machine type). */ maxReplicaCount?: number; /** * The minimum number of machine replicas that will be always deployed on. * This value must be greater than or equal to 1. * If traffic increases, it may dynamically be deployed onto more replicas, * and as traffic decreases, some of these extra replicas may be freed. */ minReplicaCount: number; /** * Number of required available replicas for the deployment to succeed. * This field is only needed when partial deployment/mutation is * desired. If set, the deploy/mutate operation will succeed once * availableReplicaCount reaches required_replica_count, and the rest of * the replicas will be retried. If not set, the default * requiredReplicaCount will be min_replica_count. */ requiredReplicaCount?: number; /** * If true, schedule the deployment workload on [spot * VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). */ spot?: boolean; } interface AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec { /** * The resource metric name. * Supported metrics: * * For Online Prediction: * * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ metricName: string; /** * The target resource utilization in percentage (1% - 100%) for the given * metric; once the real usage deviates from the target by a certain * percentage, the machine replicas change. The default value is 60 * (representing 60%) if not provided. 
*/ target?: number; } interface AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec { /** * The number of accelerators to attach to the machine. */ acceleratorCount?: number; /** * Possible values: * ACCELERATOR_TYPE_UNSPECIFIED * NVIDIA_TESLA_K80 * NVIDIA_TESLA_P100 * NVIDIA_TESLA_V100 * NVIDIA_TESLA_P4 * NVIDIA_TESLA_T4 * NVIDIA_TESLA_A100 * NVIDIA_A100_80GB * NVIDIA_L4 * NVIDIA_H100_80GB * NVIDIA_H100_MEGA_80GB * NVIDIA_H200_141GB * NVIDIA_B200 * TPU_V2 * TPU_V3 * TPU_V4_POD * TPU_V5_LITEPOD */ acceleratorType?: string; /** * The type of the machine. * See the [list of machine types supported for * prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) * See the [list of machine types supported for custom * training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). * For DeployedModel this field is optional, and the default * value is `n1-standard-2`. For BatchPredictionJob or as part of * WorkerPoolSpec this field is required. */ machineType?: string; /** * The number of nodes per replica for multihost GPU deployments. */ multihostGpuNodeCount?: number; /** * A ReservationAffinity can be used to configure a Vertex AI resource (e.g., a * DeployedModel) to draw its Compute Engine resources from a Shared * Reservation, or exclusively from on-demand capacity. * Structure is documented below. */ reservationAffinity?: outputs.vertex.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity; /** * The topology of the TPUs. Corresponds to the TPU topologies available from * GKE. (Example: tpu_topology: "2x2x1"). */ tpuTopology?: string; } interface AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity { /** * Corresponds to the label key of a reservation resource.
To target a * SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name` * as the key and specify the name of your reservation as its value. */ key?: string; /** * Specifies the reservation affinity type. * Possible values: * TYPE_UNSPECIFIED * NO_RESERVATION * ANY_RESERVATION * SPECIFIC_RESERVATION */ reservationAffinityType: string; /** * Corresponds to the label values of a reservation resource. This must be the * full resource name of the reservation or reservation block. */ values?: string[]; } interface AiEndpointWithModelGardenDeploymentEndpointConfig { /** * If true, the endpoint will be exposed through a dedicated * DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS * will be isolated from other users' traffic and will have better * performance and reliability. Note: Once you enable the dedicated endpoint, * you won't be able to send requests to the shared DNS * {region}-aiplatform.googleapis.com. The limitations will be removed soon. */ dedicatedEndpointEnabled?: boolean; /** * The user-specified display name of the endpoint. If not set, a * default name will be used. */ endpointDisplayName?: string; /** * The configuration for Private Service Connect (PSC). * Structure is documented below. */ privateServiceConnectConfig?: outputs.vertex.AiEndpointWithModelGardenDeploymentEndpointConfigPrivateServiceConnectConfig; } interface AiEndpointWithModelGardenDeploymentEndpointConfigPrivateServiceConnectConfig { /** * Required. If true, expose the IndexEndpoint via private service connect. */ enablePrivateServiceConnect: boolean; /** * A list of Projects from which the forwarding rule will target the service attachment. */ projectAllowlists?: string[]; /** * PSC config that is used to automatically create PSC endpoints in the user projects. * Structure is documented below.
* NOTE(review): field name is plural but the declared type is a single object, not an array — the underlying provider schema models `psc_automation_configs` as a repeated block, so this presumably should be `...PscAutomationConfigs[]`; confirm against the generator/provider schema before relying on it. */ pscAutomationConfigs?: outputs.vertex.AiEndpointWithModelGardenDeploymentEndpointConfigPrivateServiceConnectConfigPscAutomationConfigs; /** * (Output) * Output only. The name of the generated service attachment resource. * This is only populated if the endpoint is deployed with PrivateServiceConnect. */ serviceAttachment: string; } interface AiEndpointWithModelGardenDeploymentEndpointConfigPrivateServiceConnectConfigPscAutomationConfigs { /** * (Output) * Output only. Error message if the PSC service automation failed. */ errorMessage: string; /** * (Output) * Output only. Forwarding rule created by the PSC service automation. */ forwardingRule: string; /** * (Output) * Output only. IP address rule created by the PSC service automation. */ ipAddress: string; /** * Required. The full name of the Google Compute Engine network. * Format: projects/{project}/global/networks/{network}. */ network: string; /** * Required. Project id used to create forwarding rule. */ projectId: string; /** * (Output) * Output only. The state of the PSC service automation. */ state: string; } interface AiEndpointWithModelGardenDeploymentModelConfig { /** * Whether the user accepts the End User License Agreement (EULA) * for the model. */ acceptEula?: boolean; /** * Specification of a container for serving predictions. Some fields in this * message correspond to fields in the [Kubernetes Container v1 core * specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). * Structure is documented below. */ containerSpec?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpec; /** * The Hugging Face read access token used to access the model * artifacts of gated models. */ huggingFaceAccessToken?: string; /** * If true, the model will deploy with a cached version instead of directly * downloading the model artifacts from Hugging Face. This is suitable for * VPC-SC users with limited internet access.
*/ huggingFaceCacheEnabled?: boolean; /** * The user-specified display name of the uploaded model. If not * set, a default name will be used. */ modelDisplayName?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpec { /** * Specifies arguments for the command that runs when the container starts. * This overrides the container's * [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify * this field as an array of executable and arguments, similar to a Docker * `CMD`'s "default parameters" form. * If you don't specify this field but do specify the * command field, then the command from the * `command` field runs without any additional arguments. See the * [Kubernetes documentation about how the * `command` and `args` fields interact with a container's `ENTRYPOINT` and * `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). * If you don't specify this field and don't specify the `command` field, * then the container's * [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and * `CMD` determine what runs based on their default behavior. See the Docker * documentation about [how `CMD` and `ENTRYPOINT` * interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). * In this field, you can reference [environment variables * set by Vertex * AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) * and environment variables set in the env field. * You cannot reference environment variables set in the Docker image. In * order for environment variables to be expanded, reference them by using the * following syntax: $(VARIABLE_NAME) * Note that this differs from Bash variable expansion, which does not use * parentheses. If a variable cannot be resolved, the reference in the input * string is used unchanged.
To avoid variable expansion, you can escape this * syntax with `$$`; for example: $$(VARIABLE_NAME) * This field corresponds to the `args` field of the Kubernetes Containers * [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ args?: string[]; /** * Specifies the command that runs when the container starts. This overrides * the container's * [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). * Specify this field as an array of executable and arguments, similar to a * Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. * If you do not specify this field, then the container's `ENTRYPOINT` runs, * in conjunction with the args field or the * container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), * if either exists. If this field is not specified and the container does not * have an `ENTRYPOINT`, then refer to the Docker documentation about [how * `CMD` and `ENTRYPOINT` * interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). * If you specify this field, then you can also specify the `args` field to * provide additional arguments for this command. However, if you specify this * field, then the container's `CMD` is ignored. See the * [Kubernetes documentation about how the * `command` and `args` fields interact with a container's `ENTRYPOINT` and * `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). * In this field, you can reference [environment variables set by Vertex * AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) * and environment variables set in the env field. * You cannot reference environment variables set in the Docker image.
In * order for environment variables to be expanded, reference them by using the * following syntax: $(VARIABLE_NAME) * Note that this differs from Bash variable expansion, which does not use * parentheses. If a variable cannot be resolved, the reference in the input * string is used unchanged. To avoid variable expansion, you can escape this * syntax with `$$`; for example: $$(VARIABLE_NAME) * This field corresponds to the `command` field of the Kubernetes Containers * [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ commands?: string[]; /** * Deployment timeout. * Limit for deployment timeout is 2 hours. */ deploymentTimeout?: string; /** * List of environment variables to set in the container. After the container * starts running, code running in the container can read these environment * variables. * Additionally, the command and * args fields can reference these variables. Later * entries in this list can also reference earlier entries. For example, the * following example sets the variable `VAR_2` to have the value `foo bar`: * ```json * [ * { * "name": "VAR_1", * "value": "foo" * }, * { * "name": "VAR_2", * "value": "$(VAR_1) bar" * } * ] * ``` * If you switch the order of the variables in the example, then the expansion * does not occur. * This field corresponds to the `env` field of the Kubernetes Containers * [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). * Structure is documented below. */ envs?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv[]; /** * List of ports to expose from the container. Vertex AI sends gRPC * prediction requests that it receives to the first port on this list. Vertex * AI also sends liveness and health checks to this port. * If you do not specify this field, gRPC requests to the container will be * disabled. * Vertex AI does not use ports other than the first one listed.
This field * corresponds to the `ports` field of the Kubernetes Containers v1 core API. * Structure is documented below. */ grpcPorts?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort[]; /** * Probe describes a health check to be performed against a container to * determine whether it is alive or ready to receive traffic. * Structure is documented below. */ healthProbe?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe; /** * HTTP path on the container to send health checks to. Vertex AI * intermittently sends GET requests to this path on the container's IP * address and port to check that the container is healthy. Read more about * [health * checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). * For example, if you set this field to `/bar`, then Vertex AI * intermittently sends a GET request to the `/bar` path on the port of your * container specified by the first value of this `ModelContainerSpec`'s * ports field. * If you don't specify this field, it defaults to the following value when * you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict * The placeholders in this value are replaced as follows: * * ENDPOINT: The last segment (following `endpoints/`) of the * Endpoint.name field of the Endpoint where this Model has been * deployed. (Vertex AI makes this value available to your container code * as the [`AIP_ENDPOINT_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. * (Vertex AI makes this value available to your container code as the * [`AIP_DEPLOYED_MODEL_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
*/ healthRoute?: string; /** * URI of the Docker image to be used as the custom container for serving * predictions. This URI must identify an image in Artifact Registry or * Container Registry. Learn more about the [container publishing * requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), * including permissions requirements for the Vertex AI Service Agent. * The container image is ingested upon ModelService.UploadModel, stored * internally, and this original path is afterwards not used. * To learn about the requirements for the Docker image itself, see * [Custom container * requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). * You can use the URI to one of Vertex AI's [pre-built container images for * prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) * in this field. */ imageUri: string; /** * Probe describes a health check to be performed against a container to * determine whether it is alive or ready to receive traffic. * Structure is documented below. */ livenessProbe?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe; /** * List of ports to expose from the container. Vertex AI sends any * prediction requests that it receives to the first port on this list. Vertex * AI also sends * [liveness and health * checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) * to this port. * If you do not specify this field, it defaults to the following value: * ```json * [ * { * "containerPort": 8080 * } * ] * ``` * Vertex AI does not use ports other than the first one listed. This field * corresponds to the `ports` field of the Kubernetes Containers * [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). * Structure is documented below.
*/ ports?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort[]; /** * HTTP path on the container to send prediction requests to. Vertex AI * forwards requests sent using * projects.locations.endpoints.predict to this * path on the container's IP address and port. Vertex AI then returns the * container's response in the API response. * For example, if you set this field to `/foo`, then when Vertex AI * receives a prediction request, it forwards the request body in a POST * request to the `/foo` path on the port of your container specified by the * first value of this `ModelContainerSpec`'s * ports field. * If you don't specify this field, it defaults to the following value when * you deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict * The placeholders in this value are replaced as follows: * * ENDPOINT: The last segment (following `endpoints/`) of the * Endpoint.name field of the Endpoint where this Model has been * deployed. (Vertex AI makes this value available to your container code * as the [`AIP_ENDPOINT_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. * (Vertex AI makes this value available to your container code * as the [`AIP_DEPLOYED_MODEL_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ predictRoute?: string; /** * The amount of the VM memory to reserve as the shared memory for the model * in megabytes. */ sharedMemorySizeMb?: string; /** * Probe describes a health check to be performed against a container to * determine whether it is alive or ready to receive traffic. * Structure is documented below.
*/ startupProbe?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv { /** * Name of the environment variable. Must be a valid C identifier. */ name: string; /** * Variables that reference a $(VAR_NAME) are expanded * using the previous defined environment variables in the container and * any service environment variables. If a variable cannot be resolved, * the reference in the input string will be unchanged. The $(VAR_NAME) * syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped * references will never be expanded, regardless of whether the variable * exists or not. */ value: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort { /** * The number of the port to expose on the pod's IP address. * Must be a valid port number, between 1 and 65535 inclusive. */ containerPort?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe { /** * ExecAction specifies a command to execute. * Structure is documented below. */ exec?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec; /** * Number of consecutive failures before the probe is considered failed. * Defaults to 3. Minimum value is 1. * Maps to Kubernetes probe argument 'failureThreshold'. */ failureThreshold?: number; /** * GrpcAction checks the health of a container using a gRPC service. * Structure is documented below. */ grpc?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc; /** * HttpGetAction describes an action based on HTTP Get requests. * Structure is documented below. */ httpGet?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet; /** * Number of seconds to wait before starting the probe. Defaults to 0. * Minimum value is 0. * Maps to Kubernetes probe argument 'initialDelaySeconds'.
*/ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. Defaults to 10 seconds. * Minimum value is 1. Must be less than timeout_seconds. * Maps to Kubernetes probe argument 'periodSeconds'. */ periodSeconds?: number; /** * Number of consecutive successes before the probe is considered successful. * Defaults to 1. Minimum value is 1. * Maps to Kubernetes probe argument 'successThreshold'. */ successThreshold?: number; /** * TcpSocketAction probes the health of a container by opening a TCP socket * connection. * Structure is documented below. */ tcpSocket?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket; /** * Number of seconds after which the probe times out. Defaults to 1 second. * Minimum value is 1. Must be greater than or equal to period_seconds. * Maps to Kubernetes probe argument 'timeoutSeconds'. */ timeoutSeconds?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec { /** * Command is the command line to execute inside the container, the working * directory for the command is root ('/') in the container's filesystem. * The command is simply exec'd, it is not run inside a shell, so * traditional shell instructions ('|', etc) won't work. To use a shell, you * need to explicitly call out to that shell. Exit status of 0 is treated as * live/healthy and non-zero is unhealthy. */ commands?: string[]; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc { /** * Port number of the gRPC service. Number must be in the range 1 to 65535. */ port?: number; /** * Service is the name of the service to place in the gRPC * HealthCheckRequest. See * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. * If this is not specified, the default behavior is defined by gRPC.
*/ service?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet { /** * Host name to connect to, defaults to the model serving container's IP. * You probably want to set "Host" in httpHeaders instead. */ host?: string; /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. */ path?: string; /** * Number of the port to access on the container. * Number must be in the range 1 to 65535. */ port?: number; /** * Scheme to use for connecting to the host. * Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS". */ scheme?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader { /** * The header field name. * This will be canonicalized upon output, so case-variant names will be * understood as the same header. */ name?: string; /** * The header field value */ value?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket { /** * Optional: Host name to connect to, defaults to the model serving * container's IP. */ host?: string; /** * Number of the port to access on the container. * Number must be in the range 1 to 65535. */ port?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe { /** * ExecAction specifies a command to execute. * Structure is documented below. */ exec?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec; /** * Number of consecutive failures before the probe is considered failed. * Defaults to 3. Minimum value is 1. * Maps to Kubernetes probe argument 'failureThreshold'. */ failureThreshold?: number; /** * GrpcAction checks the health of a container using a gRPC service. * Structure is documented below.
*/ grpc?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc; /** * HttpGetAction describes an action based on HTTP Get requests. * Structure is documented below. */ httpGet?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet; /** * Number of seconds to wait before starting the probe. Defaults to 0. * Minimum value is 0. * Maps to Kubernetes probe argument 'initialDelaySeconds'. */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. Defaults to 10 seconds. * Minimum value is 1. Must be less than timeout_seconds. * Maps to Kubernetes probe argument 'periodSeconds'. */ periodSeconds?: number; /** * Number of consecutive successes before the probe is considered successful. * Defaults to 1. Minimum value is 1. * Maps to Kubernetes probe argument 'successThreshold'. */ successThreshold?: number; /** * TcpSocketAction probes the health of a container by opening a TCP socket * connection. * Structure is documented below. */ tcpSocket?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket; /** * Number of seconds after which the probe times out. Defaults to 1 second. * Minimum value is 1. Must be greater than or equal to period_seconds. * Maps to Kubernetes probe argument 'timeoutSeconds'. */ timeoutSeconds?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec { /** * Command is the command line to execute inside the container, the working * directory for the command is root ('/') in the container's filesystem. * The command is simply exec'd, it is not run inside a shell, so * traditional shell instructions ('|', etc) won't work. To use a shell, you * need to explicitly call out to that shell. Exit status of 0 is treated as * live/healthy and non-zero is unhealthy.
*/ commands?: string[]; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc { /** * Port number of the gRPC service. Number must be in the range 1 to 65535. */ port?: number; /** * Service is the name of the service to place in the gRPC * HealthCheckRequest. See * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet { /** * Host name to connect to, defaults to the model serving container's IP. * You probably want to set "Host" in httpHeaders instead. */ host?: string; /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. */ path?: string; /** * Number of the port to access on the container. * Number must be in the range 1 to 65535. */ port?: number; /** * Scheme to use for connecting to the host. * Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS". */ scheme?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader { /** * The header field name. * This will be canonicalized upon output, so case-variant names will be * understood as the same header. */ name?: string; /** * The header field value */ value?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket { /** * Optional: Host name to connect to, defaults to the model serving * container's IP. */ host?: string; /** * Number of the port to access on the container. * Number must be in the range 1 to 65535. */ port?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort { /** * The number of the port to expose on the pod's IP address.
* Must be a valid port number, between 1 and 65535 inclusive. */ containerPort?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe { /** * ExecAction specifies a command to execute. * Structure is documented below. */ exec?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec; /** * Number of consecutive failures before the probe is considered failed. * Defaults to 3. Minimum value is 1. * Maps to Kubernetes probe argument 'failureThreshold'. */ failureThreshold?: number; /** * GrpcAction checks the health of a container using a gRPC service. * Structure is documented below. */ grpc?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc; /** * HttpGetAction describes an action based on HTTP Get requests. * Structure is documented below. */ httpGet?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet; /** * Number of seconds to wait before starting the probe. Defaults to 0. * Minimum value is 0. * Maps to Kubernetes probe argument 'initialDelaySeconds'. */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. Defaults to 10 seconds. * Minimum value is 1. Must be less than timeout_seconds. * Maps to Kubernetes probe argument 'periodSeconds'. */ periodSeconds?: number; /** * Number of consecutive successes before the probe is considered successful. * Defaults to 1. Minimum value is 1. * Maps to Kubernetes probe argument 'successThreshold'. */ successThreshold?: number; /** * TcpSocketAction probes the health of a container by opening a TCP socket * connection. * Structure is documented below. */ tcpSocket?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket; /** * Number of seconds after which the probe times out. Defaults to 1 second. * Minimum value is 1. Must be greater than or equal to period_seconds.
* Maps to Kubernetes probe argument 'timeoutSeconds'. */ timeoutSeconds?: number; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec { /** * Command is the command line to execute inside the container, the working * directory for the command is root ('/') in the container's filesystem. * The command is simply exec'd, it is not run inside a shell, so * traditional shell instructions ('|', etc) won't work. To use a shell, you * need to explicitly call out to that shell. Exit status of 0 is treated as * live/healthy and non-zero is unhealthy. */ commands?: string[]; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc { /** * Port number of the gRPC service. Number must be in the range 1 to 65535. */ port?: number; /** * Service is the name of the service to place in the gRPC * HealthCheckRequest. See * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. * If this is not specified, the default behavior is defined by gRPC. */ service?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet { /** * Host name to connect to, defaults to the model serving container's IP. * You probably want to set "Host" in httpHeaders instead. */ host?: string; /** * Custom headers to set in the request. HTTP allows repeated headers. * Structure is documented below. */ httpHeaders?: outputs.vertex.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader[]; /** * Path to access on the HTTP server. */ path?: string; /** * Number of the port to access on the container. * Number must be in the range 1 to 65535. */ port?: number; /** * Scheme to use for connecting to the host. * Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS". */ scheme?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader { /** * The header field name.
* This will be canonicalized upon output, so case-variant names will be * understood as the same header. */ name?: string; /** * The header field value */ value?: string; } interface AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket { /** * Optional: Host name to connect to, defaults to the model serving * container's IP. */ host?: string; /** * Number of the port to access on the container. * Number must be in the range 1 to 65535. */ port?: number; } interface AiFeatureGroupBigQuery { /** * The BigQuery source URI that points to either a BigQuery Table or View. * Structure is documented below. */ bigQuerySource: outputs.vertex.AiFeatureGroupBigQueryBigQuerySource; /** * Columns to construct entityId / row keys. If not provided defaults to entityId. */ entityIdColumns?: string[]; } interface AiFeatureGroupBigQueryBigQuerySource { /** * BigQuery URI to a table, up to 2000 characters long. For example: `bq://projectId.bqDatasetId.bqTableId.` */ inputUri: string; } interface AiFeatureGroupIamBindingCondition { description?: string; expression: string; title: string; } interface AiFeatureGroupIamMemberCondition { description?: string; expression: string; title: string; } interface AiFeatureOnlineStoreBigtable { /** * Autoscaling config applied to Bigtable Instance. * Structure is documented below. */ autoScaling: outputs.vertex.AiFeatureOnlineStoreBigtableAutoScaling; /** * Optional. If true, enable direct access to the Bigtable instance. */ enableDirectBigtableAccess?: boolean; /** * The zone where the Bigtable instance will be created. */ zone: string; } interface AiFeatureOnlineStoreBigtableAutoScaling { /** * A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set, it will default to 50%.
*/ cpuUtilizationTarget: number; /** * The maximum number of nodes to scale up to. Must be greater than or equal to minNodeCount, and less than or equal to 10 times of 'minNodeCount'. */ maxNodeCount: number; /** * The minimum number of nodes to scale down to. Must be greater than or equal to 1. */ minNodeCount: number; } interface AiFeatureOnlineStoreDedicatedServingEndpoint { /** * Private service connect config. * Structure is documented below. */ privateServiceConnectConfig?: outputs.vertex.AiFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfig; /** * (Output) * Domain name to use for this FeatureOnlineStore */ publicEndpointDomainName: string; /** * (Output) * Name of the service attachment resource. Applicable only if private service connect is enabled and after FeatureViewSync is created. */ serviceAttachment: string; } interface AiFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfig { /** * If set to true, customers will use private service connection to send request. Otherwise, the connection will be set to the public endpoint. */ enablePrivateServiceConnect: boolean; /** * A list of Projects from which the forwarding rule will target the service attachment. */ projectAllowlists?: string[]; } interface AiFeatureOnlineStoreEmbeddingManagement { /** * Enable embedding management. */ enabled?: boolean; } interface AiFeatureOnlineStoreEncryptionSpec { /** * The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the compute resource is created. */ kmsKeyName: string; } interface AiFeatureOnlineStoreFeatureviewBigQuerySource { /** * Columns to construct entityId / row keys. Start by supporting 1 only. */ entityIdColumns: string[]; /** * The BigQuery view URI that will be materialized on each sync trigger based on FeatureView.SyncConfig.
*/ uri: string; } interface AiFeatureOnlineStoreFeatureviewFeatureRegistrySource { /** * List of features that need to be synced to Online Store. * Structure is documented below. */ featureGroups: outputs.vertex.AiFeatureOnlineStoreFeatureviewFeatureRegistrySourceFeatureGroup[]; /** * The project number of the parent project of the feature Groups. */ projectNumber?: string; } interface AiFeatureOnlineStoreFeatureviewFeatureRegistrySourceFeatureGroup { /** * Identifier of the feature group. */ featureGroupId: string; /** * Identifiers of features under the feature group. */ featureIds: string[]; } interface AiFeatureOnlineStoreFeatureviewIamBindingCondition { description?: string; expression: string; title: string; } interface AiFeatureOnlineStoreFeatureviewIamMemberCondition { description?: string; expression: string; title: string; } interface AiFeatureOnlineStoreFeatureviewSyncConfig { /** * If true, syncs the FeatureView in a continuous manner to Online Store. */ continuous?: boolean; /** * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled runs. * To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". */ cron: string; } interface AiFeatureOnlineStoreFeatureviewVectorSearchConfig { /** * Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. */ bruteForceConfig?: outputs.vertex.AiFeatureOnlineStoreFeatureviewVectorSearchConfigBruteForceConfig; /** * Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowdingAttribute. */ crowdingColumn?: string; /** * The distance measure used in nearest neighbor search.
* For details on allowed values, see the [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.featureOnlineStores.featureViews#DistanceMeasureType). * Possible values are: `SQUARED_L2_DISTANCE`, `COSINE_DISTANCE`, `DOT_PRODUCT_DISTANCE`. */ distanceMeasureType?: string; /** * Column of embedding. This column contains the source data to create index for vector search. */ embeddingColumn: string; /** * The number of dimensions of the input embedding. */ embeddingDimension?: number; /** * Columns of features that are used to filter vector search results. */ filterColumns?: string[]; /** * Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 * Structure is documented below. */ treeAhConfig?: outputs.vertex.AiFeatureOnlineStoreFeatureviewVectorSearchConfigTreeAhConfig; } interface AiFeatureOnlineStoreFeatureviewVectorSearchConfigBruteForceConfig { } interface AiFeatureOnlineStoreFeatureviewVectorSearchConfigTreeAhConfig { /** * Number of embeddings on each leaf node. The default value is 1000 if not set. */ leafNodeEmbeddingCount: string; } interface AiFeatureOnlineStoreIamBindingCondition { description?: string; expression: string; title: string; } interface AiFeatureOnlineStoreIamMemberCondition { description?: string; expression: string; title: string; } interface AiFeatureOnlineStoreOptimized { } interface AiFeatureStoreEncryptionSpec { /** * The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the compute resource is created. 
*/ kmsKeyName: string; } interface AiFeatureStoreEntityTypeIamBindingCondition { description?: string; expression: string; title: string; } interface AiFeatureStoreEntityTypeIamMemberCondition { description?: string; expression: string; title: string; } interface AiFeatureStoreEntityTypeMonitoringConfig { /** * Threshold for categorical features of anomaly detection. This is shared by all types of Featurestore Monitoring for categorical features (i.e. Features with type (Feature.ValueType) BOOL or STRING). * Structure is documented below. */ categoricalThresholdConfig?: outputs.vertex.AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig; /** * The config for ImportFeatures Analysis Based Feature Monitoring. * Structure is documented below. */ importFeaturesAnalysis?: outputs.vertex.AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis; /** * Threshold for numerical features of anomaly detection. This is shared by all objectives of Featurestore Monitoring for numerical features (i.e. Features with type (Feature.ValueType) DOUBLE or INT64). * Structure is documented below. */ numericalThresholdConfig?: outputs.vertex.AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig; /** * The config for Snapshot Analysis Based Feature Monitoring. * Structure is documented below. */ snapshotAnalysis?: outputs.vertex.AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis; } interface AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig { /** * Specify a threshold value that can trigger the alert. For categorical feature, the distribution distance is calculated by L-inifinity norm. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. The default value is 0.3. 
*/ value: number; } interface AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis { /** * Defines the baseline to do anomaly detection for feature values imported by each [entityTypes.importFeatureValues][] operation. The value must be one of the values below: * * LATEST_STATS: Choose the later one statistics generated by either most recent snapshot analysis or previous import features analysis. If none of them exist, skip anomaly detection and only generate a statistics. * * MOST_RECENT_SNAPSHOT_STATS: Use the statistics generated by the most recent snapshot analysis if exists. * * PREVIOUS_IMPORT_FEATURES_STATS: Use the statistics generated by the previous import features analysis if exists. */ anomalyDetectionBaseline?: string; /** * Whether to enable / disable / inherit default behavior for import features analysis. The value must be one of the values below: * * DEFAULT: The default behavior of whether to enable the monitoring. EntityType-level config: disabled. * * ENABLED: Explicitly enables import features analysis. EntityType-level config: by default enables import features analysis for all Features under it. * * DISABLED: Explicitly disables import features analysis. EntityType-level config: by default disables import features analysis for all Features under it. */ state?: string; } interface AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig { /** * Specify a threshold value that can trigger the alert. For numerical feature, the distribution distance is calculated by Jensen–Shannon divergence. Each feature must have a non-zero threshold if they need to be monitored. Otherwise no alert will be triggered for that feature. The default value is 0.3. 
For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoringInterval for Features under it. */ disabled?: boolean; /** * (Optional, Beta, Deprecated) * Configuration of the snapshot analysis based monitoring pipeline running interval. The value is rolled up to full day. * A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". * * > **Warning:** `monitoringInterval` is deprecated and will be removed in a future release. * * @deprecated `monitoringInterval` is deprecated and will be removed in a future release. */ monitoringInterval: string; /** * Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. The default value is 1. * If both FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days and [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][] are set when creating/updating EntityTypes/Features, FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days will be used. */ monitoringIntervalDays?: number; /** * Customized export features time window for snapshot analysis. Unit is one day. The default value is 21 days. Minimum value is 1 day. Maximum value is 4000 days. */ stalenessDays?: number; } interface AiFeatureStoreIamBindingCondition { description?: string; expression: string; title: string; } interface AiFeatureStoreIamMemberCondition { description?: string; expression: string; title: string; } interface AiFeatureStoreOnlineServingConfig { /** * The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating. */ fixedNodeCount?: number; /** * Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. Setting one will reset the other. * Structure is documented below. 
*/ scaling?: outputs.vertex.AiFeatureStoreOnlineServingConfigScaling; } interface AiFeatureStoreOnlineServingConfigScaling { /** * The maximum number of nodes to scale up to. Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'. */ maxNodeCount: number; /** * The minimum number of nodes to scale down to. Must be greater than or equal to 1. */ minNodeCount: number; } interface AiIndexDeployedIndex { /** * (Output) * The ID of the DeployedIndex in the above IndexEndpoint. */ deployedIndexId: string; /** * (Output) * A resource name of the IndexEndpoint. */ indexEndpoint: string; } interface AiIndexEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. */ kmsKeyName: string; } interface AiIndexEndpointDeployedIndexAutomaticResources { /** * The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. * The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. */ maxReplicaCount: number; /** * The minimum number of replicas this DeployedModel will be always deployed on. 
If minReplicaCount is not set, the default value is 2 (we don't provide SLA when minReplicaCount=1). * If traffic against it increases, it may dynamically be deployed onto more replicas up to [maxReplicaCount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/AutomaticResources#FIELDS.max_replica_count), and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. */ minReplicaCount: number; } interface AiIndexEndpointDeployedIndexDedicatedResources { /** * The specification of a single machine used by each replica of this deployed index. * Structure is documented below. */ machineSpec: outputs.vertex.AiIndexEndpointDeployedIndexDedicatedResourcesMachineSpec; /** * The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount. */ maxReplicaCount: number; /** * The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. */ minReplicaCount: number; } interface AiIndexEndpointDeployedIndexDedicatedResourcesMachineSpec { /** * The type of the machine. * See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) * See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). * For [DeployedModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints#DeployedModel) this field is optional, and the default value is n1-standard-2. For [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchPredictionJob) or as part of [WorkerPoolSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec) this field is required. 
*/ machineType?: string; } interface AiIndexEndpointDeployedIndexDeployedIndexAuthConfig { /** * Defines the authentication provider that the DeployedIndex uses. * Structure is documented below. */ authProvider?: outputs.vertex.AiIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider; } interface AiIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider { /** * A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: service-account-name@project-id.iam.gserviceaccount.com */ allowedIssuers?: string[]; /** * The list of JWT audiences. that are allowed to access. A JWT containing any of these audiences will be accepted. */ audiences?: string[]; } interface AiIndexEndpointDeployedIndexPrivateEndpoint { /** * (Output) * The ip address used to send match gRPC requests. */ matchGrpcAddress: string; /** * (Output) * PscAutomatedEndpoints is populated if private service connect is enabled if PscAutomatedConfig is set. * Structure is documented below. */ pscAutomatedEndpoints: outputs.vertex.AiIndexEndpointDeployedIndexPrivateEndpointPscAutomatedEndpoint[]; /** * (Output) * The name of the service attachment resource. Populated if private service connect is enabled. */ serviceAttachment: string; } interface AiIndexEndpointDeployedIndexPrivateEndpointPscAutomatedEndpoint { /** * (Output) * ip Address created by the automated forwarding rule. */ matchAddress: string; /** * (Output) * Corresponding network in pscAutomationConfigs. */ network: string; /** * (Output) * Corresponding projectId in pscAutomationConfigs */ projectId: string; } interface AiIndexEndpointEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. 
*/ kmsKeyName: string; } interface AiIndexEndpointPrivateServiceConnectConfig { /** * If set to true, the IndexEndpoint is created without private service access. */ enablePrivateServiceConnect: boolean; /** * A list of Projects from which the forwarding rule will target the service attachment. */ projectAllowlists?: string[]; /** * List of projects and networks where the PSC endpoints will be created. This field is used by Online Inference(Prediction) only. * Structure is documented below. */ pscAutomationConfigs?: outputs.vertex.AiIndexEndpointPrivateServiceConnectConfigPscAutomationConfig[]; } interface AiIndexEndpointPrivateServiceConnectConfigPscAutomationConfig { /** * The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/get): projects/{project}/global/networks/{network}. */ network: string; /** * Project id used to create forwarding rule. */ projectId: string; } interface AiIndexIndexStat { /** * (Output) * The number of shards in the Index. */ shardsCount: number; /** * (Output) * The number of vectors in the Index. */ vectorsCount: string; } interface AiIndexMetadata { /** * The configuration of the Matching Engine Index. * Structure is documented below. */ config: outputs.vertex.AiIndexMetadataConfig; /** * Allows inserting, updating or deleting the contents of the Matching Engine Index. * The string must be a valid Cloud Storage directory path. If this * field is set when calling IndexService.UpdateIndex, then no other * Index field can be also updated as part of the same call. 
* The expected structure and format of the files this URI points to is * described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format */ contentsDeltaUri?: string; /** * If this field is set together with contentsDeltaUri when calling IndexService.UpdateIndex, * then existing content of the Index will be replaced by the data from the contentsDeltaUri. */ isCompleteOverwrite?: boolean; } interface AiIndexMetadataConfig { /** * The configuration with regard to the algorithms used for efficient search. This field may be required based on your configuration. * Structure is documented below. */ algorithmConfig?: outputs.vertex.AiIndexMetadataConfigAlgorithmConfig; /** * The default number of neighbors to find via approximate search before exact reordering is * performed. Exact reordering is a procedure where results returned by an * approximate search algorithm are reordered via a more expensive distance computation. * Required if tree-AH algorithm is used. */ approximateNeighborsCount?: number; /** * The number of dimensions of the input vectors. */ dimensions: number; /** * The distance measure used in nearest neighbor search. The value must be one of the followings: * * SQUARED_L2_DISTANCE: Euclidean (L_2) Distance * * L1_DISTANCE: Manhattan (L_1) Distance * * COSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity. * * DOT_PRODUCT_DISTANCE: Dot Product Distance. Defined as a negative of the dot product */ distanceMeasureType?: string; /** * Type of normalization to be carried out on each vector. The value must be one of the followings: * * UNIT_L2_NORM: Unit L2 normalization type * * NONE: No normalization type is specified. */ featureNormType?: string; /** * Index data is split into equal parts to be processed. These are called "shards". * The shard size must be specified when creating an index. 
The value must be one of the followings: * * SHARD_SIZE_SMALL: Small (2GB) * * SHARD_SIZE_MEDIUM: Medium (20GB) * * SHARD_SIZE_LARGE: Large (50GB) */ shardSize: string; } interface AiIndexMetadataConfigAlgorithmConfig { /** * Configuration options for using brute force search, which simply implements the * standard linear search in the database for each query. */ bruteForceConfig?: outputs.vertex.AiIndexMetadataConfigAlgorithmConfigBruteForceConfig; /** * Configuration options for using the tree-AH algorithm (Shallow tree + Asymmetric Hashing). * Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 * Structure is documented below. */ treeAhConfig?: outputs.vertex.AiIndexMetadataConfigAlgorithmConfigTreeAhConfig; } interface AiIndexMetadataConfigAlgorithmConfigBruteForceConfig { } interface AiIndexMetadataConfigAlgorithmConfigTreeAhConfig { /** * Number of embeddings on each leaf node. The default value is 1000 if not set. */ leafNodeEmbeddingCount?: number; /** * The default percentage of leaf nodes that any query may be searched. Must be in * range 1-100, inclusive. The default value is 10 (means 10%) if not set. */ leafNodesToSearchPercent?: number; } interface AiMetadataStoreEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. * Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created. */ kmsKeyName?: string; } interface AiMetadataStoreState { /** * (Output) * The disk utilization of the MetadataStore in bytes. */ diskUtilizationBytes: string; } interface AiRagEngineConfigRagManagedDbConfig { /** * Basic tier is a cost-effective and low compute tier suitable for the following cases: Experimenting with RagManagedDb, Small data size, Latency insensitive workload, Only using RAG Engine with external vector DBs. 
* NOTE: This is the default tier if not explicitly chosen. */ basic?: outputs.vertex.AiRagEngineConfigRagManagedDbConfigBasic; /** * Scaled tier offers production grade performance along with autoscaling functionality. It is suitable for customers with large amounts of data or performance sensitive workloads. */ scaled?: outputs.vertex.AiRagEngineConfigRagManagedDbConfigScaled; /** * Disables the RAG Engine service and deletes all your data held within this service. This will halt the billing of the service. * NOTE: Once deleted the data cannot be recovered. To start using RAG Engine again, you will need to update the tier by calling the UpdateRagEngineConfig API. */ unprovisioned?: outputs.vertex.AiRagEngineConfigRagManagedDbConfigUnprovisioned; } interface AiRagEngineConfigRagManagedDbConfigBasic { } interface AiRagEngineConfigRagManagedDbConfigScaled { } interface AiRagEngineConfigRagManagedDbConfigUnprovisioned { } interface AiReasoningEngineEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed * encryption key used to protect a resource. Has the form: * projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. * The key needs to be in the same region as where the compute resource * is created. */ kmsKeyName: string; } interface AiReasoningEngineSpec { /** * Optional. The OSS agent framework used to develop the agent. */ agentFramework?: string; /** * Optional. Declarations for object class methods in OpenAPI * specification format. */ classMethods?: string; /** * Optional. The specification of a Reasoning Engine deployment. * Structure is documented below. */ deploymentSpec?: outputs.vertex.AiReasoningEngineSpecDeploymentSpec; /** * Optional. User provided package spec of the ReasoningEngine. * Ignored when users directly specify a deployment image through * deploymentSpec.first_party_image_override, but keeping the * fieldBehavior to avoid introducing breaking changes. * Structure is documented below. 
*/ packageSpec?: outputs.vertex.AiReasoningEngineSpecPackageSpec; /** * Optional. The service account that the Reasoning Engine artifact runs * as. It should have "roles/storage.objectViewer" for reading the user * project's Cloud Storage and "roles/aiplatform.user" for using Vertex * extensions. If not specified, the Vertex AI Reasoning Engine service * Agent in the project will be used. */ serviceAccount?: string; /** * Specification for deploying from source code. * Structure is documented below. */ sourceCodeSpec?: outputs.vertex.AiReasoningEngineSpecSourceCodeSpec; } interface AiReasoningEngineSpecDeploymentSpec { /** * Optional. Concurrency for each container and agent server. * Recommended value: 2 * cpu + 1. Defaults to 9. */ containerConcurrency: number; /** * Optional. Environment variables to be set with the Reasoning * Engine deployment. * Structure is documented below. */ envs?: outputs.vertex.AiReasoningEngineSpecDeploymentSpecEnv[]; /** * Optional. The maximum number of application instances that can be * launched to handle increased traffic. Defaults to 100. * Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable * range is [1, 100]. */ maxInstances: number; /** * Optional. The minimum number of application instances that will be * kept running at all times. Defaults to 1. Range: [0, 10]. */ minInstances: number; /** * Optional. Configuration for PSC-Interface. * Structure is documented below. */ pscInterfaceConfig?: outputs.vertex.AiReasoningEngineSpecDeploymentSpecPscInterfaceConfig; /** * Optional. Resource limits for each container. * Only 'cpu' and 'memory' keys are supported. * Defaults to {"cpu": "4", "memory": "4Gi"}. * The only supported values for CPU are '1', '2', '4', '6' and '8'. * For more information, go to * https://cloud.google.com/run/docs/configuring/cpu. * The only supported values for memory are '1Gi', '2Gi', ... '32 Gi'. * For more information, go to * https://cloud.google.com/run/docs/configuring/memory-limits. 
*/ resourceLimits: { [key: string]: string; }; /** * Optional. Environment variables where the value is a secret in * Cloud Secret Manager. To use this feature, add 'Secret Manager * Secret Accessor' role (roles/secretmanager.secretAccessor) to AI * Platform Reasoning Engine service Agent. * Structure is documented below. */ secretEnvs?: outputs.vertex.AiReasoningEngineSpecDeploymentSpecSecretEnv[]; } interface AiReasoningEngineSpecDeploymentSpecEnv { /** * The name of the environment variable. Must be a valid * C identifier. */ name: string; /** * Variables that reference a $(VAR_NAME) are expanded using * the previous defined environment variables in the container * and any service environment variables. If a variable cannot * be resolved, the reference in the input string will be * unchanged. The $(VAR_NAME) syntax can be escaped with a * double $$, ie: $$(VAR_NAME). Escaped references will never * be expanded, regardless of whether the variable exists * or not. */ value: string; } interface AiReasoningEngineSpecDeploymentSpecPscInterfaceConfig { /** * Optional. DNS peering configurations. * When specified, Vertex AI will attempt to configure DNS * peering zones in the tenant project VPC to resolve the * specified domains using the target network's Cloud DNS. * The user must grant the dns.peer role to the Vertex AI * service Agent on the target project. * Structure is documented below. */ dnsPeeringConfigs?: outputs.vertex.AiReasoningEngineSpecDeploymentSpecPscInterfaceConfigDnsPeeringConfig[]; /** * Optional. The name of the Compute Engine network attachment * to attach to the resource within the region and user project. * To specify this field, you must have already created a network attachment. * This field is only used for resources using PSC-Interface. */ networkAttachment?: string; } interface AiReasoningEngineSpecDeploymentSpecPscInterfaceConfigDnsPeeringConfig { /** * Required. 
The DNS name suffix of the zone being peered * to, e.g., "my-internal-domain.corp.". * Must end with a dot. */ domain: string; /** * Required. The VPC network name in the targetProject * where the DNS zone specified by 'domain' is visible. */ targetNetwork: string; /** * Required. The project id hosting the Cloud DNS managed * zone that contains the 'domain'. * The Vertex AI service Agent requires the dns.peer role * on this project. */ targetProject: string; } interface AiReasoningEngineSpecDeploymentSpecSecretEnv { /** * The name of the environment variable. Must be a valid C * identifier. */ name: string; /** * Reference to a secret stored in the Cloud Secret Manager * that will provide the value for this environment variable. * Structure is documented below. */ secretRef: outputs.vertex.AiReasoningEngineSpecDeploymentSpecSecretEnvSecretRef; } interface AiReasoningEngineSpecDeploymentSpecSecretEnvSecretRef { /** * The name of the secret in Cloud Secret Manager. * Format: {secret_name}. */ secret: string; /** * The Cloud Secret Manager secret version. Can be 'latest' * for the latest version, an integer for a specific * version, or a version alias. */ version?: string; } interface AiReasoningEngineSpecPackageSpec { /** * Optional. The Cloud Storage URI of the dependency files in tar.gz * format. */ dependencyFilesGcsUri?: string; /** * Optional. The Cloud Storage URI of the pickled python object. */ pickleObjectGcsUri?: string; /** * Optional. The Python version. Currently supported versions are 3.8, 3.9, 3.10, * 3.11, 3.12, 3.13. If not specified, default value is 3.10. */ pythonVersion?: string; /** * Optional. The Cloud Storage URI of the requirements.txt file */ requirementsGcsUri?: string; } interface AiReasoningEngineSpecSourceCodeSpec { /** * Specification for source code to be fetched from a Git repository managed through the Developer Connect service. * Structure is documented below. 
*/ developerConnectSource?: outputs.vertex.AiReasoningEngineSpecSourceCodeSpecDeveloperConnectSource; /** * Source code is provided directly in the request. * Structure is documented below. */ inlineSource?: outputs.vertex.AiReasoningEngineSpecSourceCodeSpecInlineSource; /** * Specification for running a Python application from source. * Structure is documented below. */ pythonSpec?: outputs.vertex.AiReasoningEngineSpecSourceCodeSpecPythonSpec; } interface AiReasoningEngineSpecSourceCodeSpecDeveloperConnectSource { /** * The Developer Connect configuration that defines the specific repository, revision, and directory to use as the source code root. * Structure is documented below. */ config: outputs.vertex.AiReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceConfig; } interface AiReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceConfig { /** * Directory, relative to the source root, in which to run the build. */ dir: string; /** * The Developer Connect Git repository link, formatted as projects/*/locations/*/connections/*/gitRepositoryLink/*. */ gitRepositoryLink: string; /** * The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. */ revision: string; } interface AiReasoningEngineSpecSourceCodeSpecInlineSource { /** * Required. Input only. * The application source code archive, provided as a compressed * tarball (.tar.gz) file. A base64-encoded string. */ sourceArchive?: string; } interface AiReasoningEngineSpecSourceCodeSpecPythonSpec { /** * Optional. The Python module to load as the entrypoint, * specified as a fully qualified module name. For example: * path.to.agent. If not specified, defaults to "agent". * The project root will be added to Python sys.path, allowing * imports to be specified relative to the root. */ entrypointModule?: string; /** * Optional. The name of the callable object within the * entrypointModule to use as the application If not specified, * defaults to "rootAgent". 
*/ entrypointObject?: string; /** * Optional. The path to the requirements file, relative to the * source root. If not specified, defaults to "requirements.txt". */ requirementsFile?: string; /** * Optional. The version of Python to use. Supported versions * include 3.9, 3.10, 3.11, 3.12, 3.13. If not specified, * default value is 3.10. */ version?: string; } interface AiTensorboardEncryptionSpec { /** * The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. * Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created. */ kmsKeyName: string; } interface GetAiIndexDeployedIndex { /** * The ID of the DeployedIndex in the above IndexEndpoint. */ deployedIndexId: string; /** * A resource name of the IndexEndpoint. */ indexEndpoint: string; } interface GetAiIndexEncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: 'projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key'. The key needs to be in the same region as where the compute resource is created. */ kmsKeyName: string; } interface GetAiIndexIndexStat { /** * The number of shards in the Index. */ shardsCount: number; /** * The number of vectors in the Index. */ vectorsCount: string; } interface GetAiIndexMetadata { /** * The configuration of the Matching Engine Index. */ configs: outputs.vertex.GetAiIndexMetadataConfig[]; /** * Allows inserting, updating or deleting the contents of the Matching Engine Index. * The string must be a valid Cloud Storage directory path. If this * field is set when calling IndexService.UpdateIndex, then no other * Index field can be also updated as part of the same call. 
* The expected structure and format of the files this URI points to is * described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format */ contentsDeltaUri: string; /** * If this field is set together with contentsDeltaUri when calling IndexService.UpdateIndex, * then existing content of the Index will be replaced by the data from the contentsDeltaUri. */ isCompleteOverwrite: boolean; } interface GetAiIndexMetadataConfig { /** * The configuration with regard to the algorithms used for efficient search. This field may be required based on your configuration. */ algorithmConfigs: outputs.vertex.GetAiIndexMetadataConfigAlgorithmConfig[]; /** * The default number of neighbors to find via approximate search before exact reordering is * performed. Exact reordering is a procedure where results returned by an * approximate search algorithm are reordered via a more expensive distance computation. * Required if tree-AH algorithm is used. */ approximateNeighborsCount: number; /** * The number of dimensions of the input vectors. */ dimensions: number; /** * The distance measure used in nearest neighbor search. The value must be one of the followings: * * SQUARED_L2_DISTANCE: Euclidean (L_2) Distance * * L1_DISTANCE: Manhattan (L_1) Distance * * COSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity. * * DOT_PRODUCT_DISTANCE: Dot Product Distance. Defined as a negative of the dot product */ distanceMeasureType: string; /** * Type of normalization to be carried out on each vector. The value must be one of the followings: * * UNIT_L2_NORM: Unit L2 normalization type * * NONE: No normalization type is specified. */ featureNormType: string; /** * Index data is split into equal parts to be processed. These are called "shards". * The shard size must be specified when creating an index. 
The value must be one of the following:
* It starts once addition or removal of nodes is fully completed. * Minimum cool down period is 30m. * Cool down period must be in whole minutes (for example, 30m, 31m, 50m). * Mandatory for successful addition of autoscaling settings in cluster. */ coolDownPeriod?: string; /** * Maximum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ maxClusterNodeCount?: number; /** * Minimum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ minClusterNodeCount?: number; } interface ClusterAutoscalingSettingsAutoscalingPolicy { /** * The identifier for this object. Format specified above. */ autoscalePolicyId: string; /** * Utilization thresholds pertaining to amount of consumed memory. * Structure is documented below. */ consumedMemoryThresholds?: outputs.vmwareengine.ClusterAutoscalingSettingsAutoscalingPolicyConsumedMemoryThresholds; /** * Utilization thresholds pertaining to CPU utilization. * Structure is documented below. */ cpuThresholds?: outputs.vmwareengine.ClusterAutoscalingSettingsAutoscalingPolicyCpuThresholds; /** * The canonical identifier of the node type to add or remove. */ nodeTypeId: string; /** * Number of nodes to add to a cluster during a scale-out operation. * Must be divisible by 2 for stretched clusters. */ scaleOutSize: number; /** * Utilization thresholds pertaining to amount of consumed storage. * Structure is documented below. */ storageThresholds?: outputs.vmwareengine.ClusterAutoscalingSettingsAutoscalingPolicyStorageThresholds; } interface ClusterAutoscalingSettingsAutoscalingPolicyConsumedMemoryThresholds { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. 
*/ scaleOut: number; } interface ClusterAutoscalingSettingsAutoscalingPolicyCpuThresholds { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface ClusterAutoscalingSettingsAutoscalingPolicyStorageThresholds { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface ClusterDatastoreMountConfig { /** * Optional. NFS is accessed by hosts in either read or readWrite mode * Default value used will be READ_WRITE * Possible values: * READ_ONLY * READ_WRITE */ accessMode?: string; /** * The resource name of the datastore to unmount. * The datastore requested to be mounted should be in same region/zone as the * cluster. * Resource names are schemeless URIs that follow the conventions in * https://cloud.google.com/apis/design/resource_names. * For example: * `projects/my-project/locations/us-central1/datastores/my-datastore` */ datastore: string; /** * The network configuration for the datastore. * Structure is documented below. */ datastoreNetwork: outputs.vmwareengine.ClusterDatastoreMountConfigDatastoreNetwork; /** * (Output) * File share name. */ fileShare: string; /** * Optional. If set to true, the colocation requirement will be ignored. * If set to false, the colocation requirement will be enforced. * Colocation requirement is the requirement that the cluster must be in the * same region/zone of datastore. */ ignoreColocation?: boolean; /** * Optional. The NFS protocol supported by the NFS volume. * Default value used will be NFS_V3 * Possible values: * NFS_V3 */ nfsVersion?: string; /** * (Output) * Server IP addresses of the NFS volume. * For NFS 3, you can only provide a single * server IP address or DNS names. 
*/ servers: string[]; } interface ClusterDatastoreMountConfigDatastoreNetwork { /** * Optional. The number of connections of the NFS volume. * Supported from vsphere 8.0u1. Possible values are 1-4. * Default value is 4. */ connectionCount?: number; /** * Optional. The Maximal Transmission Unit (MTU) of the datastore. * MTU value can range from 1330-9000. If not set, system sets * default MTU size to 1500. */ mtu?: number; /** * (Output) * The resource name of the network peering, used to access the * file share by clients on private cloud. Resource names are schemeless * URIs that follow the conventions in * https://cloud.google.com/apis/design/resource_names. * e.g. * projects/my-project/locations/us-central1/networkPeerings/my-network-peering */ networkPeering: string; /** * The resource name of the subnet * Resource names are schemeless URIs that follow the conventions in * https://cloud.google.com/apis/design/resource_names. * e.g. projects/my-project/locations/us-central1/subnets/my-subnet */ subnet: string; } interface ClusterNodeTypeConfig { /** * Customized number of cores available to each node of the type. * This number must always be one of `nodeType.availableCustomCoreCounts`. * If zero is provided max value from `nodeType.availableCustomCoreCounts` will be used. * Once the customer is created then corecount cannot be changed. */ customCoreCount?: number; /** * The number of nodes of this type in the cluster. */ nodeCount: number; /** * The identifier for this object. Format specified above. */ nodeTypeId: string; } interface DatastoreNfsDatastore { /** * Google service file service configuration * Structure is documented below. */ googleFileService?: outputs.vmwareengine.DatastoreNfsDatastoreGoogleFileService; /** * Third party file service configuration * Structure is documented below. 
*/ thirdPartyFileService?: outputs.vmwareengine.DatastoreNfsDatastoreThirdPartyFileService; } interface DatastoreNfsDatastoreGoogleFileService { /** * Google filestore instance resource name * e.g. projects/my-project/locations/me-west1-b/instances/my-instance */ filestoreInstance?: string; /** * Google netapp volume resource name * e.g. projects/my-project/locations/me-west1-b/volumes/my-volume */ netappVolume?: string; } interface DatastoreNfsDatastoreThirdPartyFileService { /** * Required * Mount Folder name */ fileShare: string; /** * Required to identify vpc peering used for NFS access * network name of NFS's vpc * e.g. projects/project-id/global/networks/my-network_id */ network: string; /** * Server IP addresses of the NFS file service. * NFS v3, provide a single IP address or DNS name. * Multiple servers can be supported in future when NFS 4.1 protocol support * is enabled. */ servers: string[]; } interface ExternalAccessRuleDestinationIpRange { /** * The name of an `ExternalAddress` resource. */ externalAddress?: string; /** * An IP address range in the CIDR format. */ ipAddressRange?: string; } interface ExternalAccessRuleSourceIpRange { /** * A single IP address. */ ipAddress?: string; /** * An IP address range in the CIDR format. */ ipAddressRange?: string; } interface GetClusterAutoscalingSetting { /** * The map with autoscaling policies applied to the cluster. * The key is the identifier of the policy. * It must meet the following requirements: * * Only contains 1-63 alphanumeric characters and hyphens * * Begins with an alphabetical character * * Ends with a non-hyphen character * * Not formatted as a UUID * * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) * * Currently the map must contain only one element * that describes the autoscaling policy for compute nodes. 
*/ autoscalingPolicies: outputs.vmwareengine.GetClusterAutoscalingSettingAutoscalingPolicy[]; /** * The minimum duration between consecutive autoscale operations. * It starts once addition or removal of nodes is fully completed. * Minimum cool down period is 30m. * Cool down period must be in whole minutes (for example, 30m, 31m, 50m). * Mandatory for successful addition of autoscaling settings in cluster. */ coolDownPeriod: string; /** * Maximum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ maxClusterNodeCount: number; /** * Minimum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ minClusterNodeCount: number; } interface GetClusterAutoscalingSettingAutoscalingPolicy { autoscalePolicyId: string; /** * Utilization thresholds pertaining to amount of consumed memory. */ consumedMemoryThresholds: outputs.vmwareengine.GetClusterAutoscalingSettingAutoscalingPolicyConsumedMemoryThreshold[]; /** * Utilization thresholds pertaining to CPU utilization. */ cpuThresholds: outputs.vmwareengine.GetClusterAutoscalingSettingAutoscalingPolicyCpuThreshold[]; /** * The canonical identifier of the node type to add or remove. */ nodeTypeId: string; /** * Number of nodes to add to a cluster during a scale-out operation. * Must be divisible by 2 for stretched clusters. */ scaleOutSize: number; /** * Utilization thresholds pertaining to amount of consumed storage. */ storageThresholds: outputs.vmwareengine.GetClusterAutoscalingSettingAutoscalingPolicyStorageThreshold[]; } interface GetClusterAutoscalingSettingAutoscalingPolicyConsumedMemoryThreshold { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. 
*/ scaleOut: number; } interface GetClusterAutoscalingSettingAutoscalingPolicyCpuThreshold { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface GetClusterAutoscalingSettingAutoscalingPolicyStorageThreshold { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface GetClusterDatastoreMountConfig { /** * Optional. NFS is accessed by hosts in either read or readWrite mode * Default value used will be READ_WRITE * Possible values: * READ_ONLY * READ_WRITE */ accessMode: string; /** * The resource name of the datastore to unmount. * The datastore requested to be mounted should be in same region/zone as the * cluster. * Resource names are schemeless URIs that follow the conventions in * https://cloud.google.com/apis/design/resource_names. * For example: * 'projects/my-project/locations/us-central1/datastores/my-datastore' */ datastore: string; /** * The network configuration for the datastore. */ datastoreNetworks: outputs.vmwareengine.GetClusterDatastoreMountConfigDatastoreNetwork[]; /** * File share name. */ fileShare: string; /** * Optional. If set to true, the colocation requirement will be ignored. * If set to false, the colocation requirement will be enforced. * Colocation requirement is the requirement that the cluster must be in the * same region/zone of datastore. */ ignoreColocation: boolean; /** * Optional. The NFS protocol supported by the NFS volume. * Default value used will be NFS_V3 * Possible values: * NFS_V3 */ nfsVersion: string; /** * Server IP addresses of the NFS volume. * For NFS 3, you can only provide a single * server IP address or DNS names. */ servers: string[]; } interface GetClusterDatastoreMountConfigDatastoreNetwork { /** * Optional. 
The number of connections of the NFS volume. * Supported from vsphere 8.0u1. Possible values are 1-4. * Default value is 4. */ connectionCount: number; /** * Optional. The Maximal Transmission Unit (MTU) of the datastore. * MTU value can range from 1330-9000. If not set, system sets * default MTU size to 1500. */ mtu: number; /** * The resource name of the network peering, used to access the * file share by clients on private cloud. Resource names are schemeless * URIs that follow the conventions in * https://cloud.google.com/apis/design/resource_names. * e.g. * projects/my-project/locations/us-central1/networkPeerings/my-network-peering */ networkPeering: string; /** * The resource name of the subnet * Resource names are schemeless URIs that follow the conventions in * https://cloud.google.com/apis/design/resource_names. * e.g. projects/my-project/locations/us-central1/subnets/my-subnet */ subnet: string; } interface GetClusterNodeTypeConfig { /** * Customized number of cores available to each node of the type. * This number must always be one of 'nodeType.availableCustomCoreCounts'. * If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. * Once the customer is created then corecount cannot be changed. */ customCoreCount: number; /** * The number of nodes of this type in the cluster. */ nodeCount: number; nodeTypeId: string; } interface GetDatastoreNfsDatastore { /** * Google service file service configuration */ googleFileServices: outputs.vmwareengine.GetDatastoreNfsDatastoreGoogleFileService[]; /** * Third party file service configuration */ thirdPartyFileServices: outputs.vmwareengine.GetDatastoreNfsDatastoreThirdPartyFileService[]; } interface GetDatastoreNfsDatastoreGoogleFileService { /** * Google filestore instance resource name * e.g. projects/my-project/locations/me-west1-b/instances/my-instance */ filestoreInstance: string; /** * Google netapp volume resource name * e.g. 
projects/my-project/locations/me-west1-b/volumes/my-volume */ netappVolume: string; } interface GetDatastoreNfsDatastoreThirdPartyFileService { /** * Required * Mount Folder name */ fileShare: string; /** * Required to identify vpc peering used for NFS access * network name of NFS's vpc * e.g. projects/project-id/global/networks/my-network_id */ network: string; /** * Server IP addresses of the NFS file service. * NFS v3, provide a single IP address or DNS name. * Multiple servers can be supported in future when NFS 4.1 protocol support * is enabled. */ servers: string[]; } interface GetExternalAccessRuleDestinationIpRange { /** * The name of an 'ExternalAddress' resource. */ externalAddress: string; /** * An IP address range in the CIDR format. */ ipAddressRange: string; } interface GetExternalAccessRuleSourceIpRange { /** * A single IP address. */ ipAddress: string; /** * An IP address range in the CIDR format. */ ipAddressRange: string; } interface GetNetworkPolicyExternalIp { /** * True if the service is enabled; false otherwise. */ enabled: boolean; /** * State of the service. New values may be added to this enum when appropriate. */ state: string; } interface GetNetworkPolicyInternetAccess { /** * True if the service is enabled; false otherwise. */ enabled: boolean; /** * State of the service. New values may be added to this enum when appropriate. */ state: string; } interface GetNetworkVpcNetwork { /** * The relative resource name of the service VPC network this VMware Engine network is attached to. * For example: projects/123123/global/networks/my-network */ network: string; /** * Type of VPC network (INTRANET, INTERNET, or GOOGLE_CLOUD) */ type: string; } interface GetPrivateCloudHcx { /** * Fully qualified domain name of the appliance. */ fqdn: string; /** * Internal IP address of the appliance. */ internalIp: string; /** * State of the appliance. Possible values: ["ACTIVE", "CREATING"] */ state: string; /** * Version of the appliance. 
*/ version: string; } interface GetPrivateCloudManagementCluster { /** * Configuration of the autoscaling applied to this cluster * Private cloud must have a minimum of 3 nodes to add autoscale settings */ autoscalingSettings: outputs.vmwareengine.GetPrivateCloudManagementClusterAutoscalingSetting[]; /** * The user-provided identifier of the new Cluster. The identifier must meet the following requirements: * * Only contains 1-63 alphanumeric characters and hyphens * * Begins with an alphabetical character * * Ends with a non-hyphen character * * Not formatted as a UUID * * Complies with RFC 1034 (https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) */ clusterId: string; /** * The map of cluster node types in this cluster, * where the key is canonical identifier of the node type (corresponds to the NodeType). */ nodeTypeConfigs: outputs.vmwareengine.GetPrivateCloudManagementClusterNodeTypeConfig[]; /** * The stretched cluster configuration for the private cloud. */ stretchedClusterConfigs: outputs.vmwareengine.GetPrivateCloudManagementClusterStretchedClusterConfig[]; } interface GetPrivateCloudManagementClusterAutoscalingSetting { /** * The map with autoscaling policies applied to the cluster. * The key is the identifier of the policy. * It must meet the following requirements: * * Only contains 1-63 alphanumeric characters and hyphens * * Begins with an alphabetical character * * Ends with a non-hyphen character * * Not formatted as a UUID * * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) * * Currently the map must contain only one element * that describes the autoscaling policy for compute nodes. */ autoscalingPolicies: outputs.vmwareengine.GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicy[]; /** * The minimum duration between consecutive autoscale operations. * It starts once addition or removal of nodes is fully completed. * Minimum cool down period is 30m. 
* Cool down period must be in whole minutes (for example, 30m, 31m, 50m). * Mandatory for successful addition of autoscaling settings in cluster. */ coolDownPeriod: string; /** * Maximum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ maxClusterNodeCount: number; /** * Minimum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ minClusterNodeCount: number; } interface GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicy { autoscalePolicyId: string; /** * Utilization thresholds pertaining to amount of consumed memory. */ consumedMemoryThresholds: outputs.vmwareengine.GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicyConsumedMemoryThreshold[]; /** * Utilization thresholds pertaining to CPU utilization. */ cpuThresholds: outputs.vmwareengine.GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicyCpuThreshold[]; /** * The canonical identifier of the node type to add or remove. */ nodeTypeId: string; /** * Number of nodes to add to a cluster during a scale-out operation. * Must be divisible by 2 for stretched clusters. */ scaleOutSize: number; /** * Utilization thresholds pertaining to amount of consumed storage. */ storageThresholds: outputs.vmwareengine.GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicyStorageThreshold[]; } interface GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicyConsumedMemoryThreshold { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicyCpuThreshold { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. 
*/ scaleOut: number; } interface GetPrivateCloudManagementClusterAutoscalingSettingAutoscalingPolicyStorageThreshold { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface GetPrivateCloudManagementClusterNodeTypeConfig { /** * Customized number of cores available to each node of the type. * This number must always be one of 'nodeType.availableCustomCoreCounts'. * If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. * This cannot be changed once the PrivateCloud is created. */ customCoreCount: number; /** * The number of nodes of this type in the cluster. */ nodeCount: number; nodeTypeId: string; } interface GetPrivateCloudManagementClusterStretchedClusterConfig { /** * Zone that will remain operational when connection between the two zones is lost. * Specify the zone in the following format: projects/{project}/locations/{location}. */ preferredLocation: string; /** * Additional zone for a higher level of availability and load balancing. * Specify the zone in the following format: projects/{project}/locations/{location}. */ secondaryLocation: string; } interface GetPrivateCloudNetworkConfig { /** * DNS Server IP of the Private Cloud. */ dnsServerIp: string; /** * Management CIDR used by VMware management appliances. */ managementCidr: string; /** * The IP address layout version of the management IP address range. * Possible versions include: * * managementIpAddressLayoutVersion=1: Indicates the legacy IP address layout used by some existing private clouds. This is no longer supported for new private clouds * as it does not support all features. * * managementIpAddressLayoutVersion=2: Indicates the latest IP address layout * used by all newly created private clouds. This version supports all current features. 
*/ managementIpAddressLayoutVersion: number; /** * The relative resource name of the VMware Engine network attached to the private cloud. * Specify the name in the following form: projects/{project}/locations/{location}/vmwareEngineNetworks/{vmwareEngineNetworkId} * where {project} can either be a project number or a project ID. */ vmwareEngineNetwork: string; /** * The canonical name of the VMware Engine network in * the form: projects/{project_number}/locations/{location}/vmwareEngineNetworks/{vmwareEngineNetworkId} */ vmwareEngineNetworkCanonical: string; } interface GetPrivateCloudNsx { /** * Fully qualified domain name of the appliance. */ fqdn: string; /** * Internal IP address of the appliance. */ internalIp: string; /** * State of the appliance. Possible values: ["ACTIVE", "CREATING"] */ state: string; /** * Version of the appliance. */ version: string; } interface GetPrivateCloudVcenter { /** * Fully qualified domain name of the appliance. */ fqdn: string; /** * Internal IP address of the appliance. */ internalIp: string; /** * State of the appliance. Possible values: ["ACTIVE", "CREATING"] */ state: string; /** * Version of the appliance. */ version: string; } interface GetSubnetDhcpAddressRange { /** * The first IP address of the range. */ firstAddress: string; /** * The last IP address of the range. */ lastAddress: string; } interface NetworkPolicyExternalIp { /** * True if the service is enabled; false otherwise. */ enabled?: boolean; /** * (Output) * State of the service. New values may be added to this enum when appropriate. */ state: string; } interface NetworkPolicyInternetAccess { /** * True if the service is enabled; false otherwise. */ enabled?: boolean; /** * (Output) * State of the service. New values may be added to this enum when appropriate. */ state: string; } interface NetworkVpcNetwork { /** * (Output) * The relative resource name of the service VPC network this VMware Engine network is attached to. 
* For example: projects/123123/global/networks/my-network */ network: string; /** * VMware Engine network type. * Possible values are: `LEGACY`, `STANDARD`. */ type: string; } interface PrivateCloudHcx { /** * Fully qualified domain name of the appliance. */ fqdn?: string; /** * Internal IP address of the appliance. */ internalIp?: string; /** * State of the appliance. * Possible values are: `ACTIVE`, `CREATING`. */ state?: string; /** * Version of the appliance. */ version?: string; } interface PrivateCloudManagementCluster { /** * Configuration of the autoscaling applied to this cluster * Private cloud must have a minimum of 3 nodes to add autoscale settings * Structure is documented below. */ autoscalingSettings?: outputs.vmwareengine.PrivateCloudManagementClusterAutoscalingSettings; /** * The user-provided identifier of the new Cluster. The identifier must meet the following requirements: * * Only contains 1-63 alphanumeric characters and hyphens * * Begins with an alphabetical character * * Ends with a non-hyphen character * * Not formatted as a UUID * * Complies with RFC 1034 (https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) */ clusterId: string; /** * The map of cluster node types in this cluster, * where the key is canonical identifier of the node type (corresponds to the NodeType). * Structure is documented below. */ nodeTypeConfigs?: outputs.vmwareengine.PrivateCloudManagementClusterNodeTypeConfig[]; /** * The stretched cluster configuration for the private cloud. * Structure is documented below. */ stretchedClusterConfig?: outputs.vmwareengine.PrivateCloudManagementClusterStretchedClusterConfig; } interface PrivateCloudManagementClusterAutoscalingSettings { /** * The map with autoscaling policies applied to the cluster. * The key is the identifier of the policy. 
* It must meet the following requirements: * * Only contains 1-63 alphanumeric characters and hyphens * * Begins with an alphabetical character * * Ends with a non-hyphen character * * Not formatted as a UUID * * Complies with [RFC 1034](https://datatracker.ietf.org/doc/html/rfc1034) (section 3.5) * Currently the map must contain only one element * that describes the autoscaling policy for compute nodes. * Structure is documented below. */ autoscalingPolicies: outputs.vmwareengine.PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicy[]; /** * The minimum duration between consecutive autoscale operations. * It starts once addition or removal of nodes is fully completed. * Minimum cool down period is 30m. * Cool down period must be in whole minutes (for example, 30m, 31m, 50m). * Mandatory for successful addition of autoscaling settings in cluster. */ coolDownPeriod?: string; /** * Maximum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ maxClusterNodeCount?: number; /** * Minimum number of nodes of any type in a cluster. * Mandatory for successful addition of autoscaling settings in cluster. */ minClusterNodeCount?: number; } interface PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicy { /** * The identifier for this object. Format specified above. */ autoscalePolicyId: string; /** * Utilization thresholds pertaining to amount of consumed memory. * Structure is documented below. */ consumedMemoryThresholds?: outputs.vmwareengine.PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicyConsumedMemoryThresholds; /** * Utilization thresholds pertaining to CPU utilization. * Structure is documented below. */ cpuThresholds?: outputs.vmwareengine.PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicyCpuThresholds; /** * The canonical identifier of the node type to add or remove. 
*/ nodeTypeId: string; /** * Number of nodes to add to a cluster during a scale-out operation. * Must be divisible by 2 for stretched clusters. */ scaleOutSize: number; /** * Utilization thresholds pertaining to amount of consumed storage. * Structure is documented below. */ storageThresholds?: outputs.vmwareengine.PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicyStorageThresholds; } interface PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicyConsumedMemoryThresholds { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicyCpuThresholds { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface PrivateCloudManagementClusterAutoscalingSettingsAutoscalingPolicyStorageThresholds { /** * The utilization triggering the scale-in operation in percent. */ scaleIn: number; /** * The utilization triggering the scale-out operation in percent. */ scaleOut: number; } interface PrivateCloudManagementClusterNodeTypeConfig { /** * Customized number of cores available to each node of the type. * This number must always be one of `nodeType.availableCustomCoreCounts`. * If zero is provided max value from `nodeType.availableCustomCoreCounts` will be used. * This cannot be changed once the PrivateCloud is created. */ customCoreCount?: number; /** * The number of nodes of this type in the cluster. */ nodeCount: number; /** * The identifier for this object. Format specified above. */ nodeTypeId: string; } interface PrivateCloudManagementClusterStretchedClusterConfig { /** * Zone that will remain operational when connection between the two zones is lost. 
* Specify the zone in the following format: projects/{project}/locations/{location}. */ preferredLocation?: string; /** * Additional zone for a higher level of availability and load balancing. * Specify the zone in the following format: projects/{project}/locations/{location}. */ secondaryLocation?: string; } interface PrivateCloudNetworkConfig { /** * (Output) * DNS Server IP of the Private Cloud. */ dnsServerIp: string; /** * Management CIDR used by VMware management appliances. */ managementCidr: string; /** * (Output) * The IP address layout version of the management IP address range. * Possible versions include: * * managementIpAddressLayoutVersion=1: Indicates the legacy IP address layout used by some existing private clouds. This is no longer supported for new private clouds * as it does not support all features. * * managementIpAddressLayoutVersion=2: Indicates the latest IP address layout * used by all newly created private clouds. This version supports all current features. */ managementIpAddressLayoutVersion: number; /** * The relative resource name of the VMware Engine network attached to the private cloud. * Specify the name in the following form: projects/{project}/locations/{location}/vmwareEngineNetworks/{vmwareEngineNetworkId} * where {project} can either be a project number or a project ID. */ vmwareEngineNetwork?: string; /** * (Output) * The canonical name of the VMware Engine network in * the form: projects/{project_number}/locations/{location}/vmwareEngineNetworks/{vmwareEngineNetworkId} */ vmwareEngineNetworkCanonical: string; } interface PrivateCloudNsx { /** * Fully qualified domain name of the appliance. */ fqdn?: string; /** * Internal IP address of the appliance. */ internalIp?: string; /** * State of the appliance. * Possible values are: `ACTIVE`, `CREATING`. */ state?: string; /** * Version of the appliance. */ version?: string; } interface PrivateCloudVcenter { /** * Fully qualified domain name of the appliance. 
*/ fqdn?: string; /** * Internal IP address of the appliance. */ internalIp?: string; /** * State of the appliance. * Possible values are: `ACTIVE`, `CREATING`. */ state?: string; /** * Version of the appliance. */ version?: string; } interface SubnetDhcpAddressRange { /** * (Output) * The first IP address of the range. */ firstAddress: string; /** * (Output) * The last IP address of the range. */ lastAddress: string; } } export declare namespace vpcaccess { interface ConnectorSubnet { /** * Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is * https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be {subnetName} */ name?: string; /** * Project in which the subnet exists. If not set, this project is assumed to be the project for which the connector create request was issued. */ projectId: string; } interface GetConnectorSubnet { /** * Name of the resource. * * - - - */ name: string; /** * Project in which the subnet exists. If not set, this project is assumed to be the project for which the connector create request was issued. */ projectId: string; } } export declare namespace workbench { interface InstanceGceSetup { /** * The hardware accelerators used on this instance. If you use accelerators, make sure that your configuration has * [enough vCPUs and memory to support the `machineType` you have selected](https://cloud.google.com/compute/docs/gpus/#gpus-list). * Currently supports only one accelerator configuration. * Structure is documented below. */ acceleratorConfigs?: outputs.workbench.InstanceGceSetupAcceleratorConfig[]; /** * The definition of a boot disk. * Structure is documented below. */ bootDisk: outputs.workbench.InstanceGceSetupBootDisk; /** * Confidential instance configuration. * Structure is documented below. 
*/ confidentialInstanceConfig?: outputs.workbench.InstanceGceSetupConfidentialInstanceConfig; /** * Use a container image to start the workbench instance. * Structure is documented below. */ containerImage?: outputs.workbench.InstanceGceSetupContainerImage; /** * Data disks attached to the VM instance. Currently supports only one data disk. * Structure is documented below. */ dataDisks: outputs.workbench.InstanceGceSetupDataDisks; /** * Optional. If true, no external IP will be assigned to this VM instance. */ disablePublicIp: boolean; /** * Optional. Flag to enable ip forwarding or not, default false/off. * https://cloud.google.com/vpc/docs/using-routes#canipforward */ enableIpForwarding?: boolean; /** * Optional. The machine type of the VM instance. https://cloud.google.com/compute/docs/machine-resource */ machineType: string; /** * Optional. Custom metadata to apply to this instance. */ metadata: { [key: string]: string; }; /** * The network interfaces for the VM. Supports only one interface. * Structure is documented below. */ networkInterfaces: outputs.workbench.InstanceGceSetupNetworkInterface[]; /** * Reservations that this instance can consume from. * Structure is documented below. */ reservationAffinity: outputs.workbench.InstanceGceSetupReservationAffinity; /** * The service account that serves as an identity for the VM instance. Currently supports only one service account. * Structure is documented below. */ serviceAccounts: outputs.workbench.InstanceGceSetupServiceAccount[]; /** * A set of Shielded Instance options. See [Images using supported Shielded * VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). * Not all combinations are valid. * Structure is documented below. */ shieldedInstanceConfig: outputs.workbench.InstanceGceSetupShieldedInstanceConfig; /** * Optional. The Compute Engine tags to add to instance (see [Tagging * instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). 
*/ tags: string[]; /** * Definition of a custom Compute Engine virtual machine image for starting * a workbench instance with the environment installed directly on the VM. * Structure is documented below. */ vmImage: outputs.workbench.InstanceGceSetupVmImage; } interface InstanceGceSetupAcceleratorConfig { /** * Optional. Count of cores of this accelerator. */ coreCount?: string; /** * Optional. Type of this accelerator. * Possible values are: `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_A100`, `NVIDIA_A100_80GB`, `NVIDIA_L4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`. */ type?: string; } interface InstanceGceSetupBootDisk { /** * Optional. Input only. Disk encryption method used on the boot and * data disks, defaults to GMEK. * Possible values are: `GMEK`, `CMEK`. */ diskEncryption: string; /** * Optional. The size of the boot disk in GB attached to this instance, * up to a maximum of 64000 GB (64 TB). If not specified, this defaults to the * recommended value of 150GB. */ diskSizeGb: string; /** * Optional. Indicates the type of the disk. * Possible values are: `PD_STANDARD`, `PD_SSD`, `PD_BALANCED`, `PD_EXTREME`. */ diskType: string; /** * 'Optional. The KMS key used to encrypt the disks, only * applicable if diskEncryption is CMEK. Format: `projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}` * Learn more about using your own encryption keys.' */ kmsKey?: string; } interface InstanceGceSetupConfidentialInstanceConfig { /** * Defines the type of technology used by the confidential instance. * Possible values are: `SEV`. */ confidentialInstanceType?: string; } interface InstanceGceSetupContainerImage { /** * The path to the container image repository. * For example: gcr.io/{project_id}/{imageName} */ repository: string; /** * The tag of the container image. If not specified, this defaults to the latest tag. 
*/ tag?: string; } interface InstanceGceSetupDataDisks { /** * Optional. Input only. Disk encryption method used on the boot * and data disks, defaults to GMEK. * Possible values are: `GMEK`, `CMEK`. */ diskEncryption: string; /** * Optional. The size of the disk in GB attached to this VM instance, * up to a maximum of 64000 GB (64 TB). If not specified, this defaults to * 100. */ diskSizeGb: string; /** * Optional. Input only. Indicates the type of the disk. * Possible values are: `PD_STANDARD`, `PD_SSD`, `PD_BALANCED`, `PD_EXTREME`. */ diskType?: string; /** * 'Optional. The KMS key used to encrypt the disks, * only applicable if diskEncryption is CMEK. Format: `projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}` * Learn more about using your own encryption keys.' */ kmsKey?: string; } interface InstanceGceSetupNetworkInterface { /** * Optional. An array of configurations for this interface. Currently, only one access * config, ONE_TO_ONE_NAT, is supported. If no accessConfigs specified, the * instance will have an external internet access through an ephemeral * external IP address. * Structure is documented below. */ accessConfigs: outputs.workbench.InstanceGceSetupNetworkInterfaceAccessConfig[]; /** * Optional. The name of the VPC that this VM instance is in. */ network: string; /** * Optional. The type of vNIC to be used on this interface. This * may be gVNIC or VirtioNet. * Possible values are: `VIRTIO_NET`, `GVNIC`. */ nicType?: string; /** * Optional. The name of the subnet that this VM instance is in. */ subnet: string; } interface InstanceGceSetupNetworkInterfaceAccessConfig { /** * An external IP address associated with this instance. Specify an unused * static external IP address available to the project or leave this field * undefined to use an IP from a shared ephemeral IP address pool. If you * specify a static external IP address, it must live in the same region as * the zone of the instance. 
*/ externalIp: string; } interface InstanceGceSetupReservationAffinity { /** * Specifies the type of reservation from which this instance can consume resources: * RESERVATION_ANY (default), RESERVATION_SPECIFIC, or RESERVATION_NONE. * Possible values are: `RESERVATION_NONE`, `RESERVATION_ANY`, `RESERVATION_SPECIFIC`. */ consumeReservationType: string; /** * Corresponds to the label key of a reservation resource. To target a * RESERVATION_SPECIFIC by name, use compute.googleapis.com/reservation-name * as the key and specify the name of your reservation as its value. */ key?: string; /** * Corresponds to the label values of a reservation resource. This can be * either a name to a reservation in the same project or * "projects/different-project/reservations/some-reservation-name" * to target a shared reservation in the same zone but in a different project. */ values?: string[]; } interface InstanceGceSetupServiceAccount { /** * Optional. Email address of the service account. */ email: string; /** * (Output) * Output only. The list of scopes to be made available for this * service account. Set by the CLH to https://www.googleapis.com/auth/cloud-platform */ scopes: string[]; } interface InstanceGceSetupShieldedInstanceConfig { /** * Optional. Defines whether the VM instance has integrity monitoring * enabled. Enables monitoring and attestation of the boot integrity of the VM * instance. The attestation is performed against the integrity policy baseline. * This baseline is initially derived from the implicitly trusted boot image * when the VM instance is created. Enabled by default. */ enableIntegrityMonitoring?: boolean; /** * Optional. Defines whether the VM instance has Secure Boot enabled. * Secure Boot helps ensure that the system only runs authentic software by verifying * the digital signature of all boot components, and halting the boot process * if signature verification fails. Disabled by default. */ enableSecureBoot?: boolean; /** * Optional. 
Defines whether the VM instance has the vTPM enabled. * Enabled by default. */ enableVtpm?: boolean; } interface InstanceGceSetupVmImage { /** * Optional. Use this VM image family to find the image; the newest * image in this family will be used. */ family?: string; /** * Optional. Use VM image name to find the image. */ name?: string; /** * The name of the Google Cloud project that this VM image belongs to. * Format: {project_id} */ project?: string; } interface InstanceHealthInfo { } interface InstanceIamBindingCondition { description?: string; expression: string; title: string; } interface InstanceIamMemberCondition { description?: string; expression: string; title: string; } interface InstanceUpgradeHistory { /** * Optional. Action. Rollback or Upgrade. */ action?: string; /** * Optional. The container image before this instance upgrade. */ containerImage?: string; /** * An RFC3339 timestamp in UTC time. This is in the format of yyyy-MM-ddTHH:mm:ss.SSSZ. * The milliseconds portion (".SSS") is optional. */ createTime?: string; /** * Optional. The framework of this workbench instance. */ framework?: string; /** * Optional. The snapshot of the boot disk of this workbench instance before upgrade. */ snapshot?: string; /** * (Output) * Output only. The state of this instance upgrade history entry. */ state: string; /** * Optional. Target VM Version, like m63. */ targetVersion?: string; /** * Optional. The version of the workbench instance before this upgrade. */ version?: string; /** * Optional. The VM image before this instance upgrade. */ vmImage?: string; } } export declare namespace workstations { interface WorkstationClusterCondition { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: { [key: string]: string; }[]; /** * (Output) * Human readable message indicating details about the current status. 
*/ message: string; } interface WorkstationClusterDomainConfig { /** * Domain used by Workstations for HTTP ingress. */ domain: string; } interface WorkstationClusterPrivateClusterConfig { /** * Additional project IDs that are allowed to attach to the workstation cluster's service attachment. * By default, the workstation cluster's project and the VPC host project (if different) are allowed. */ allowedProjects: string[]; /** * (Output) * Hostname for the workstation cluster. * This field will be populated only when private endpoint is enabled. * To access workstations in the cluster, create a new DNS zone mapping this domain name to an internal IP address and a forwarding rule mapping that address to the service attachment. */ clusterHostname: string; /** * Whether Workstations endpoint is private. */ enablePrivateEndpoint: boolean; /** * (Output) * Service attachment URI for the workstation cluster. * The service attachment is created when private endpoint is enabled. * To access workstations in the cluster, configure access to the managed service using [Private Service Connect](https://cloud.google.com/vpc/docs/configure-private-service-connect-services). */ serviceAttachmentUri: string; } interface WorkstationConfigAllowedPort { /** * Starting port number for the current range of ports. Valid ports are 22, 80, and ports within the range 1024-65535. */ first?: number; /** * Ending port number for the current range of ports. Valid ports are 22, 80, and ports within the range 1024-65535. */ last?: number; } interface WorkstationConfigCondition { /** * (Output) * The status code, which should be an enum value of google.rpc.Code. */ code: number; /** * (Output) * A list of messages that carry the error details. */ details: { [key: string]: string; }[]; /** * (Output) * Human readable message indicating details about the current status. */ message: string; } interface WorkstationConfigContainer { /** * Arguments passed to the entrypoint. 
*/ args?: string[]; /** * If set, overrides the default ENTRYPOINT specified by the image. */ commands?: string[]; /** * Environment variables passed to the container. * The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE". */ env?: { [key: string]: string; }; /** * Docker image defining the container. This image must be accessible by the config's service account. */ image: string; /** * If set, overrides the USER specified in the image with the given uid. */ runAsUser?: number; /** * If set, overrides the default DIR specified by the image. */ workingDir?: string; } interface WorkstationConfigEncryptionKey { /** * The name of the Google Cloud KMS encryption key. */ kmsKey: string; /** * The service account to use with the specified KMS key. */ kmsKeyServiceAccount: string; } interface WorkstationConfigEphemeralDirectory { /** * An EphemeralDirectory backed by a Compute Engine persistent disk. * Structure is documented below. */ gcePd: outputs.workstations.WorkstationConfigEphemeralDirectoryGcePd; /** * Location of this directory in the running workstation. */ mountPath: string; } interface WorkstationConfigEphemeralDirectoryGcePd { /** * Type of the disk to use. Defaults to `"pd-standard"`. */ diskType: string; /** * Whether the disk is read only. If true, the disk may be shared by multiple VMs and `sourceSnapshot` must be set. */ readOnly?: boolean; /** * Name of the disk image to use as the source for the disk. * Must be empty if `sourceSnapshot` is set. * Updating `sourceImage` will update content in the ephemeral directory after the workstation is restarted. */ sourceImage?: string; /** * Name of the snapshot to use as the source for the disk. * Must be empty if `sourceImage` is set. * Must be empty if `readOnly` is false. * Updating `sourceSnapshot` will update content in the ephemeral directory after the workstation is restarted. 
*/ sourceSnapshot?: string; } interface WorkstationConfigHost { /** * A runtime using a Compute Engine instance. * Structure is documented below. */ gceInstance: outputs.workstations.WorkstationConfigHostGceInstance; } interface WorkstationConfigHostGceInstance { /** * An accelerator card attached to the instance. * Structure is documented below. */ accelerators?: outputs.workstations.WorkstationConfigHostGceInstanceAccelerator[]; /** * A list of the boost configurations that workstations created using this workstation configuration are allowed to use. * Structure is documented below. */ boostConfigs?: outputs.workstations.WorkstationConfigHostGceInstanceBoostConfig[]; /** * Size of the boot disk in GB. */ bootDiskSizeGb: number; /** * A set of Compute Engine Confidential VM instance options. * Structure is documented below. */ confidentialInstanceConfig: outputs.workstations.WorkstationConfigHostGceInstanceConfidentialInstanceConfig; /** * Whether instances have no public IP address. */ disablePublicIpAddresses?: boolean; /** * Whether to disable SSH access to the VM. */ disableSsh?: boolean; /** * Whether to enable nested virtualization on the Compute Engine VMs backing the Workstations. * See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization */ enableNestedVirtualization?: boolean; /** * The name of a Compute Engine machine type. */ machineType: string; /** * Number of instances to pool for faster workstation startup. */ poolSize: number; /** * Email address of the service account that will be used on VM instances used to support this config. This service account must have permission to pull the specified container image. If not set, VMs will run without a service account, in which case the image must be publicly accessible. */ serviceAccount: string; /** * Scopes to grant to the service_account. 
Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. */ serviceAccountScopes: string[]; /** * A set of Compute Engine Shielded instance options. * Structure is documented below. */ shieldedInstanceConfig: outputs.workstations.WorkstationConfigHostGceInstanceShieldedInstanceConfig; /** * Network tags to add to the Compute Engine machines backing the Workstations. */ tags?: string[]; /** * Resource manager tags to be bound to the VM instances backing the Workstations. * Tag keys and values have the same definition as * https://docs.cloud.google.com/resource-manager/docs/tags/tags-overview * Keys must be in the format `tagKeys/{tag_key_id}`, and * values are in the format `tagValues/456`. */ vmTags?: { [key: string]: string; }; } interface WorkstationConfigHostGceInstanceAccelerator { /** * Number of accelerator cards exposed to the instance. */ count: number; /** * Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". */ type: string; } interface WorkstationConfigHostGceInstanceBoostConfig { /** * An accelerator card attached to the boost instance. * Structure is documented below. */ accelerators?: outputs.workstations.WorkstationConfigHostGceInstanceBoostConfigAccelerator[]; /** * Size of the boot disk in GB. The minimum boot disk size is `30` GB. Defaults to `50` GB. */ bootDiskSizeGb: number; /** * Whether to enable nested virtualization on the Compute Engine VMs backing boosted Workstations. * See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization */ enableNestedVirtualization: boolean; /** * The id to be used for the boost config. */ id: string; /** * The type of machine that boosted VM instances will use—for example, e2-standard-4. 
For more information about machine types that Cloud Workstations supports, see the list of available machine types https://cloud.google.com/workstations/docs/available-machine-types. Defaults to e2-standard-4. */ machineType?: string; /** * Number of instances to pool for faster workstation boosting. */ poolSize: number; } interface WorkstationConfigHostGceInstanceBoostConfigAccelerator { /** * Number of accelerator cards exposed to the instance. */ count: number; /** * Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". */ type: string; } interface WorkstationConfigHostGceInstanceConfidentialInstanceConfig { /** * Whether the instance has confidential compute enabled. */ enableConfidentialCompute?: boolean; } interface WorkstationConfigHostGceInstanceShieldedInstanceConfig { /** * Whether the instance has integrity monitoring enabled. */ enableIntegrityMonitoring?: boolean; /** * Whether the instance has Secure Boot enabled. */ enableSecureBoot?: boolean; /** * Whether the instance has the vTPM enabled. */ enableVtpm?: boolean; } interface WorkstationConfigIamBindingCondition { description?: string; expression: string; title: string; } interface WorkstationConfigIamMemberCondition { description?: string; expression: string; title: string; } interface WorkstationConfigPersistentDirectory { /** * A directory to persist across workstation sessions, backed by a Compute Engine regional persistent disk. Can only be updated if not empty during creation. * Structure is documented below. */ gcePd: outputs.workstations.WorkstationConfigPersistentDirectoryGcePd; /** * Location of this directory in the running workstation. */ mountPath: string; } interface WorkstationConfigPersistentDirectoryGcePd { /** * Type of the disk to use. Defaults to `"pd-standard"`. */ diskType: string; /** * Type of file system that the disk should be formatted with. The workstation image must support this file system type. 
Must be empty if `sourceSnapshot` is set. Defaults to `ext4`. */ fsType: string; /** * Whether the persistent disk should be deleted when the workstation is deleted. Valid values are `DELETE` and `RETAIN`. Defaults to `DELETE`. * Possible values are: `DELETE`, `RETAIN`. */ reclaimPolicy?: string; /** * The GB capacity of a persistent home directory for each workstation created with this configuration. Must be empty if `sourceSnapshot` is set. * Valid values are `10`, `50`, `100`, `200`, `500`, or `1000`. Defaults to `200`. If less than `200` GB, the `diskType` must be `pd-balanced` or `pd-ssd`. */ sizeGb: number; /** * Name of the snapshot to use as the source for the disk. * Must be empty if `sourceImage` is set. * Must be empty if `readOnly` is false. * Updating `sourceSnapshot` will update content in the persistent directory after the workstation is restarted. */ sourceSnapshot?: string; } interface WorkstationConfigReadinessCheck { /** * Path to which the request should be sent. */ path: string; /** * Port to which the request should be sent. */ port: number; } interface WorkstationIamBindingCondition { description?: string; expression: string; title: string; } interface WorkstationIamMemberCondition { description?: string; expression: string; title: string; } }