/**
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { GoogleAuthOptions, GoogleAuth } from 'google-auth-library';
import { TaskType } from '../common/types.js';
export {
  CitationMetadata,
  CodeExecutionTool,
  Content,
  FunctionCallingMode,
  FunctionDeclarationsTool,
  GenerateContentCandidate,
  GenerateContentRequest,
  GenerateContentResponse,
  GenerateContentStreamResult,
  GoogleMaps,
  GoogleMapsTool,
  GoogleSearchRetrieval,
  GoogleSearchRetrievalTool,
  GroundingMetadata,
  HarmBlockThreshold,
  HarmCategory,
  ImagenInstance,
  ImagenParameters,
  ImagenPredictRequest,
  ImagenPredictResponse,
  ImagenPrediction,
  RetrievalTool,
  TaskTypeSchema,
  Tool,
  ToolConfig,
  isCodeExecutionTool,
  isFunctionDeclarationsTool,
  isGoogleMapsTool,
  isGoogleSearchRetrievalTool,
  isObject,
  isRetrievalTool,
} from '../common/types.js';
import 'genkit';

/** Options for Vertex AI plugin configuration. */
interface VertexPluginOptions {
  /** The Vertex API key for express mode. */
  apiKey?: string | false;
  /** The Google Cloud project id to call. */
  projectId?: string;
  /** The Google Cloud region to call. */
  location?: string;
  /** Provide custom authentication configuration for connecting to Vertex AI. */
  googleAuth?: GoogleAuthOptions;
  /** Enables additional debug traces (e.g. raw model API call details). */
  experimental_debugTraces?: boolean;
  /** Use the `responseSchema` field instead of `responseJsonSchema`. */
  legacyResponseSchema?: boolean;
}

interface BaseClientOptions {
  /** Timeout in milliseconds. The timeout value must be non-negative. */
  timeout?: number;
  signal?: AbortSignal;
}
interface RegionalClientOptions extends BaseClientOptions {
  kind: 'regional';
  location: string;
  projectId: string;
  authClient: GoogleAuth;
  apiKey?: string;
}
interface GlobalClientOptions extends BaseClientOptions {
  kind: 'global';
  location: 'global';
  projectId: string;
  authClient: GoogleAuth;
  apiKey?: string;
}
interface ExpressClientOptions extends BaseClientOptions {
  kind: 'express';
  apiKey: string | false | undefined;
}
/** Resolved options for use with the client. */
type ClientOptions = RegionalClientOptions | GlobalClientOptions | ExpressClientOptions;

/**
 * Request options.
 */
interface RequestOptions {
  /** An API key to use for this request, if applicable. */
  apiKey?: string | false | undefined;
  /** Timeout in milliseconds. The timeout value must be non-negative. */
  timeout?: number;
  /**
   * Version of the API endpoint to call (e.g. "v1" or "v1beta"). If not
   * specified, defaults to 'v1beta'.
   */
  apiVersion?: string;
  /**
   * Value for the x-goog-api-client header to set on the API request. This is
   * intended for wrapper SDKs to set additional SDK identifiers for the
   * backend.
   */
  apiClient?: string;
  /** Custom HTTP headers to set on the HTTP request. */
  customHeaders?: Headers;
}
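/*
 * A minimal usage sketch (illustrative only, not part of this module's API):
 * constructing `ClientOptions` variants and narrowing on the `kind`
 * discriminant. The project id, auth scope, and express endpoint below are
 * assumptions made for the example.
 *
 *   import { GoogleAuth } from 'google-auth-library';
 *
 *   const regional: ClientOptions = {
 *     kind: 'regional',
 *     location: 'us-central1',
 *     projectId: 'my-project', // hypothetical project id
 *     authClient: new GoogleAuth({
 *       scopes: 'https://www.googleapis.com/auth/cloud-platform',
 *     }),
 *     timeout: 30_000,
 *   };
 *
 *   const express: ClientOptions = {
 *     kind: 'express',
 *     apiKey: process.env.VERTEX_API_KEY,
 *   };
 *
 *   function baseUrl(options: ClientOptions): string {
 *     switch (options.kind) {
 *       case 'regional':
 *         return `https://${options.location}-aiplatform.googleapis.com`;
 *       case 'global':
 *       case 'express':
 *         return 'https://aiplatform.googleapis.com'; // assumed shared endpoint
 *     }
 *   }
 */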
interface Model {
  name: string;
  launchStage: string;
}
interface ListModelsResponse {
  publisherModels: Model[];
}
interface TextEmbeddingInstance {
  task_type?: TaskType;
  content: string;
  title?: string;
}
interface MultimodalEmbeddingInstance {
  text?: string;
  image?: {
    bytesBase64Encoded?: string;
    gcsUri?: string;
    mimeType?: string;
  };
  video?: {
    bytesBase64Encoded?: string;
    gcsUri?: string;
    videoSegmentConfig?: {
      startOffsetSec: number;
      endOffsetSec: number;
      intervalSec: number;
    };
  };
  parameters?: {
    dimension: number;
  };
}
declare type EmbeddingInstance = TextEmbeddingInstance | MultimodalEmbeddingInstance;
declare interface TextEmbeddingPrediction {
  embeddings: {
    statistics: {
      truncated: boolean;
      token_count: number;
    };
    values: number[];
  };
}
declare interface VideoEmbedding {
  startOffsetSec: number;
  endOffsetSec: number;
  embedding: number[];
}
declare interface MultimodalEmbeddingPrediction {
  textEmbedding?: number[];
  imageEmbedding?: number[];
  videoEmbeddings?: VideoEmbedding[];
}
declare function isMultimodalEmbeddingPrediction(value: unknown): value is MultimodalEmbeddingPrediction;
declare type EmbeddingPrediction = TextEmbeddingPrediction | MultimodalEmbeddingPrediction;
declare interface EmbedContentRequest {
  instances: EmbeddingInstance[];
  parameters: EmbedContentConfig;
}
declare interface EmbedContentResponse {
  predictions: EmbeddingPrediction[];
}
/** Optional parameters for the embed content method. */
declare interface EmbedContentConfig {
  /** Type of task for which the embedding will be used. */
  taskType?: string;
  /** Title for the text. Only applicable when the task type is `RETRIEVAL_DOCUMENT`. */
  title?: string;
  /**
   * Reduced dimension for the output embedding. If set, excess values in the
   * output embedding are truncated from the end. Supported only by newer
   * models released since 2024; you cannot set this value when using an
   * earlier model (`models/embedding-001`).
   */
  outputDimensionality?: number;
  /** The MIME type of the input. */
  mimeType?: string;
  /**
   * Vertex API only. Whether to silently truncate inputs longer than the max
   * sequence length. If this option is set to false, oversized inputs will
   * lead to an INVALID_ARGUMENT error, similar to other text APIs.
   */
  autoTruncate?: boolean;
}
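/*
 * A minimal request-shaping sketch (illustrative only): building a text
 * `EmbedContentRequest` and flattening an `EmbedContentResponse` into
 * `EmbeddingResult` values via the `isMultimodalEmbeddingPrediction` type
 * guard. The `task_type` literal assumes `TaskType` includes
 * 'RETRIEVAL_DOCUMENT'.
 *
 *   const request: EmbedContentRequest = {
 *     instances: [
 *       { content: 'The quick brown fox', task_type: 'RETRIEVAL_DOCUMENT', title: 'Foxes' },
 *     ],
 *     parameters: { taskType: 'RETRIEVAL_DOCUMENT', autoTruncate: true },
 *   };
 *
 *   function toResults(response: EmbedContentResponse): EmbeddingResult[] {
 *     return response.predictions.flatMap((prediction) => {
 *       if (isMultimodalEmbeddingPrediction(prediction)) {
 *         // Multimodal predictions may carry text and/or image embeddings.
 *         return [
 *           ...(prediction.textEmbedding ? [{ embedding: prediction.textEmbedding }] : []),
 *           ...(prediction.imageEmbedding ? [{ embedding: prediction.imageEmbedding }] : []),
 *         ];
 *       }
 *       return [{ embedding: prediction.embeddings.values }];
 *     });
 *   }
 */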
declare type EmbeddingResult = {
  embedding: number[];
  /** Optional metadata about the embedding (assumed `Record<string, unknown>`; the generated declaration omitted the type arguments). */
  metadata?: Record<string, unknown>;
};
declare interface VeoMedia {
  bytesBase64Encoded?: string;
  gcsUri?: string;
  mimeType?: string;
}
declare interface VeoReferenceImage {
  image: VeoMedia;
  referenceType: string;
}
declare interface VeoMask extends VeoMedia {
  mask: string;
}
declare interface VeoInstance {
  prompt: string;
  image?: VeoMedia;
  lastFrame?: VeoMedia;
  video?: VeoMedia;
  referenceImages?: VeoReferenceImage[];
}
declare interface VeoParameters {
  aspectRatio?: string;
  durationSeconds?: number;
  enhancePrompt?: boolean;
  generateAudio?: boolean;
  negativePrompt?: string;
  personGeneration?: string;
  resolution?: string;
  sampleCount?: number;
  seed?: number;
  storageUri?: string;
}
declare interface VeoPredictRequest {
  instances: VeoInstance[];
  parameters: VeoParameters;
}
declare interface Operation {
  name: string;
  done?: boolean;
  error?: {
    code: number;
    message: string;
    details?: unknown;
  };
  clientOptions?: ClientOptions;
}
declare interface VeoOperation extends Operation {
  response?: {
    raiMediaFilteredCount?: number;
    videos: VeoMedia[];
  };
}
declare interface VeoOperationRequest {
  operationName: string;
}
declare interface LyriaParameters {
  sampleCount?: number;
}
declare interface LyriaPredictRequest {
  instances: LyriaInstance[];
  parameters: LyriaParameters;
}
declare interface LyriaPredictResponse {
  predictions: LyriaPrediction[];
}
declare interface LyriaPrediction {
  bytesBase64Encoded: string;
  mimeType: string;
}
declare interface LyriaInstance {
  prompt: string;
  negativePrompt?: string;
  seed?: number;
}

export {
  type ClientOptions,
  type EmbedContentConfig,
  type EmbedContentRequest,
  type EmbedContentResponse,
  type EmbeddingInstance,
  type EmbeddingPrediction,
  type EmbeddingResult,
  type ExpressClientOptions,
  type GlobalClientOptions,
  type ListModelsResponse,
  type LyriaInstance,
  type LyriaParameters,
  type LyriaPredictRequest,
  type LyriaPredictResponse,
  type LyriaPrediction,
  type Model,
  type MultimodalEmbeddingPrediction,
  type Operation,
  type RegionalClientOptions,
  type RequestOptions,
  type TextEmbeddingPrediction,
  type VeoInstance,
  type VeoMask,
  type VeoMedia,
  type VeoOperation,
  type VeoOperationRequest,
  type VeoParameters,
  type VeoPredictRequest,
  type VeoReferenceImage,
  type VertexPluginOptions,
  type VideoEmbedding,
  isMultimodalEmbeddingPrediction,
};
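/*
 * A minimal polling sketch (illustrative only): shaping a `VeoPredictRequest`
 * and draining a `VeoOperation` once `done` is set. `startVeo` and `checkVeo`
 * are hypothetical helpers standing in for whatever transport issues the
 * predict and operation-status calls; the poll interval is arbitrary.
 *
 *   const request: VeoPredictRequest = {
 *     instances: [{ prompt: 'A timelapse of clouds over mountains' }],
 *     parameters: { durationSeconds: 8, generateAudio: true, sampleCount: 1 },
 *   };
 *
 *   async function awaitVideos(
 *     startVeo: (req: VeoPredictRequest) => Promise<VeoOperation>,
 *     checkVeo: (req: VeoOperationRequest) => Promise<VeoOperation>
 *   ): Promise<VeoMedia[]> {
 *     let operation = await startVeo(request);
 *     while (!operation.done) {
 *       await new Promise((resolve) => setTimeout(resolve, 5_000));
 *       operation = await checkVeo({ operationName: operation.name });
 *     }
 *     if (operation.error) {
 *       throw new Error(`Veo failed (${operation.error.code}): ${operation.error.message}`);
 *     }
 *     return operation.response?.videos ?? [];
 *   }
 */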