import { Ok, Err } from 'neverthrow'; /** * Necessary to verify the signature of a request. */ type ReceiverConfig = { /** * The current signing key. Get it from `https://console.upstash.com/qstash * * If not provided, value will be inferred from environment variables based on QSTASH_REGION * and UPSTASH_REGION header. */ currentSigningKey?: string; /** * The next signing key. Get it from `https://console.upstash.com/qstash * * If not provided, value will be inferred from environment variables based on QSTASH_REGION * and UPSTASH_REGION header. */ nextSigningKey?: string; }; type VerifyRequest = { /** * The signature from the `upstash-signature` header. */ signature: string; /** * The raw request body. */ body: string; /** * URL of the endpoint where the request was sent to. * * Omit empty to disable checking the url. */ url?: string; /** * Number of seconds to tolerate when checking `nbf` and `exp` claims, to deal with small clock differences among different servers * * @default 0 */ clockTolerance?: number; /** * The region from the `upstash-region` header. * * Used to infer which signing keys to use for verification in multi-region setups. */ upstashRegion?: string; }; declare class SignatureError extends Error { constructor(message: string); } /** * Receiver offers a simple way to verify the signature of a request. */ declare class Receiver { private readonly currentSigningKey?; private readonly nextSigningKey?; constructor(config?: ReceiverConfig); /** * Verify the signature of a request. * * Tries to verify the signature with the current signing key. * If that fails, maybe because you have rotated the keys recently, it will * try to verify the signature with the next signing key. * * If that fails, the signature is invalid and a `SignatureError` is thrown. 
*/ verify(request: VerifyRequest): Promise; /** * Verify signature with a specific signing key */ private verifyWithKey; private verifyBodyAndUrl; } type Unit = "s" | "m" | "h" | "d"; type Duration = `${bigint}${Unit}`; type State = "CREATED" | "ACTIVE" | "DELIVERED" | "ERROR" | "RETRY" | "FAILED" | "CANCELED" | "IN_PROGRESS"; type HTTPMethods = "GET" | "POST" | "PUT" | "DELETE" | "PATCH"; type Log = { time: number; state: State; messageId: string; nextDeliveryTime?: number; error?: string; url: string; urlGroup?: string; topicName?: string; endpointName?: string; header?: Record; body?: string; label?: string; }; /** * Deprecated. Use the `Log` type instead. * * @deprecated */ type Event = Log; type LogPayload = Omit & { topicName: string; }; /** * Deprecated. Use the `EventPayload` type instead. * * @deprecated */ type EventPayload = LogPayload; type GetLogsPayload = { cursor?: string; events: LogPayload[]; }; /** * Deprecated. use the `GetLogsPayload` type instead. * * @deprecated */ type GetEventsPayload = GetLogsPayload; type WithCursor = T & { cursor?: number; }; type BodyInit = Blob | FormData | URLSearchParams | ReadableStream | string; type HeadersInit = Headers | Record | [string, string][] | IterableIterator<[string, string]>; type RequestOptions = RequestInit & { backend?: string; }; type ChatRateLimit = { "limit-requests": string | null; "limit-tokens": string | null; "remaining-requests": string | null; "remaining-tokens": string | null; "reset-requests": string | null; "reset-tokens": string | null; }; type RateLimit = { limit: string | null; remaining: string | null; reset: string | null; }; type FlowControl = { /** * flow control key */ key: string; } & ({ /** * number of requests which can be active with the same flow control key */ parallelism: number; /** * number of requests to activate per second with the same flow control key * * @deprecated use rate instead */ ratePerSecond?: number; /** * number of requests to activate within the period 
with the same flow control key. * * Default period is a second. */ rate?: number; /** * The time interval for the `rate` limit. * * For example, if `rate` is 10 and `period` is "1s" (or 1), then 10 requests can be activated per second. * If `rate` is 5 and `period` is "1m" (or 60), then 5 requests can be activated per minute. * * Defaults to "1s" (one second) if not specified. * * Can be specified as a number (in seconds) or a duration string (e.g., "10s", "5m", "1h", "2d"). */ period?: Duration | number; } | { /** * number of requests which can be active with the same flow control key */ parallelism?: number; /** * number of requests to activate per second with the same flow control key * * @deprecated use rate instead */ ratePerSecond: number; /** * number of requests to activate within the period with the same flow control key. * Default period is a second. */ rate?: number; /** * The time interval for the `rate` limit. * * For example, if `rate` is 10 and `period` is "1s" (or 1), then 10 requests can be activated per second. * If `rate` is 5 and `period` is "1m" (or 60), then 5 requests can be activated per minute. * * Defaults to "1s" (one second) if not specified. * * Can be specified as a number (in seconds) or a duration string (e.g., "10s", "5m", "1h", "2d"). */ period?: Duration | number; } | { /** * number of requests which can be active with the same flow control key */ parallelism?: number; /** * number of requests to activate per second with the same flow control key * * @deprecated use rate instead */ ratePerSecond?: number; /** * number of requests to activate within the period with the same flow control key. * Default period is a second. */ rate: number; /** * The time interval for the `rate` limit. * * For example, if `rate` is 10 and `period` is "1s" (or 1), then 10 requests can be activated per second. * If `rate` is 5 and `period` is "1m" (or 60), then 5 requests can be activated per minute. * * Defaults to "1s" (one second) if not specified. 
* * Can be specified as a number (in seconds) or a duration string (e.g., "10s", "5m", "1h", "2d"). */ period?: Duration | number; }); type ProviderInfo = { /** * full url used for request */ url: string; /** * base url of the request */ baseUrl: string; /** * route elements which will follow the baseUrl */ route: string[]; /** * headers to include in the request */ appendHeaders: Record; /** * provider owner */ owner: Owner; /** * method to use in the request */ method: HTTPMethods; }; type ApiKind = "llm" | "email"; type Owner = EmailOwner | LLMOwner; type PublishApi> = { name: TName; provider?: TProvider; }; /** * Email */ type EmailOwner = "resend"; type PublishEmailApi = Required>>; /** * LLM */ type LLMOwner = "upstash" | "openai" | "anthropic" | "custom"; type LLMOptions = { analytics?: { name: "helicone"; token: string; }; }; type PublishLLMApi = PublishApi<"llm", BaseProvider<"llm", LLMOwner>> & LLMOptions; declare abstract class BaseProvider { abstract readonly apiKind: TName; abstract readonly method: HTTPMethods; readonly baseUrl: string; token: string; readonly owner: TOwner; constructor(baseUrl: string, token: string, owner: TOwner); /** * called before returning the final request * * @param request */ abstract onFinish(request: ProviderInfo, options: unknown): ProviderInfo; abstract getRoute(): string[]; abstract getHeaders(options: unknown): Record; getUrl(): string; } declare class LLMProvider extends BaseProvider<"llm", LLMOwner> { readonly apiKind = "llm"; readonly organization?: string; readonly method = "POST"; constructor(baseUrl: string, token: string, owner: TOwner, organization?: string); getRoute(): string[]; getHeaders(options: LLMOptions): Record; /** * Checks if callback exists and adds analytics in place if it's set. * * @param request * @param options */ onFinish(providerInfo: ProviderInfo, options: LLMOptions): ProviderInfo; } /** * @deprecated as of version 2.7.17. Will be removed in qstash-js 3.0.0. 
* * Please use an alternative LLM provider. * * openai: https://upstash.com/docs/qstash/integrations/llm * anthropic: https://upstash.com/docs/qstash/integrations/anthropic */ declare const upstash: () => LLMProvider<"upstash">; declare const openai: ({ token, organization, }: { token: string; organization?: string; }) => LLMProvider<"openai">; declare const anthropic: ({ token }: { token: string; }) => LLMProvider<"anthropic">; declare const custom: ({ baseUrl, token, }: { baseUrl: string; token: string; }) => LLMProvider<"custom">; type ChatCompletionMessage = { role: "system" | "assistant" | "user"; content: string; }; type ChatModel = "meta-llama/Meta-Llama-3-8B-Instruct" | "mistralai/Mistral-7B-Instruct-v0.2"; type ChatResponseFormat = { type: "text" | "json_object"; }; type TopLogprob = { token: string; bytes: number[]; logprob: number; }; type ChatCompletionTokenLogprob = { token: string; bytes: number[]; logprob: number; top_logprobs: TopLogprob[]; }; type ChoiceLogprobs = { content: ChatCompletionTokenLogprob[]; }; type Choice = { finish_reason: "stop" | "length"; index: number; logprobs: ChoiceLogprobs; message: ChatCompletionMessage; }; type CompletionUsage = { completion_tokens: number; prompt_tokens: number; total_tokens: number; }; type ChatCompletion = { id: string; choices: Choice[]; created: number; model: string; object: "chat.completion"; system_fingerprint: string; usage: CompletionUsage; }; type ChunkChoice = { delta: ChatCompletionMessage; finish_reason: "stop" | "length"; index: number; logprobs: ChoiceLogprobs; }; type ChatCompletionChunk = { id: string; choices: ChunkChoice[]; created: number; model: string; object: "chat.completion.chunk"; system_fingerprint: string; usage: CompletionUsage; }; type StreamEnabled = { stream: true; }; type StreamDisabled = { stream: false; } | object; type StreamParameter = StreamEnabled | StreamDisabled; type OpenAIChatModel = "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | 
"gpt-4-turbo-preview" | "gpt-4-1106-preview" | "gpt-4-vision-preview" | "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-16k-0613"; type ChatRequestCommonFields = { frequency_penalty?: number; logit_bias?: Record; logprobs?: boolean; top_logprobs?: number; max_tokens?: number; n?: number; presence_penalty?: number; response_format?: ChatResponseFormat; seed?: number; stop?: string | string[]; temperature?: number; top_p?: number; }; type PromptChatRequestFields = ChatRequestCommonFields & { system: string; user: string; }; type ChatRequestFields = ChatRequestCommonFields & { messages: ChatCompletionMessage[]; }; type ChatRequestProviders = { provider: LLMProvider<"openai">; model: OpenAIChatModel; analytics?: { name: "helicone"; token: string; }; } | { provider: LLMProvider<"custom">; model: string; analytics?: { name: "helicone"; token: string; }; } | { provider: LLMProvider<"upstash">; model: ChatModel; analytics?: { name: "helicone"; token: string; }; }; type PromptChatRequest = ChatRequestProviders & PromptChatRequestFields & TStream; type ChatRequest = ChatRequestProviders & ChatRequestFields & TStream; type UpstashRequest = { /** * The path to the resource. */ path: string[]; /** * A BodyInit object or null to set request's body. */ body?: BodyInit | null; /** * A Headers object, an object literal, or an array of two-item arrays to set * request's headers. */ headers?: HeadersInit; /** * A boolean to set request's keepalive. */ keepalive?: boolean; /** * A string to set request's method. */ method?: HTTPMethods; query?: Record; /** * if enabled, call `res.json()` * * @default true */ parseResponseAsJson?: boolean; /** * optionally overwrite the baseUrl of the http. * * default value of the http is base qstash url. 
*/ baseUrl?: string; }; type UpstashResponse = TResult & { error?: string; }; type Requester = { request: (request: UpstashRequest) => Promise>; requestStream: (request: UpstashRequest) => AsyncIterable; headers?: Headers; telemetryHeaders?: Headers; }; type RetryConfig = false | { /** * The number of retries to attempt before giving up. * * @default 5 */ retries?: number; /** * A backoff function receives the current retry cound and returns a number in milliseconds to wait before retrying. * * @default * ```ts * Math.exp(retryCount) * 50 * ``` */ backoff?: (retryCount: number) => number; }; type Message = { /** * A unique identifier for this message. */ messageId: string; /** * The url group name if this message was sent to a urlGroup. */ urlGroup?: string; /** * Deprecated. The topic name if this message was sent to a urlGroup. Use urlGroup instead */ topicName?: string; /** * The url where this message is sent to. */ url: string; /** * The endpoint name of the message if the endpoint is given a * name within the url group. */ endpointName?: string; /** * The api name if this message was sent to an api */ api?: string; /** * The http method used to deliver the message */ method?: HTTPMethods; /** * The http headers sent along with the message to your API. */ header?: Record; /** * The http body sent to your API */ body?: string; /** * The base64 encoded body if the body contains non-UTF-8 characters, * `None` otherwise. */ bodyBase64?: string; /** * Maxmimum number of retries. */ maxRetries?: number; /** * The retry delay expression for this message, * if retry_delay was set when publishing the message. */ retryDelayExpression?: PublishRequest["retryDelay"]; /** * A unix timestamp (milliseconds) after which this message may get delivered. */ notBefore?: number; /** * A unix timestamp (milliseconds) when this messages was created. */ createdAt: number; /** * The callback url if configured. */ callback?: string; /** * The failure callback url if configured. 
*/ failureCallback?: string; /** * The queue name if this message was sent to a queue. */ queueName?: string; /** * The scheduleId of the message if the message is triggered by a schedule */ scheduleId?: string; /** * IP address of the publisher of this message */ callerIp?: string; /** * flow control key */ flowControlKey: string; /** * number of requests which can be active with the same flow control key */ parallelism?: number; /** * number of requests to activate per second with the same flow control key * * @deprecated use rate instead */ ratePerSecond?: number; /** * number of requests to activate within the period with the same flow control key. * Default period is a second. */ rate?: number; /** * The time interval during which the specified `rate` of requests can be activated * using the same flow control key. * * In seconds. */ period?: number; /** * The label assigned to the message for filtering purposes. */ label?: string; }; type MessagePayload = Omit & { topicName: string; }; declare class Messages { private readonly http; constructor(http: Requester); /** * Get a message */ get(messageId: string): Promise; /** * Cancel a message */ delete(messageId: string): Promise; deleteMany(messageIds: string[]): Promise; deleteAll(): Promise; } type DlqMessage = Message & { /** * The unique id within the DLQ */ dlqId: string; /** * The HTTP status code of the last failed delivery attempt */ responseStatus?: number; /** * The response headers of the last failed delivery attempt */ responseHeader?: Record; /** * The response body of the last failed delivery attempt if it is * composed of UTF-8 characters only, `None` otherwise. */ responseBody?: string; /** * The base64 encoded response body of the last failed delivery attempt * if the response body contains non-UTF-8 characters, `None` otherwise. 
*/ responseBodyBase64?: string; }; type DLQFilter = { /** * Filter DLQ entries by message id */ messageId?: string; /** * Filter DLQ entries by url */ url?: string; /** * Filter DLQ entries by url group name */ urlGroup?: string; /** * Filter DLQ entries by api name */ api?: string; /** * Filter DLQ entries by queue name */ queueName?: string; /** * Filter DLQ entries by schedule id */ scheduleId?: string; /** * Filter DLQ entries by starting time, in milliseconds */ fromDate?: number; /** * Filter DLQ entries by ending time, in milliseconds */ toDate?: number; /** * Filter DLQ entries by label */ label?: string; /** * Filter DLQ entries by HTTP status of the response */ responseStatus?: number; /** * Filter DLQ entries by IP address of the publisher of the message */ callerIp?: string; }; declare class DLQ { private readonly http; constructor(http: Requester); /** * List messages in the dlq */ listMessages(options?: { cursor?: string; count?: number; filter?: DLQFilter; }): Promise<{ messages: DlqMessage[]; cursor?: string; }>; /** * Remove a message from the dlq using it's `dlqId` */ delete(dlqMessageId: string): Promise; /** * Remove multiple messages from the dlq using their `dlqId`s */ deleteMany(request: { dlqIds: string[]; }): Promise<{ deleted: number; }>; } declare class Chat { private http; private token; constructor(http: Requester, token: string); private static toChatRequest; /** * Calls the Upstash completions api given a ChatRequest. * * Returns a ChatCompletion or a stream of ChatCompletionChunks * if stream is enabled. * * @param request ChatRequest with messages * @returns Chat completion or stream */ create: (request: ChatRequest) => Promise : ChatCompletion>; /** * Calls the Upstash completions api given a ChatRequest. * * Returns a ChatCompletion or a stream of ChatCompletionChunks * if stream is enabled. 
* * @param request ChatRequest with messages * @returns Chat completion or stream */ private createThirdParty; private getAuthorizationToken; /** * Calls the Upstash completions api given a PromptRequest. * * Returns a ChatCompletion or a stream of ChatCompletionChunks * if stream is enabled. * * @param request PromptRequest with system and user messages. * Note that system parameter shouldn't be passed in the case of * mistralai/Mistral-7B-Instruct-v0.2 model. * @returns Chat completion or stream */ prompt: (request: PromptChatRequest) => Promise : ChatCompletion>; } type QueueResponse = { createdAt: number; updatedAt: number; name: string; parallelism: number; lag: number; paused: boolean; }; type UpsertQueueRequest = { /** * The number of parallel consumers consuming from the queue. * * @default 1 */ parallelism?: number; /** * Whether to pause the queue or not. A paused queue will not * deliver new messages until it is resumed. * * @default false */ paused?: boolean; }; declare class Queue { private readonly http; private readonly queueName; constructor(http: Requester, queueName?: string); /** * Create or update the queue */ upsert(request: UpsertQueueRequest): Promise; /** * Get the queue details */ get(): Promise; /** * List queues */ list(): Promise; /** * Delete the queue */ delete(): Promise; /** * Enqueue a message to a queue. */ enqueue(request: TRequest): Promise>; /** * Enqueue a message to a queue, serializing the body to JSON. */ enqueueJSON = PublishRequest>(request: TRequest): Promise>; /** * Pauses the queue. * * A paused queue will not deliver messages until * it is resumed. */ pause(): Promise; /** * Resumes the queue. 
*/ resume(): Promise; } type Schedule = { scheduleId: string; cron: string; createdAt: number; destination: string; method: string; header?: Record; body?: string; bodyBase64?: string; retries: number; delay?: number; callback?: string; failureCallback?: string; callerIp?: string; isPaused: boolean; queueName?: string; flowControlKey?: string; parallelism?: number; rate?: number; /** * @deprecated use rate instead */ ratePerSecond?: number; /** * The time interval during which the specified `rate` of requests can be activated * using the same flow control key. * * In seconds. */ period?: number; /** * The retry delay expression for this schedule, * if retry_delay was set when creating the schedule. */ retryDelayExpression?: PublishRequest["retryDelay"]; /** * The label assigned to the schedule for filtering purposes. */ label?: string; /** * The timestamp of the last scheduled execution. */ lastScheduleTime?: number; /** * The timestamp of the next scheduled execution. */ nextScheduleTime?: number; /** * The states of the last scheduled messages. * * Maps message id to state */ lastScheduleStates?: Record; /** * The IP address of the caller who created the schedule. */ callerIP?: string; }; type CreateScheduleRequest = { /** * Either a URL or urlGroup name */ destination: string; /** * The message to send. * * This can be anything, but please set the `Content-Type` header accordingly. * * You can leave this empty if you want to send a message with no body. */ body?: BodyInit; /** * Optionally send along headers with the message. * These headers will be sent to your destination. * * We highly recommend sending a `Content-Type` header along, as this will help your destination * server to understand the content of the message. */ headers?: HeadersInit; /** * Optionally delay the delivery of this message. * * In seconds. 
* * @default undefined */ delay?: Duration | number; /** * In case your destination server is unavailable or returns a status code outside of the 200-299 * range, we will retry the request after a certain amount of time. * * Configure how many times you would like the delivery to be retried * * @default The maximum retry quota associated with your account. */ retries?: number; /** * Use a callback url to forward the response of your destination server to your callback url. * * The callback url must be publicly accessible * * @default undefined */ callback?: string; /** * Use a failure callback url to handle messages that could not be delivered. * * The failure callback url must be publicly accessible * * @default undefined */ failureCallback?: string; /** * The method to use when sending a request to your API * * @default `POST` */ method?: HTTPMethods; /** * Specify a cron expression to repeatedly send this message to the destination. */ cron: string; /** * The HTTP timeout value to use while calling the destination URL. * When a timeout is specified, it will be used instead of the maximum timeout * value permitted by the QStash plan. It is useful in scenarios, where a message * should be delivered with a shorter timeout. * * In seconds. * * @default undefined */ timeout?: Duration | number; /** * Schedule id to use. * * Can be used to update the settings of an existing schedule. * * @default undefined */ scheduleId?: string; /** * Queue name to schedule the message over. */ queueName?: string; /** * Settings for controlling the number of active requests * and number of requests per second with the same key. */ flowControl?: FlowControl; /** * Assign a label to the schedule to filter logs later. 
* * @default undefined */ label?: string; } & Pick; declare class Schedules { private readonly http; constructor(http: Requester); /** * Create a schedule */ create(request: CreateScheduleRequest): Promise<{ scheduleId: string; }>; /** * Get a schedule */ get(scheduleId: string): Promise; /** * List your schedules */ list(): Promise; /** * Delete a schedule */ delete(scheduleId: string): Promise; /** * Pauses the schedule. * * A paused schedule will not deliver messages until * it is resumed. */ pause({ schedule }: { schedule: string; }): Promise; /** * Resumes the schedule. */ resume({ schedule }: { schedule: string; }): Promise; } type Endpoint = { /** * The name of the endpoint (optional) */ name?: string; /** * The url of the endpoint */ url: string; }; type AddEndpointsRequest = { /** * The name of the url group. * Must be unique and only contain alphanumeric, hyphen, underscore and periods. */ name: string; endpoints: Endpoint[]; }; type RemoveEndpointsRequest = { /** * The name of the url group. * Must be unique and only contain alphanumeric, hyphen, underscore and periods. */ name: string; endpoints: ({ name: string; url?: string; } | { name?: string; url: string; })[]; }; type UrlGroup = { /** * A unix timestamp (milliseconds) */ createdAt: number; /** * A unix timestamp (milliseconds) */ updatedAt: number; /** * The name of this url group. */ name: string; /** * A list of all subscribed endpoints */ endpoints: Endpoint[]; }; declare class UrlGroups { private readonly http; constructor(http: Requester); /** * Create a new url group with the given name and endpoints */ addEndpoints(request: AddEndpointsRequest): Promise; /** * Remove endpoints from a url group. */ removeEndpoints(request: RemoveEndpointsRequest): Promise; /** * Get a list of all url groups. */ list(): Promise; /** * Get a single url group */ get(name: string): Promise; /** * Delete a url group */ delete(name: string): Promise; } /** * Base class outlining steps. 
Basically, each step kind (run/sleep/sleepUntil) * should have two methods: getPlanStep & getResultStep. * * getPlanStep works the same way for all so it's implemented here. * The different step types will implement their own getResultStep method. */ declare abstract class BaseLazyStep { readonly stepName: string; abstract readonly stepType: StepType; constructor(stepName: string); /** * plan step to submit when step will run parallel with other * steps (parallel call state `first`) * * @param concurrent number of steps running parallel * @param targetStep target step id corresponding to this step * @returns */ abstract getPlanStep(concurrent: number, targetStep: number): Step; /** * result step to submit after the step executes. Used in single step executions * and when a plan step executes in parallel executions (parallel call state `partial`). * * @param concurrent * @param stepId */ abstract getResultStep(concurrent: number, stepId: number): Promise>; } declare const LOG_LEVELS: readonly ["DEBUG", "INFO", "SUBMIT", "WARN", "ERROR"]; type LogLevel = (typeof LOG_LEVELS)[number]; type ChatLogEntry = { timestamp: number; workflowRunId: string; logLevel: LogLevel; eventType: "ENDPOINT_START" | "SUBMIT_THIRD_PARTY_RESULT" | "CREATE_CONTEXT" | "SUBMIT_FIRST_INVOCATION" | "RUN_SINGLE" | "RUN_PARALLEL" | "SUBMIT_STEP" | "SUBMIT_CLEANUP" | "RESPONSE_WORKFLOW" | "RESPONSE_DEFAULT" | "ERROR"; details: unknown; }; type WorkflowLoggerOptions = { logLevel: LogLevel; logOutput: "console"; }; declare class WorkflowLogger { private logs; private options; private workflowRunId?; constructor(options: WorkflowLoggerOptions); log(level: LogLevel, eventType: ChatLogEntry["eventType"], details?: unknown): Promise; setWorkflowRunId(workflowRunId: string): void; private writeToConsole; private shouldLog; static getLogger(verbose?: boolean | WorkflowLogger): WorkflowLogger | undefined; } declare class AutoExecutor { private context; private promises; private activeLazyStepList?; private 
debug?; private readonly nonPlanStepCount; private readonly steps; private indexInCurrentList; stepCount: number; planStepCount: number; protected executingStep: string | false; constructor(context: WorkflowContext, steps: Step[], debug?: WorkflowLogger); /** * Adds the step function to the list of step functions to run in * parallel. After adding the function, defers the execution, so * that if there is another step function to be added, it's also * added. * * After all functions are added, list of functions are executed. * If there is a single function, it's executed by itself. If there * are multiple, they are run in parallel. * * If a function is already executing (this.executingStep), this * means that there is a nested step which is not allowed. In this * case, addStep throws QStashWorkflowError. * * @param stepInfo step plan to add * @returns result of the step function */ addStep(stepInfo: BaseLazyStep): Promise; /** * Wraps a step function to set this.executingStep to step name * before running and set this.executingStep to False after execution * ends. * * this.executingStep allows us to detect nested steps which are not * allowed. * * @param stepName name of the step being wrapped * @param stepFunction step function to wrap * @returns wrapped step function */ wrapStep(stepName: string, stepFunction: StepFunction): TResult | Promise; /** * Executes a step: * - If the step result is available in the steps, returns the result * - If the result is not avaiable, runs the function * - Sends the result to QStash * * @param lazyStep lazy step to execute * @returns step result */ protected runSingle(lazyStep: BaseLazyStep): Promise; /** * Runs steps in parallel. 
* * @param stepName parallel step name * @param stepFunctions list of async functions to run in parallel * @returns results of the functions run in parallel */ protected runParallel(parallelSteps: { [K in keyof TResults]: BaseLazyStep; }): Promise; /** * Determines the parallel call state * * First filters the steps to get the steps which are after `initialStepCount` parameter. * * Depending on the remaining steps, decides the parallel state: * - "first": If there are no steps * - "last" If there are equal to or more than `2 * parallelStepCount`. We multiply by two * because each step in a parallel execution will have 2 steps: a plan step and a result * step. * - "partial": If the last step is a plan step * - "discard": If the last step is not a plan step. This means that the parallel execution * is in progress (there are still steps to run) and one step has finished and submitted * its result to QStash * * @param parallelStepCount number of steps to run in parallel * @param initialStepCount steps after the parallel invocation * @returns parallel call state */ protected getParallelCallState(parallelStepCount: number, initialStepCount: number): ParallelCallState; /** * sends the steps to QStash as batch * * @param steps steps to send */ private submitStepsToQStash; /** * Get the promise by executing the lazt steps list. If there is a single * step, we call `runSingle`. Otherwise `runParallel` is called. 
* * @param lazyStepList steps list to execute * @returns promise corresponding to the execution */ private getExecutionPromise; /** * @param lazyStepList steps we executed * @param result result of the promise from `getExecutionPromise` * @param index index of the current step * @returns result[index] if lazyStepList > 1, otherwise result */ private static getResult; private deferExecution; } /** * Upstash Workflow context * * See the docs for fields and methods https://upstash.com/docs/qstash/workflows/basics/context */ declare class WorkflowContext { protected readonly executor: AutoExecutor; protected readonly steps: Step[]; /** * QStash client of the workflow * * Can be overwritten by passing `qstashClient` parameter in `serve`: * * ```ts * import { Client } from "@upstash/qstash" * * export const POST = serve( * async (context) => { * ... * }, * { * qstashClient: new Client({...}) * } * ) * ``` */ readonly qstashClient: WorkflowClient; /** * Run id of the workflow */ readonly workflowRunId: string; /** * URL of the workflow * * Can be overwritten by passing a `url` parameter in `serve`: * * ```ts * export const POST = serve( * async (context) => { * ... * }, * { * url: "new-url-value" * } * ) * ``` */ readonly url: string; /** * URL to call in case of workflow failure with QStash failure callback * * https://upstash.com/docs/qstash/features/callbacks#what-is-a-failure-callback * * Can be overwritten by passing a `failureUrl` parameter in `serve`: * * ```ts * export const POST = serve( * async (context) => { * ... * }, * { * failureUrl: "new-url-value" * } * ) * ``` */ readonly failureUrl?: string; /** * Payload of the request which started the workflow. * * To specify its type, you can define `serve` as follows: * * ```ts * // set requestPayload type to MyPayload: * export const POST = serve( * async (context) => { * ... * } * ) * ``` * * By default, `serve` tries to apply `JSON.parse` to the request payload. 
* If your payload is encoded in a format other than JSON, you can utilize * the `initialPayloadParser` parameter: * * ```ts * export const POST = serve( * async (context) => { * ... * }, * { * initialPayloadParser: (initialPayload) => {return doSomething(initialPayload)} * } * ) * ``` */ readonly requestPayload: TInitialPayload; /** * headers of the initial request */ readonly headers: Headers; /** * initial payload as a raw string */ readonly rawInitialPayload: string; /** * Map of environment variables and their values. * * Can be set using the `env` option of serve: * * ```ts * export const POST = serve( * async (context) => { * const key = context.env["API_KEY"]; * }, * { * env: { * "API_KEY": "*****"; * } * } * ) * ``` * * Default value is set to `process.env`. */ readonly env: Record; /** * Number of retries */ readonly retries: number; constructor({ qstashClient, workflowRunId, headers, steps, url, failureUrl, debug, initialPayload, rawInitialPayload, env, retries, }: { qstashClient: WorkflowClient; workflowRunId: string; headers: Headers; steps: Step[]; url: string; failureUrl?: string; debug?: WorkflowLogger; initialPayload: TInitialPayload; rawInitialPayload?: string; env?: Record; retries?: number; }); /** * Executes a workflow step * * ```typescript * const result = await context.run("step 1", () => { * return "result" * }) * ``` * * Can also be called in parallel and the steps will be executed * simulatenously: * * ```typescript * const [result1, result2] = await Promise.all([ * context.run("step 1", () => { * return "result1" * }) * context.run("step 2", async () => { * return await fetchResults() * }) * ]) * ``` * * @param stepName name of the step * @param stepFunction step function to be executed * @returns result of the step function */ run(stepName: string, stepFunction: StepFunction): Promise; /** * Stops the execution for the duration provided. 
* * @param stepName * @param duration sleep duration in seconds * @returns undefined */ sleep(stepName: string, duration: number): Promise; /** * Stops the execution until the date time provided. * * @param stepName * @param datetime time to sleep until. Can be provided as a number (in unix seconds), * as a Date object or a string (passed to `new Date(datetimeString)`) * @returns undefined */ sleepUntil(stepName: string, datetime: Date | string | number): Promise; /** * Makes a third party call through QStash in order to make a * network call without consuming any runtime. * * ```ts * const postResult = await context.call( * "post call step", * `https://www.some-endpoint.com/api`, * "POST", * "my-payload" * ); * ``` * * tries to parse the result of the request as JSON. If it's * not a JSON which can be parsed, simply returns the response * body as it is. * * @param stepName * @param url url to call * @param method call method * @param body call body * @param headers call headers * @returns call result (parsed as JSON if possible) */ call(stepName: string, url: string, method: HTTPMethods, body?: TBody, headers?: Record): Promise; /** * Adds steps to the executor. Needed so that it can be overwritten in * DisabledWorkflowContext. */ protected addStep(step: BaseLazyStep): Promise; } /** * Workflow context which throws QStashWorkflowAbort before running the steps. * * Used for making a dry run before running any steps to check authentication. * * Consider an endpoint like this: * ```ts * export const POST = serve({ * routeFunction: context => { * if (context.headers.get("authentication") !== "Bearer secretPassword") { * console.error("Authentication failed."); * return; * } * * // ... * } * }) * ``` * * the serve method will first call the routeFunction with an DisabledWorkflowContext. * Here is the action we take in different cases * - "step-found": we will run the workflow related sections of `serve`. 
* - "run-ended": simply return success and end the workflow * - error: returns 500. */ declare class DisabledWorkflowContext extends WorkflowContext { private static readonly disabledMessage; /** * overwrite the WorkflowContext.addStep method to always raise QStashWorkflowAbort * error in order to stop the execution whenever we encounter a step. * * @param _step */ protected addStep(_step: BaseLazyStep): Promise; /** * copies the passed context to create a DisabledWorkflowContext. Then, runs the * route function with the new context. * * - returns "run-ended" if there are no steps found or * if the auth failed and user called `return` * - returns "step-found" if DisabledWorkflowContext.addStep is called. * - if there is another error, returns the error. * * @param routeFunction */ static tryAuthentication(routeFunction: RouteFunction, context: WorkflowContext): Promise | Err>; } /** * Interface for Client with required methods * * Needed to resolve import issues */ type WorkflowClient = { batchJSON: InstanceType["batchJSON"]; publishJSON: InstanceType["publishJSON"]; http: InstanceType["http"]; }; /** * Interface for Receiver with required methods * * Needed to resolve import issues */ type WorkflowReceiver = { verify: InstanceType["verify"]; }; declare const StepTypes: readonly ["Initial", "Run", "SleepFor", "SleepUntil", "Call"]; type StepType = (typeof StepTypes)[number]; type ThirdPartyCallFields = { /** * Third party call URL. Set when context.call is used. */ callUrl: string; /** * Third party call method. Set when context.call is used. */ callMethod: HTTPMethods; /** * Third party call body. Set when context.call is used. */ callBody: TBody; /** * Third party call headers. Set when context.call is used. */ callHeaders: Record; }; type Step = { /** * index of the step */ stepId: number; /** * name of the step */ stepName: string; /** * type of the step (Initial/Run/SleepFor/SleepUntil/Call) */ stepType: StepType; /** * step result. 
Set if context.run or context.call are used. */ out?: TResult; /** * sleep duration in seconds. Set when context.sleep is used. */ sleepFor?: number; /** * unix timestamp (in seconds) to wait until. Set when context.sleepUntil is used. */ sleepUntil?: number; /** * number of steps running concurrently if the step is in a parallel run. * Set to 1 if step is not parallel. */ concurrent: number; /** * target step of a plan step. In other words, the step to assign the * result of a plan step. * * undefined if the step is not a plan step (of a parallel run). Otherwise, * set to the target step. */ targetStep?: number; } & (ThirdPartyCallFields | { [P in keyof ThirdPartyCallFields]?: never; }); type RawStep = { messageId: string; body: string; callType: "step" | "toCallback" | "fromCallback"; }; type SyncStepFunction = () => TResult; type AsyncStepFunction = () => Promise; type StepFunction = AsyncStepFunction | SyncStepFunction; type ParallelCallState = "first" | "partial" | "discard" | "last"; type RouteFunction = (context: WorkflowContext) => Promise; type FinishCondition = "success" | "duplicate-step" | "fromCallback" | "auth-fail" | "failure-callback"; type WorkflowServeOptions = { /** * QStash client */ qstashClient?: WorkflowClient; /** * Function called to return a response after each step execution * * @param workflowRunId * @returns response */ onStepFinish?: (workflowRunId: string, finishCondition: FinishCondition) => TResponse; /** * Function to parse the initial payload passed by the user */ initialPayloadParser?: (initialPayload: string) => TInitialPayload; /** * Url of the endpoint where the workflow is set up. * * If not set, url will be inferred from the request. */ url?: string; /** * Verbose mode * * Disabled if not set. If set to true, a logger is created automatically. * * Alternatively, a WorkflowLogger can be passed. 
*/ verbose?: WorkflowLogger | true; /** * Receiver to verify *all* requests by checking if they come from QStash * * By default, a receiver is created from the env variables * QSTASH_CURRENT_SIGNING_KEY and QSTASH_NEXT_SIGNING_KEY if they are set. */ receiver?: WorkflowReceiver; /** * Url to call if QStash retries are exhausted while executing the workflow */ failureUrl?: string; /** * Failure function called when QStash retries are exhausted while executing * the workflow. Will overwrite `failureUrl` parameter with the workflow * endpoint if passed. * * @param context workflow context at the moment of error * @param failStatus error status * @param failResponse error message * @returns void */ failureFunction?: (context: Omit, failStatus: number, failResponse: string, failHeader: Record) => Promise | void; /** * Base Url of the workflow endpoint * * Can be used to set if there is a local tunnel or a proxy between * QStash and the workflow endpoint. * * Will be set to the env variable UPSTASH_WORKFLOW_URL if not passed. * If the env variable is not set, the url will be inferred as usual from * the `request.url` or the `url` parameter in `serve` options. * * @default undefined */ baseUrl?: string; /** * Optionally, one can pass an env object mapping environment * variables to their keys. * * Useful in cases like cloudflare with hono. */ env?: Record; /** * Number of retries to use in workflow requests * * 3 by default */ retries?: number; }; /** * Payload passed as body in failureFunction */ type FailureFunctionPayload = { /** * error name */ error: string; /** * error message */ message: string; }; /** * Makes all fields except the ones selected required */ type RequiredExceptFields = Omit, K> & Partial>; /** * Fills the options with default values if they are not provided. 
* * Default values for: * - qstashClient: QStash client created with QSTASH_URL and QSTASH_TOKEN env vars * - onStepFinish: returns a Response with workflowRunId & finish condition in the body (status: 200) * - initialPayloadParser: calls JSON.parse if initial request body exists. * - receiver: a Receiver if the required env vars are set * - baseUrl: env variable UPSTASH_WORKFLOW_URL * * @param options options including the client, onFinish and initialPayloadParser * @returns */ declare const processOptions: (options?: WorkflowServeOptions) => RequiredExceptFields, "verbose" | "receiver" | "url" | "failureFunction" | "failureUrl" | "baseUrl">; /** * Creates an async method that handles incoming requests and runs the provided * route function as a workflow. * * @param routeFunction - A function that uses WorkflowContext as a parameter and runs a workflow. * @param options - Options including the client, onFinish callback, and initialPayloadParser. * @returns An async method that consumes incoming requests and runs the workflow. * * @deprecated as of version 2.7.17. Will be removed in qstash-js 3.0.0. * Please use https://github.com/upstash/workflow-js * Migration Guide: https://upstash.com/docs/workflow/migration */ declare const serve: (routeFunction: RouteFunction, options?: WorkflowServeOptions) => ((request: TRequest) => Promise); /** * @deprecated as of version 2.7.17. Will be removed in qstash-js 3.0.0. * Please use https://github.com/upstash/workflow-js * Migration Guide: https://upstash.com/docs/workflow/migration */ declare class Workflow { private readonly http; constructor(http: Requester); /** * Cancel an ongoing workflow * * @param workflowRunId run id of the workflow to delete * @returns true if workflow is successfully deleted. Otherwise throws QStashError */ cancel(workflowRunId: string): Promise; } type ClientConfig = { /** * Url of the QStash api server. * * This is only used for testing. 
* * If not provided, value of the QSTASH_URL environment * variable will be used if it exists. If the QSTASH_URL * environment variable isn't set either, default is used. * * @default "https://qstash.upstash.io" */ baseUrl?: string; /** * The authorization token from the upstash console. * * If not provided, value of the QSTASH_TOKEN environment * variable will be used if it exists. */ token?: string; /** * Configure how the client should retry requests. */ retry?: RetryConfig; /** * Global headers to send with each request. * These can be overridden by the headers in the request. */ headers?: HeadersInit; /** * Enable telemetry to help us improve the SDK. * The sdk will send the sdk version, platform and node version as telemetry headers. * * @default true */ enableTelemetry?: boolean; }; type PublishBatchRequest = PublishRequest & { queueName?: string; }; type PublishRequest = { /** * The message to send. * * This can be anything, but please set the `Content-Type` header accordingly. * * You can leave this empty if you want to send a message with no body. */ body?: TBody; /** * Optionally send along headers with the message. * These headers will be sent to your destination. * * We highly recommend sending a `Content-Type` header along, as this will help your destination * server to understand the content of the message. */ headers?: HeadersInit; /** * Optionally delay the delivery of this message. * * In seconds. * * @default undefined */ delay?: Duration | number; /** * Optionally set the absolute delay of this message. * This will override the delay option. * The message will not be delivered until the specified time. * * Unix timestamp in seconds. * * @default undefined */ notBefore?: number; /** * Provide a unique id for deduplication. This id will be used to detect duplicate messages. * If a duplicate message is detected, the request will be accepted but not enqueued. * * We store deduplication ids for 90 days. 
Afterwards it is possible that the message with the * same deduplication id is delivered again. * * When scheduling a message, the deduplication happens before the schedule is created. * * @default undefined */ deduplicationId?: string; /** * If true, the message content will get hashed and used as deduplication id. * If a duplicate message is detected, the request will be accepted but not enqueued. * * The content based hash includes the following values: * - All headers, except Upstash-Authorization, this includes all headers you are sending. * - The entire raw request body * - The destination from the url path * * We store deduplication ids for 90 days. Afterwards it is possible that the message with the * same deduplication id is delivered again. * * When scheduling a message, the deduplication happens before the schedule is created. * * @default false */ contentBasedDeduplication?: boolean; /** * In case your destination server is unavailable or returns a status code outside of the 200-299 * range, we will retry the request after a certain amount of time. * * Configure how many times you would like the delivery to be retried up to the maxRetries limit * defined in your plan. * * @default 3 */ retries?: number; /** * Delay between retries. * * By default, the `retryDelay` is exponential backoff. * More details can be found in: https://upstash.com/docs/qstash/features/retry. * * The `retryDelay` option allows you to customize the delay (in milliseconds) between retry attempts when message delivery fails. * * You can use mathematical expressions and the following built-in functions to calculate the delay dynamically. * The special variable `retried` represents the current retry attempt count (starting from 0). 
* * Supported functions: * - `pow` * - `sqrt` * - `abs` * - `exp` * - `floor` * - `ceil` * - `round` * - `min` * - `max` * * Examples of valid `retryDelay` values: * ```ts * 1000 // 1 second * 1000 * (1 + retried) // 1 second multiplied by the current retry attempt * pow(2, retried) // 2 to the power of the current retry attempt * max(10, pow(2, retried)) // The greater of 10 or 2^retried * ``` */ retryDelay?: string; /** * Use a failure callback url to handle messages that could not be delivered. * * The failure callback url must be publicly accessible * * @default undefined */ failureCallback?: string; /** * The method to use when sending a request to your API * * @default `POST` */ method?: HTTPMethods; /** * The HTTP timeout value to use while calling the destination URL. * When a timeout is specified, it will be used instead of the maximum timeout * value permitted by the QStash plan. It is useful in scenarios, where a message * should be delivered with a shorter timeout. * * In seconds. * * @default undefined */ timeout?: Duration | number; /** * Settings for controlling the number of active requests * and number of requests per second with the same key. */ flowControl?: FlowControl; /** * Assign a label to the request to filter logs later. * * @default undefined */ label?: string; } & ({ /** * The url where the message should be sent to. */ url: string; urlGroup?: never; api?: never; topic?: never; /** * Use a callback url to forward the response of your destination server to your callback url. * * The callback url must be publicly accessible * * @default undefined */ callback?: string; } | { url?: never; /** * The url group the message should be sent to. */ urlGroup: string; api?: never; topic?: never; /** * Use a callback url to forward the response of your destination server to your callback url. 
* * The callback url must be publicly accessible * * @default undefined */ callback?: string; } | { url?: string; urlGroup?: never; /** * The api endpoint the request should be sent to. */ api: PublishLLMApi; topic?: never; /** * Use a callback url to forward the response of your destination server to your callback url. * * The callback url must be publicly accessible * * @default undefined */ callback: string; } | { url?: never; urlGroup?: never; /** * The api endpoint the request should be sent to. */ api: PublishEmailApi; topic?: never; callback?: string; } | { url?: never; urlGroup?: never; api?: never; /** * Deprecated. The topic the message should be sent to. Same as urlGroup * * @deprecated */ topic?: string; /** * Use a callback url to forward the response of your destination server to your callback url. * * The callback url must be publicly accessible * * @default undefined */ callback?: string; }); type PublishJsonRequest = Omit & { /** * The message to send. * This can be anything as long as it can be serialized to JSON. */ body: unknown; }; type LogsRequest = { cursor?: string | number; filter?: LogsRequestFilter; }; /** * Deprecated. Use `LogsRequest` instead. * * @deprecated */ type EventsRequest = LogsRequest; type LogsRequestFilter = { messageId?: string; state?: State; url?: string; urlGroup?: string; topicName?: string; api?: string; scheduleId?: string; queueName?: string; fromDate?: number; toDate?: number; count?: number; label?: string; }; type GetLogsResponse = { cursor?: string; logs: Log[]; /** * Deprecated. Use the `logs` field instead. * * @deprecated */ events: Log[]; }; /** * Deprecated. Use `GetLogsResponse` instead. * * @deprecated */ type GetEventsResponse = GetLogsResponse; type QueueRequest = { queueName?: string; }; declare class Client { http: Requester; private token; constructor(config?: ClientConfig); /** * Access the urlGroup API. * * Create, read, update or delete urlGroups. */ get urlGroups(): UrlGroups; /** * Deprecated. 
Use urlGroups instead. * * Access the topic API. * * Create, read, update or delete topics. */ get topics(): UrlGroups; /** * Access the dlq API. * * List or remove messages from the DLQ. */ get dlq(): DLQ; /** * Access the message API. * * Read or cancel messages. */ get messages(): Messages; /** * Access the schedule API. * * Create, read or delete schedules. */ get schedules(): Schedules; /** * Access the workflow API. * * cancel workflows. * * @deprecated as of version 2.7.17. Will be removed in qstash-js 3.0.0. * Please use @upstash/workflow instead https://github.com/upstash/workflow-js * Migration Guide: https://upstash.com/docs/workflow/migration */ get workflow(): Workflow; /** * Access the queue API. * * Create, read, update or delete queues. */ queue(request?: QueueRequest): Queue; /** * Access the Chat API. * * @deprecated This will be removed in qstash-js 3.0.0. Please use an alternative SDK for interacting with LLMs. * * Use the create or prompt methods. */ chat(): Chat; publish(request: TRequest): Promise>; /** * publishJSON is a utility wrapper around `publish` that automatically serializes the body * and sets the `Content-Type` header to `application/json`. */ publishJSON = PublishRequest>(request: TRequest): Promise>; /** * Batch publish messages to QStash. */ batch(request: PublishBatchRequest[]): Promise[]>; /** * Batch publish messages to QStash, serializing each body to JSON. */ batchJSON = PublishBatchRequest>(request: TRequest[]): Promise[]>; /** * Retrieve your logs. * * The logs endpoint is paginated and returns only 100 logs at a time. * If you want to receive more logs, you can use the cursor to paginate. * * The cursor is a unix timestamp with millisecond precision * * @example * ```ts * let cursor = Date.now() * const logs: Log[] = [] * while (cursor > 0) { * const res = await qstash.logs({ cursor }) * logs.push(...res.logs) * cursor = res.cursor ?? 
0 * } * ``` */ logs(request?: LogsRequest): Promise; /** * @deprecated Will be removed in the next major release. Use the `logs` method instead. * * Retrieve your logs. * * The logs endpoint is paginated and returns only 100 logs at a time. * If you want to receive more logs, you can use the cursor to paginate. * * The cursor is a unix timestamp with millisecond precision * * @example * ```ts * let cursor = Date.now() * const logs: Log[] = [] * while (cursor > 0) { * const res = await qstash.logs({ cursor }) * logs.push(...res.logs) * cursor = res.cursor ?? 0 * } * ``` */ events(request?: LogsRequest): Promise; } type PublishToApiResponse = { messageId: string; }; type PublishToUrlResponse = PublishToApiResponse & { url: string; deduplicated?: boolean; }; type PublishToUrlGroupsResponse = PublishToUrlResponse[]; type PublishResponse = TRequest extends { url: string; } ? PublishToUrlResponse : TRequest extends { urlGroup: string; } ? PublishToUrlGroupsResponse : PublishToApiResponse; export { type StreamEnabled as $, type AddEndpointsRequest as A, BaseProvider as B, type ChatRateLimit as C, type EventPayload as D, type EmailOwner as E, type FailureFunctionPayload as F, type GetLogsResponse as G, type HTTPMethods as H, type GetLogsPayload as I, type GetEventsPayload as J, type BodyInit as K, type LLMOwner as L, type Message as M, type HeadersInit as N, type RequestOptions as O, type ProviderInfo as P, type QueueRequest as Q, type RateLimit as R, type Step as S, type FlowControl as T, type UrlGroup as U, type VerifyRequest as V, type WithCursor as W, Chat as X, type ChatCompletionMessage as Y, type ChatCompletion as Z, type ChatCompletionChunk as _, type ReceiverConfig as a, type StreamDisabled as a0, type StreamParameter as a1, type OpenAIChatModel as a2, type PromptChatRequest as a3, type ChatRequest as a4, upstash as a5, openai as a6, anthropic as a7, custom as a8, type RouteFunction as a9, type WorkflowServeOptions as aa, Workflow as ab, processOptions as ac, 
serve as ad, WorkflowContext as ae, DisabledWorkflowContext as af, type WorkflowClient as ag, type WorkflowReceiver as ah, StepTypes as ai, type StepType as aj, type RawStep as ak, type SyncStepFunction as al, type AsyncStepFunction as am, type StepFunction as an, type ParallelCallState as ao, type FinishCondition as ap, type RequiredExceptFields as aq, type LogLevel as ar, type WorkflowLoggerOptions as as, WorkflowLogger as at, SignatureError as b, Receiver as c, type PublishBatchRequest as d, type PublishRequest as e, type PublishJsonRequest as f, type LogsRequest as g, type EventsRequest as h, type GetEventsResponse as i, Client as j, type PublishToApiResponse as k, type PublishToUrlResponse as l, type PublishToUrlGroupsResponse as m, type PublishResponse as n, type MessagePayload as o, Messages as p, type Schedule as q, type CreateScheduleRequest as r, Schedules as s, type Endpoint as t, type RemoveEndpointsRequest as u, UrlGroups as v, type State as w, type Log as x, type Event as y, type LogPayload as z };