{"version":3,"file":"llms.cjs","names":["BaseLLM","GenerationChunk","wrapOpenAIClientError","getEndpoint","getHeadersWithUserAgent","OpenAIClient"],"sources":["../src/llms.ts"],"sourcesContent":["import type { TiktokenModel } from \"js-tiktoken/lite\";\nimport { type ClientOptions, OpenAI as OpenAIClient } from \"openai\";\nimport { calculateMaxTokens } from \"@langchain/core/language_models/base\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { GenerationChunk, type LLMResult } from \"@langchain/core/outputs\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport {\n  BaseLLM,\n  type BaseLLMParams,\n} from \"@langchain/core/language_models/llms\";\nimport { chunkArray } from \"@langchain/core/utils/chunk_array\";\nimport type {\n  OpenAIApiKey,\n  OpenAICallOptions,\n  OpenAICoreRequestOptions,\n  OpenAIInput,\n} from \"./types.js\";\nimport {\n  OpenAIEndpointConfig,\n  getEndpoint,\n  getHeadersWithUserAgent,\n} from \"./utils/azure.js\";\nimport { wrapOpenAIClientError } from \"./utils/client.js\";\n\nexport type { OpenAICallOptions, OpenAIInput };\n\n/**\n * Interface for tracking token usage in OpenAI calls.\n */\ninterface TokenUsage {\n  completionTokens?: number;\n  promptTokens?: number;\n  totalTokens?: number;\n}\n\n/**\n * Wrapper around OpenAI large language models.\n *\n * To use you should have the `openai` package installed, with the\n * `OPENAI_API_KEY` environment variable set.\n *\n * To use with Azure, import the `AzureOpenAI` class.\n *\n * @remarks\n * Any parameters that are valid to be passed to {@link\n * https://platform.openai.com/docs/api-reference/completions/create |\n * `openai.createCompletion`} can be passed through {@link modelKwargs}, even\n * if not explicitly available on this class.\n * @example\n * ```typescript\n * const model = new OpenAI({\n *   modelName: \"gpt-4\",\n *   temperature: 0.7,\n *   maxTokens: 1000,\n *   maxRetries: 5,\n * });\n *\n * const res = await model.invoke(\n *   \"Question: What would be a good company name for a company that makes colorful socks?\\nAnswer:\"\n * );\n * console.log({ res });\n * ```\n */\nexport class OpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions>\n  extends BaseLLM<CallOptions>\n  implements Partial<OpenAIInput>\n{\n  static lc_name() {\n    return \"OpenAI\";\n  }\n\n  get callKeys() {\n    return [...super.callKeys, \"options\"];\n  }\n\n  lc_serializable = true;\n\n  get lc_secrets(): { [key: string]: string } | undefined {\n    return {\n      openAIApiKey: \"OPENAI_API_KEY\",\n      apiKey: \"OPENAI_API_KEY\",\n      organization: \"OPENAI_ORGANIZATION\",\n    };\n  }\n\n  get lc_aliases(): Record<string, string> {\n    return {\n      modelName: \"model\",\n      openAIApiKey: \"openai_api_key\",\n      apiKey: \"openai_api_key\",\n    };\n  }\n\n  temperature?: number;\n\n  maxTokens?: number;\n\n  topP?: number;\n\n  frequencyPenalty?: number;\n\n  presencePenalty?: number;\n\n  n = 1;\n\n  bestOf?: number;\n\n  logitBias?: Record<string, number>;\n\n  model = \"gpt-3.5-turbo-instruct\";\n\n  /** @deprecated Use \"model\" instead */\n  modelName: string;\n\n  modelKwargs?: OpenAIInput[\"modelKwargs\"];\n\n  batchSize = 20;\n\n  timeout?: number;\n\n  stop?: string[];\n\n  stopSequences?: string[];\n\n  user?: string;\n\n  streaming = false;\n\n  openAIApiKey?: OpenAIApiKey;\n\n  apiKey?: OpenAIApiKey;\n\n  organization?: string;\n\n  protected client: OpenAIClient;\n\n  protected clientConfig: ClientOptions;\n\n  constructor(\n    fields?: Partial<OpenAIInput> &\n      BaseLLMParams & {\n        configuration?: ClientOptions;\n      }\n  ) {\n    super(fields ?? {});\n    this._addVersion(\"@langchain/openai\", __PKG_VERSION__);\n\n    this.openAIApiKey =\n      fields?.apiKey ??\n      fields?.openAIApiKey ??\n      getEnvironmentVariable(\"OPENAI_API_KEY\");\n    this.apiKey = this.openAIApiKey;\n\n    this.organization =\n      fields?.configuration?.organization ??\n      getEnvironmentVariable(\"OPENAI_ORGANIZATION\");\n\n    this.model = fields?.model ?? fields?.modelName ?? this.model;\n    if (\n      (this.model?.startsWith(\"gpt-3.5-turbo\") ||\n        this.model?.startsWith(\"gpt-4\") ||\n        this.model?.startsWith(\"o1\")) &&\n      !this.model?.includes(\"-instruct\")\n    ) {\n      throw new Error(\n        [\n          `Your chosen OpenAI model, \"${this.model}\", is a chat model and not a text-in/text-out LLM.`,\n          `Passing it into the \"OpenAI\" class is no longer supported.`,\n          `Please use the \"ChatOpenAI\" class instead.`,\n          \"\",\n          `See this page for more information:`,\n          \"|\",\n          `└> https://js.langchain.com/docs/integrations/chat/openai`,\n        ].join(\"\\n\")\n      );\n    }\n    this.modelName = this.model;\n    this.modelKwargs = fields?.modelKwargs ?? {};\n    this.batchSize = fields?.batchSize ?? this.batchSize;\n    this.timeout = fields?.timeout;\n\n    this.temperature = fields?.temperature ?? this.temperature;\n    this.maxTokens = fields?.maxTokens ?? this.maxTokens;\n    this.topP = fields?.topP ?? this.topP;\n    this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;\n    this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;\n    this.n = fields?.n ?? this.n;\n    this.bestOf = fields?.bestOf ?? this.bestOf;\n    this.logitBias = fields?.logitBias;\n    this.stop = fields?.stopSequences ?? fields?.stop;\n    this.stopSequences = this.stop;\n    this.user = fields?.user;\n\n    this.streaming = fields?.streaming ?? false;\n\n    if (this.streaming && this.bestOf && this.bestOf > 1) {\n      throw new Error(\"Cannot stream results when bestOf > 1\");\n    }\n\n    this.clientConfig = {\n      apiKey: this.apiKey,\n      organization: this.organization,\n      dangerouslyAllowBrowser: true,\n      ...fields?.configuration,\n    };\n  }\n\n  /**\n   * Get the parameters used to invoke the model\n   */\n  invocationParams(\n    options?: this[\"ParsedCallOptions\"]\n  ): Omit<OpenAIClient.CompletionCreateParams, \"prompt\"> {\n    return {\n      model: this.model,\n      temperature: this.temperature,\n      max_tokens: this.maxTokens,\n      top_p: this.topP,\n      frequency_penalty: this.frequencyPenalty,\n      presence_penalty: this.presencePenalty,\n      n: this.n,\n      best_of: this.bestOf,\n      logit_bias: this.logitBias,\n      stop: options?.stop ?? this.stopSequences,\n      user: this.user,\n      stream: this.streaming,\n      ...this.modelKwargs,\n    };\n  }\n\n  /** @ignore */\n  _identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, \"prompt\"> & {\n    model_name: string;\n  } & ClientOptions {\n    return {\n      model_name: this.model,\n      ...this.invocationParams(),\n      ...this.clientConfig,\n    };\n  }\n\n  /**\n   * Get the identifying parameters for the model\n   */\n  identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, \"prompt\"> & {\n    model_name: string;\n  } & ClientOptions {\n    return this._identifyingParams();\n  }\n\n  /**\n   * Call out to OpenAI's endpoint with k unique prompts\n   *\n   * @param [prompts] - The prompts to pass into the model.\n   * @param [options] - Optional list of stop words to use when generating.\n   * @param [runManager] - Optional callback manager to use when generating.\n   *\n   * @returns The full LLM output.\n   *\n   * @example\n   * ```ts\n   * import { OpenAI } from \"langchain/llms/openai\";\n   * const openai = new OpenAI();\n   * const response = await openai.generate([\"Tell me a joke.\"]);\n   * ```\n   */\n  async _generate(\n    prompts: string[],\n    options: this[\"ParsedCallOptions\"],\n    runManager?: CallbackManagerForLLMRun\n  ): Promise<LLMResult> {\n    const subPrompts = chunkArray(prompts, this.batchSize);\n    const choices: OpenAIClient.CompletionChoice[] = [];\n    const tokenUsage: TokenUsage = {};\n\n    const params = this.invocationParams(options);\n\n    if (params.max_tokens === -1) {\n      if (prompts.length !== 1) {\n        throw new Error(\n          \"max_tokens set to -1 not supported for multiple inputs\"\n        );\n      }\n      params.max_tokens = await calculateMaxTokens({\n        prompt: prompts[0],\n        // Cast here to allow for other models that may not fit the union\n        modelName: this.model as TiktokenModel,\n      });\n    }\n\n    for (let i = 0; i < subPrompts.length; i += 1) {\n      const data = params.stream\n        ? await (async () => {\n            const choices: OpenAIClient.CompletionChoice[] = [];\n            let response: Omit<OpenAIClient.Completion, \"choices\"> | undefined;\n            const stream = await this.completionWithRetry(\n              {\n                ...params,\n                stream: true,\n                prompt: subPrompts[i],\n              },\n              options\n            );\n            for await (const message of stream) {\n              // on the first message set the response properties\n              if (!response) {\n                response = {\n                  id: message.id,\n                  object: message.object,\n                  created: message.created,\n                  model: message.model,\n                };\n              }\n\n              // on all messages, update choice\n              for (const part of message.choices) {\n                if (!choices[part.index]) {\n                  choices[part.index] = part;\n                } else {\n                  const choice = choices[part.index];\n                  choice.text += part.text;\n                  choice.finish_reason = part.finish_reason;\n                  choice.logprobs = part.logprobs;\n                }\n                // oxlint-disable-next-line no-void\n                void runManager?.handleLLMNewToken(part.text, {\n                  prompt: Math.floor(part.index / this.n),\n                  completion: part.index % this.n,\n                });\n              }\n            }\n            if (options.signal?.aborted) {\n              throw new Error(\"AbortError\");\n            }\n            return { ...response, choices };\n          })()\n        : await this.completionWithRetry(\n            {\n              ...params,\n              stream: false,\n              prompt: subPrompts[i],\n            },\n            {\n              signal: options.signal,\n              ...options.options,\n            }\n          );\n\n      choices.push(...data.choices);\n      const {\n        completion_tokens: completionTokens,\n        prompt_tokens: promptTokens,\n        total_tokens: totalTokens,\n      } = data.usage\n        ? data.usage\n        : {\n            completion_tokens: undefined,\n            prompt_tokens: undefined,\n            total_tokens: undefined,\n          };\n\n      if (completionTokens) {\n        tokenUsage.completionTokens =\n          (tokenUsage.completionTokens ?? 0) + completionTokens;\n      }\n\n      if (promptTokens) {\n        tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;\n      }\n\n      if (totalTokens) {\n        tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;\n      }\n    }\n\n    const generations = chunkArray(choices, this.n).map((promptChoices) =>\n      promptChoices.map((choice) => ({\n        text: choice.text ?? \"\",\n        generationInfo: {\n          finishReason: choice.finish_reason,\n          logprobs: choice.logprobs,\n        },\n      }))\n    );\n    return {\n      generations,\n      llmOutput: { tokenUsage },\n    };\n  }\n\n  // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation?\n  async *_streamResponseChunks(\n    input: string,\n    options: this[\"ParsedCallOptions\"],\n    runManager?: CallbackManagerForLLMRun\n  ): AsyncGenerator<GenerationChunk> {\n    const params = {\n      ...this.invocationParams(options),\n      prompt: input,\n      stream: true as const,\n    };\n    const stream = await this.completionWithRetry(params, options);\n    for await (const data of stream) {\n      const choice = data?.choices[0];\n      if (!choice) {\n        continue;\n      }\n      const chunk = new GenerationChunk({\n        text: choice.text,\n        generationInfo: {\n          finishReason: choice.finish_reason,\n        },\n      });\n      yield chunk;\n      // oxlint-disable-next-line no-void\n      void runManager?.handleLLMNewToken(chunk.text ?? \"\");\n    }\n    if (options.signal?.aborted) {\n      throw new Error(\"AbortError\");\n    }\n  }\n\n  /**\n   * Calls the OpenAI API with retry logic in case of failures.\n   * @param request The request to send to the OpenAI API.\n   * @param options Optional configuration for the API call.\n   * @returns The response from the OpenAI API.\n   */\n  async completionWithRetry(\n    request: OpenAIClient.CompletionCreateParamsStreaming,\n    options?: OpenAICoreRequestOptions\n  ): Promise<AsyncIterable<OpenAIClient.Completion>>;\n\n  async completionWithRetry(\n    request: OpenAIClient.CompletionCreateParamsNonStreaming,\n    options?: OpenAICoreRequestOptions\n  ): Promise<OpenAIClient.Completions.Completion>;\n\n  async completionWithRetry(\n    request:\n      | OpenAIClient.CompletionCreateParamsStreaming\n      | OpenAIClient.CompletionCreateParamsNonStreaming,\n    options?: OpenAICoreRequestOptions\n  ): Promise<\n    AsyncIterable<OpenAIClient.Completion> | OpenAIClient.Completions.Completion\n  > {\n    const requestOptions = this._getClientOptions(options);\n    return this.caller.call(async () => {\n      try {\n        const res = await this.client.completions.create(\n          request,\n          requestOptions\n        );\n        return res;\n      } catch (e) {\n        const error = wrapOpenAIClientError(e);\n        throw error;\n      }\n    });\n  }\n\n  /**\n   * Calls the OpenAI API with retry logic in case of failures.\n   * @param request The request to send to the OpenAI API.\n   * @param options Optional configuration for the API call.\n   * @returns The response from the OpenAI API.\n   */\n  protected _getClientOptions(\n    options: OpenAICoreRequestOptions | undefined\n  ): OpenAICoreRequestOptions {\n    if (!this.client) {\n      const openAIEndpointConfig: OpenAIEndpointConfig = {\n        baseURL: this.clientConfig.baseURL,\n      };\n\n      const endpoint = getEndpoint(openAIEndpointConfig);\n\n      const params = {\n        ...this.clientConfig,\n        baseURL: endpoint,\n        timeout: this.timeout,\n        maxRetries: 0,\n      };\n\n      if (!params.baseURL) {\n        delete params.baseURL;\n      }\n\n      params.defaultHeaders = getHeadersWithUserAgent(params.defaultHeaders);\n\n      this.client = new OpenAIClient(params);\n    }\n    const requestOptions = {\n      ...this.clientConfig,\n      ...options,\n    } as OpenAICoreRequestOptions;\n    return requestOptions;\n  }\n\n  _llmType() {\n    return \"openai\";\n  }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+DA,IAAa,SAAb,cACUA,qCAAAA,QAEV;CACE,OAAO,UAAU;AACf,SAAO;;CAGT,IAAI,WAAW;AACb,SAAO,CAAC,GAAG,MAAM,UAAU,UAAU;;CAGvC,kBAAkB;CAElB,IAAI,aAAoD;AACtD,SAAO;GACL,cAAc;GACd,QAAQ;GACR,cAAc;GACf;;CAGH,IAAI,aAAqC;AACvC,SAAO;GACL,WAAW;GACX,cAAc;GACd,QAAQ;GACT;;CAGH;CAEA;CAEA;CAEA;CAEA;CAEA,IAAI;CAEJ;CAEA;CAEA,QAAQ;;CAGR;CAEA;CAEA,YAAY;CAEZ;CAEA;CAEA;CAEA;CAEA,YAAY;CAEZ;CAEA;CAEA;CAEA;CAEA;CAEA,YACE,QAIA;AACA,QAAM,UAAU,EAAE,CAAC;AACnB,OAAK,YAAY,qBAAA,QAAqC;AAEtD,OAAK,eACH,QAAQ,UACR,QAAQ,iBAAA,GAAA,0BAAA,wBACe,iBAAiB;AAC1C,OAAK,SAAS,KAAK;AAEnB,OAAK,eACH,QAAQ,eAAe,iBAAA,GAAA,0BAAA,wBACA,sBAAsB;AAE/C,OAAK,QAAQ,QAAQ,SAAS,QAAQ,aAAa,KAAK;AACxD,OACG,KAAK,OAAO,WAAW,gBAAgB,IACtC,KAAK,OAAO,WAAW,QAAQ,IAC/B,KAAK,OAAO,WAAW,KAAK,KAC9B,CAAC,KAAK,OAAO,SAAS,YAAY,CAElC,OAAM,IAAI,MACR;GACE,8BAA8B,KAAK,MAAM;GACzC;GACA;GACA;GACA;GACA;GACA;GACD,CAAC,KAAK,KAAK,CACb;AAEH,OAAK,YAAY,KAAK;AACtB,OAAK,cAAc,QAAQ,eAAe,EAAE;AAC5C,OAAK,YAAY,QAAQ,aAAa,KAAK;AAC3C,OAAK,UAAU,QAAQ;AAEvB,OAAK,cAAc,QAAQ,eAAe,KAAK;AAC/C,OAAK,YAAY,QAAQ,aAAa,KAAK;AAC3C,OAAK,OAAO,QAAQ,QAAQ,KAAK;AACjC,OAAK,mBAAmB,QAAQ,oBAAoB,KAAK;AACzD,OAAK,kBAAkB,QAAQ,mBAAmB,KAAK;AACvD,OAAK,IAAI,QAAQ,KAAK,KAAK;AAC3B,OAAK,SAAS,QAAQ,UAAU,KAAK;AACrC,OAAK,YAAY,QAAQ;AACzB,OAAK,OAAO,QAAQ,iBAAiB,QAAQ;AAC7C,OAAK,gBAAgB,KAAK;AAC1B,OAAK,OAAO,QAAQ;AAEpB,OAAK,YAAY,QAAQ,aAAa;AAEtC,MAAI,KAAK,aAAa,KAAK,UAAU,KAAK,SAAS,EACjD,OAAM,IAAI,MAAM,wCAAwC;AAG1D,OAAK,eAAe;GAClB,QAAQ,KAAK;GACb,cAAc,KAAK;GACnB,yBAAyB;GACzB,GAAG,QAAQ;GACZ;;;;;CAMH,iBACE,SACqD;AACrD,SAAO;GACL,OAAO,KAAK;GACZ,aAAa,KAAK;GAClB,YAAY,KAAK;GACjB,OAAO,KAAK;GACZ,mBAAmB,KAAK;GACxB,kBAAkB,KAAK;GACvB,GAAG,KAAK;GACR,SAAS,KAAK;GACd,YAAY,KAAK;GACjB,MAAM,SAAS,QAAQ,KAAK;GAC5B,MAAM,KAAK;GACX,QAAQ,KAAK;GACb,GAAG,KAAK;GACT;;;CAIH,qBAEkB;AAChB,SAAO;GACL,YAAY,KAAK;GACjB,GAAG,KAAK,kBAAkB;GAC1B,GAAG,KAAK;GACT;;;;;CAMH,oBAEkB;AAChB,SAAO,KAAK,oBAAoB;;;;;;;;;;;;;;;;;;CAmBlC,MAAM,UACJ,SACA,SACA,YACoB;EACpB,MAAM,cAAA,GAAA,kCAAA,YAAwB,SAAS,KAAK,UAAU;EACtD,MAAM,UAA2C,EAAE;EACnD,MAAM,aAAyB,EAAE;EAEjC,MAAM,SAAS,KAAK,iBAAiB,QAAQ;AAE7C,MAAI,OAAO,eAAe,IAAI;AAC5B,OAAI,QAAQ,WAAW,EACrB,OAAM,IAAI,MACR,yDACD;AAEH,UAAO,aAAa,OAAA,GAAA,qCAAA,oBAAyB;IAC3C,QAAQ,QAAQ;IAEhB,WAAW,KAAK;IACjB,CAAC;;AAGJ,OAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK,GAAG;GAC7C,MAAM,OAAO,OAAO,SAChB,OAAO,YAAY;IACjB,MAAM,UAA2C,EAAE;IACnD,IAAI;IACJ,MAAM,SAAS,MAAM,KAAK,oBACxB;KACE,GAAG;KACH,QAAQ;KACR,QAAQ,WAAW;KACpB,EACD,QACD;AACD,eAAW,MAAM,WAAW,QAAQ;AAElC,SAAI,CAAC,SACH,YAAW;MACT,IAAI,QAAQ;MACZ,QAAQ,QAAQ;MAChB,SAAS,QAAQ;MACjB,OAAO,QAAQ;MAChB;AAIH,UAAK,MAAM,QAAQ,QAAQ,SAAS;AAClC,UAAI,CAAC,QAAQ,KAAK,OAChB,SAAQ,KAAK,SAAS;WACjB;OACL,MAAM,SAAS,QAAQ,KAAK;AAC5B,cAAO,QAAQ,KAAK;AACpB,cAAO,gBAAgB,KAAK;AAC5B,cAAO,WAAW,KAAK;;AAGpB,kBAAY,kBAAkB,KAAK,MAAM;OAC5C,QAAQ,KAAK,MAAM,KAAK,QAAQ,KAAK,EAAE;OACvC,YAAY,KAAK,QAAQ,KAAK;OAC/B,CAAC;;;AAGN,QAAI,QAAQ,QAAQ,QAClB,OAAM,IAAI,MAAM,aAAa;AAE/B,WAAO;KAAE,GAAG;KAAU;KAAS;OAC7B,GACJ,MAAM,KAAK,oBACT;IACE,GAAG;IACH,QAAQ;IACR,QAAQ,WAAW;IACpB,EACD;IACE,QAAQ,QAAQ;IAChB,GAAG,QAAQ;IACZ,CACF;AAEL,WAAQ,KAAK,GAAG,KAAK,QAAQ;GAC7B,MAAM,EACJ,mBAAmB,kBACnB,eAAe,cACf,cAAc,gBACZ,KAAK,QACL,KAAK,QACL;IACE,mBAAmB,KAAA;IACnB,eAAe,KAAA;IACf,cAAc,KAAA;IACf;AAEL,OAAI,iBACF,YAAW,oBACR,WAAW,oBAAoB,KAAK;AAGzC,OAAI,aACF,YAAW,gBAAgB,WAAW,gBAAgB,KAAK;AAG7D,OAAI,YACF,YAAW,eAAe,WAAW,eAAe,KAAK;;AAa7D,SAAO;GACL,cAAA,GAAA,kCAAA,YAV6B,SAAS,KAAK,EAAE,CAAC,KAAK,kBACnD,cAAc,KAAK,YAAY;IAC7B,MAAM,OAAO,QAAQ;IACrB,gBAAgB;KACd,cAAc,OAAO;KACrB,UAAU,OAAO;KAClB;IACF,EAAE,CACJ;GAGC,WAAW,EAAE,YAAY;GAC1B;;CAIH,OAAO,sBACL,OACA,SACA,YACiC;EACjC,MAAM,SAAS;GACb,GAAG,KAAK,iBAAiB,QAAQ;GACjC,QAAQ;GACR,QAAQ;GACT;EACD,MAAM,SAAS,MAAM,KAAK,oBAAoB,QAAQ,QAAQ;AAC9D,aAAW,MAAM,QAAQ,QAAQ;GAC/B,MAAM,SAAS,MAAM,QAAQ;AAC7B,OAAI,CAAC,OACH;GAEF,MAAM,QAAQ,IAAIC,wBAAAA,gBAAgB;IAChC,MAAM,OAAO;IACb,gBAAgB,EACd,cAAc,OAAO,eACtB;IACF,CAAC;AACF,SAAM;AAED,eAAY,kBAAkB,MAAM,QAAQ,GAAG;;AAEtD,MAAI,QAAQ,QAAQ,QAClB,OAAM,IAAI,MAAM,aAAa;;CAoBjC,MAAM,oBACJ,SAGA,SAGA;EACA,MAAM,iBAAiB,KAAK,kBAAkB,QAAQ;AACtD,SAAO,KAAK,OAAO,KAAK,YAAY;AAClC,OAAI;AAKF,WAJY,MAAM,KAAK,OAAO,YAAY,OACxC,SACA,eACD;YAEM,GAAG;AAEV,UADcC,eAAAA,sBAAsB,EAAE;;IAGxC;;;;;;;;CASJ,kBACE,SAC0B;AAC1B,MAAI,CAAC,KAAK,QAAQ;GAKhB,MAAM,WAAWC,cAAAA,YAJkC,EACjD,SAAS,KAAK,aAAa,SAC5B,CAEiD;GAElD,MAAM,SAAS;IACb,GAAG,KAAK;IACR,SAAS;IACT,SAAS,KAAK;IACd,YAAY;IACb;AAED,OAAI,CAAC,OAAO,QACV,QAAO,OAAO;AAGhB,UAAO,iBAAiBC,cAAAA,wBAAwB,OAAO,eAAe;AAEtE,QAAK,SAAS,IAAIC,OAAAA,OAAa,OAAO;;AAMxC,SAJuB;GACrB,GAAG,KAAK;GACR,GAAG;GACJ;;CAIH,WAAW;AACT,SAAO"}