declare const PreTrainedModel_base: new () => {
    (...args: any[]): any;
    _call(...args: any[]): any;
};
/**
 * A base class for pre-trained models that provides the model configuration and an ONNX session.
 * @extends Callable
 */
export class PreTrainedModel extends PreTrainedModel_base {
    /**
     * Instantiate one of the model classes of the library from a pretrained model.
     *
     * The model class to instantiate is selected based on the `model_type` property of the config object
     * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
     *
     * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
     * - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
     *   Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
     *   user or organization name, like `dbmdz/bert-base-german-cased`.
     * - A path to a *directory* containing model weights, e.g., `./my_model_directory/`.
     * @param {PretrainedOptions} options Additional options for loading the model.
     *
     * @returns {Promise} A new instance of the `PreTrainedModel` class.
     */
    static from_pretrained(pretrained_model_name_or_path: string, {
        quantized,
        progress_callback,
        config,
        cache_dir,
        local_files_only,
        revision,
    }?: PretrainedOptions): Promise<PreTrainedModel>;
    /**
     * Creates a new instance of the `PreTrainedModel` class.
     * @param {Object} config The model configuration.
     * @param {any} session session for the model.
     */
    constructor(config: any, session: any);
    config: any;
    session: any;
    /**
     * Disposes of all the ONNX sessions that were created during inference.
     * @returns {Promise} An array of promises, one for each ONNX session that is being disposed.
     * @todo Use https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry
     */
    dispose(): Promise<any>;
    /**
     * Runs the model with the provided inputs
     * @param {Object} model_inputs Object containing input tensors
     * @returns {Promise} Object containing output tensors
     */
    _call(model_inputs: any): Promise<any>;
    /**
     * Forward method for a pretrained model. If not overridden by a subclass, the correct forward method
     * will be chosen based on the model type.
     * @param {Object} model_inputs The input data to the model in the format specified in the ONNX model.
     * @returns {Promise} The output data from the model in the format specified in the ONNX model.
     * @throws {Error} This method must be implemented in subclasses.
     */
    forward(model_inputs: any): Promise<any>;
    /**
     * @param {GenerationConfig} generation_config
     * @param {number} input_ids_seq_length The starting sequence length for the input ids.
     * @returns {LogitsProcessorList}
     */
    _get_logits_processor(generation_config: GenerationConfig, input_ids_seq_length: number, logits_processor?: any): LogitsProcessorList;
    /**
     * This function merges multiple generation configs together to form a final generation config to be used by the model for text generation.
     * It first creates an empty `GenerationConfig` object, then it applies the model's own `generation_config` property to it. Finally, if a `generation_config` object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.
     *
     * @param {GenerationConfig} generation_config A `GenerationConfig` object containing generation parameters.
     * @returns {GenerationConfig} The final generation config object to be used by the model for text generation.
     */
    _get_generation_config(generation_config: GenerationConfig): GenerationConfig;
    /**
     * @typedef {import('./utils/maths.js').TypedArray} TypedArray
     */
    /**
     * Generates text based on the given inputs and generation configuration using the model.
     * @param {Tensor|Array|TypedArray} inputs An array of input token IDs.
     * @param {Object|null} generation_config The generation configuration to use. If null, default configuration will be used.
     * @param {Object|null} logits_processor An optional logits processor to use. If null, a new LogitsProcessorList instance will be created.
     * @param {Object} options options
     * @param {Object} [options.inputs_attention_mask=null] An optional attention mask for the inputs.
     * @returns {Promise} An array of generated output sequences, where each sequence is an array of token IDs.
     * @throws {Error} Throws an error if the inputs array is empty.
     */
    generate(inputs: any[] | import("./utils/maths.js").TypedArray | Tensor, generation_config?: any | null, logits_processor?: any | null, {
        inputs_attention_mask
    }?: {
        inputs_attention_mask?: any;
    }): Promise<any>;
    /**
     * Groups an array of beam objects by their ids.
     *
     * @param {Array} beams The array of beam objects to group.
     * @returns {Array} An array of arrays, where each inner array contains beam objects with the same id.
     */
    groupBeams(beams: any[]): any[];
    /**
     * Returns an object containing past key values from the given decoder results object.
     *
     * @param {Object} decoderResults The decoder results object.
     * @param {Object} pastKeyValues The previous past key values.
     * @returns {Object} An object containing past key values.
     */
    getPastKeyValues(decoderResults: any, pastKeyValues: any): any;
    /**
     * Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values.
     *
     * @param {Object} decoderFeeds The decoder feeds object to add past key values to.
     * @param {Object} pastKeyValues An object containing past key values.
     * @param {boolean} [hasDecoder=false] Whether the model has a decoder.
     */
    addPastKeyValues(decoderFeeds: any, pastKeyValues: any, hasDecoder?: boolean): void;
}
export class ModelOutput {
}
/**
 * Base class for model's outputs, with potential hidden states and attentions.
 */
export class BaseModelOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.last_hidden_state Sequence of hidden-states at the output of the last layer of the model.
     * @param {Tensor} [output.hidden_states] Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
     * @param {Tensor} [output.attentions] Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
     */
    constructor({ last_hidden_state, hidden_states, attentions }: {
        last_hidden_state: Tensor;
        hidden_states?: Tensor;
        attentions?: Tensor;
    });
    last_hidden_state: Tensor;
    hidden_states: Tensor;
    attentions: Tensor;
}
export class BertPreTrainedModel extends PreTrainedModel {
}
export class BertModel extends BertPreTrainedModel {
}
/**
 * BertForMaskedLM is a class representing a BERT model for masked language modeling.
 * @extends BertPreTrainedModel
 */
export class BertForMaskedLM extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for masked language modeling.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * BertForSequenceClassification is a class representing a BERT model for sequence classification.
 * @extends BertPreTrainedModel
 */
export class BertForSequenceClassification extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for sequence classification.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * BertForTokenClassification is a class representing a BERT model for token classification.
 * @extends BertPreTrainedModel
 */
export class BertForTokenClassification extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for token classification.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * BertForQuestionAnswering is a class representing a BERT model for question answering.
 * @extends BertPreTrainedModel
 */
export class BertForQuestionAnswering extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for question answering.
     */
    _call(model_inputs: any): Promise<any>;
}
export class DistilBertPreTrainedModel extends PreTrainedModel {
}
export class DistilBertModel extends DistilBertPreTrainedModel {
}
/**
 * DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.
 * @extends DistilBertPreTrainedModel
 */
export class DistilBertForSequenceClassification extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for sequence classification.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.
 * @extends DistilBertPreTrainedModel
 */
export class DistilBertForTokenClassification extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for token classification.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.
 * @extends DistilBertPreTrainedModel
 */
export class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for question answering.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * DistilBertForMaskedLM is a class representing a DistilBERT model for masking task.
 * @extends DistilBertPreTrainedModel
 */
export class DistilBertForMaskedLM extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class MobileBertPreTrainedModel extends PreTrainedModel {
}
export class MobileBertModel extends MobileBertPreTrainedModel {
}
/**
 * MobileBertForMaskedLM is a class representing a MobileBERT model for masking task.
 * @extends MobileBertPreTrainedModel
 */
export class MobileBertForMaskedLM extends MobileBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * @extends MobileBertPreTrainedModel
 */
export class MobileBertForSequenceClassification extends MobileBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * @extends MobileBertPreTrainedModel
 */
export class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class SqueezeBertPreTrainedModel extends PreTrainedModel {
}
export class SqueezeBertModel extends SqueezeBertPreTrainedModel {
}
export class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class AlbertPreTrainedModel extends PreTrainedModel {
}
export class AlbertModel extends AlbertPreTrainedModel {
}
export class AlbertForSequenceClassification extends AlbertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class AlbertForQuestionAnswering extends AlbertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class AlbertForMaskedLM extends AlbertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class T5PreTrainedModel extends PreTrainedModel {
}
export class T5Model extends T5PreTrainedModel {
    /**
     * Generates text based on the provided arguments.
     * @throws {Error} Throws an error as the current model class (T5Model) is not compatible with `.generate()`.
     * @returns {Promise}
     * @param {any[]} args
     */
    generate(...args: any[]): Promise<any>;
}
/**
 * T5Model is a class representing a T5 model for conditional generation.
 * @extends T5PreTrainedModel
 */
export class T5ForConditionalGeneration extends T5PreTrainedModel {
    /**
     * Creates a new instance of the `T5ForConditionalGeneration` class.
     * @param {Object} config The model configuration.
     * @param {any} session session for the model.
     * @param {any} decoder_merged_session session for the decoder.
     * @param {GenerationConfig} generation_config The generation configuration.
     */
    constructor(config: any, session: any, decoder_merged_session: any, generation_config: GenerationConfig);
    decoder_merged_session: any;
    generation_config: GenerationConfig;
    num_decoder_layers: any;
    num_decoder_heads: any;
    decoder_dim_kv: any;
    num_encoder_layers: any;
    num_encoder_heads: any;
    encoder_dim_kv: any;
    /**
     * Generates the start beams for a given set of inputs and output length.
     * @param {number[][]} inputs The input token IDs.
     * @param {number} numOutputTokens The desired output length.
     * @returns {Array} The start beams.
     */
    getStartBeams(inputs: number[][], numOutputTokens: number, ...args: any[]): any[];
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the given beam with a new token ID.
     * @param {any} beam The current beam.
     * @param {number} newTokenId The new token ID to add to the output sequence.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class MT5PreTrainedModel extends PreTrainedModel {
}
export class MT5Model extends MT5PreTrainedModel {
    /**
     *
     * @param {...any} args
     * @returns {Promise}
     * @throws {Error}
     */
    generate(...args: any[]): Promise<any>;
}
/**
 * A class representing a conditional sequence-to-sequence model based on the MT5 architecture.
 *
 * @extends MT5PreTrainedModel
 */
export class MT5ForConditionalGeneration extends MT5PreTrainedModel {
    /**
     * Creates a new instance of the `MT5ForConditionalGeneration` class.
     * @param {any} config The model configuration.
     * @param {any} session The ONNX session containing the encoder weights.
     * @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.
     * @param {GenerationConfig} generation_config The generation configuration.
     */
    constructor(config: any, session: any, decoder_merged_session: any, generation_config: GenerationConfig);
    decoder_merged_session: any;
    generation_config: GenerationConfig;
    num_decoder_layers: any;
    num_decoder_heads: any;
    decoder_dim_kv: any;
    num_encoder_layers: any;
    num_encoder_heads: any;
    encoder_dim_kv: any;
    /**
     * Generates the start beams for the given input tokens and output sequence length.
     *
     * @param {any[]} inputs The input sequence.
     * @param {number} numOutputTokens The desired length of the output sequence.
     * @param {...*} args Additional arguments to pass to the `seq2seqStartBeams` function.
     * @returns {any[]} An array of `Beam` objects representing the start beams.
     */
    getStartBeams(inputs: any[], numOutputTokens: number, ...args: any[]): any[];
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the given beam with the new predicted token.
     * @param {any} beam The beam to update.
     * @param {number} newTokenId The index of the predicted token.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class BartPretrainedModel extends PreTrainedModel {
}
/**
 * BART encoder and decoder model.
 *
 * @hideconstructor
 * @extends BartPretrainedModel
 */
export class BartModel extends BartPretrainedModel {
    /**
     * Throws an error because the current model class (BartModel) is not compatible with `.generate()`.
     *
     * @throws {Error} The current model class (BartModel) is not compatible with `.generate()`.
     * @returns {Promise}
     */
    generate(...args: any[]): Promise<any>;
}
/**
 * BART model with a language model head for conditional generation.
 * @extends BartPretrainedModel
 */
export class BartForConditionalGeneration extends BartPretrainedModel {
    /**
     * Creates a new instance of the `BartForConditionalGeneration` class.
     * @param {Object} config The configuration object for the Bart model.
     * @param {Object} session The ONNX session used to execute the model.
     * @param {Object} decoder_merged_session The ONNX session used to execute the decoder.
     * @param {Object} generation_config The generation configuration object.
     */
    constructor(config: any, session: any, decoder_merged_session: any, generation_config: any);
    decoder_merged_session: any;
    generation_config: any;
    num_decoder_layers: any;
    num_decoder_heads: any;
    decoder_dim_kv: number;
    num_encoder_layers: any;
    num_encoder_heads: any;
    encoder_dim_kv: number;
    /**
     * Returns the initial beam for generating output text.
     * @param {Object} inputs The input object containing the encoded input text.
     * @param {number} numOutputTokens The maximum number of output tokens to generate.
     * @param {...any} args Additional arguments to pass to the sequence-to-sequence generation function.
     * @returns {any} The initial beam for generating output text.
     */
    getStartBeams(inputs: any, numOutputTokens: number, ...args: any[]): any;
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the beam by appending the newly generated token ID to the list of output token IDs.
     * @param {any} beam The current beam being generated.
     * @param {number} newTokenId The ID of the newly generated token to append to the list of output token IDs.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class BartForSequenceClassification extends BartPretrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for sequence classification.
     */
    _call(model_inputs: any): Promise<any>;
}
export class RobertaPreTrainedModel extends PreTrainedModel {
}
export class RobertaModel extends RobertaPreTrainedModel {
}
/**
 * RobertaForMaskedLM class for performing masked language modeling on Roberta models.
 * @extends RobertaPreTrainedModel
 */
export class RobertaForMaskedLM extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * RobertaForSequenceClassification class for performing sequence classification on Roberta models.
 * @extends RobertaPreTrainedModel
 */
export class RobertaForSequenceClassification extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * RobertaForTokenClassification class for performing token classification on Roberta models.
 * @extends RobertaPreTrainedModel
 */
export class RobertaForTokenClassification extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for token classification.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * RobertaForQuestionAnswering class for performing question answering on Roberta models.
 * @extends RobertaPreTrainedModel
 */
export class RobertaForQuestionAnswering extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class XLMRobertaPreTrainedModel extends PreTrainedModel {
}
export class XLMRobertaModel extends XLMRobertaPreTrainedModel {
}
/**
 * XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.
 * @extends XLMRobertaPreTrainedModel
 */
export class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models.
 * @extends XLMRobertaPreTrainedModel
 */
export class XLMRobertaForSequenceClassification extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models.
 * @extends XLMRobertaPreTrainedModel
 */
export class XLMRobertaForTokenClassification extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} An object containing the model's output logits for token classification.
     */
    _call(model_inputs: any): Promise<any>;
}
/**
 * XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models.
 * @extends XLMRobertaPreTrainedModel
 */
export class XLMRobertaForQuestionAnswering extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise} returned object
     */
    _call(model_inputs: any): Promise<any>;
}
export class WhisperPreTrainedModel extends PreTrainedModel {
}
/**
 * WhisperModel class for training Whisper models without a language model head.
 * @extends WhisperPreTrainedModel
 */
export class WhisperModel extends WhisperPreTrainedModel {
    /**
     * Throws an error when attempting to generate output since this model doesn't have a language model head.
     * @throws Error
     * @returns {Promise}
     * @param {any[]} args
     */
    generate(...args: any[]): Promise<any>;
}
/**
 * WhisperForConditionalGeneration class for generating conditional outputs from Whisper models.
 * @extends WhisperPreTrainedModel
 */
export class WhisperForConditionalGeneration extends WhisperPreTrainedModel {
    /**
     * Creates a new instance of the `WhisperForConditionalGeneration` class.
     * @param {Object} config Configuration object for the model.
     * @param {Object} session ONNX Session object for the model.
     * @param {Object} decoder_merged_session ONNX Session object for the decoder.
     * @param {Object} generation_config Configuration object for the generation process.
     */
    constructor(config: any, session: any, decoder_merged_session: any, generation_config: any);
    decoder_merged_session: any;
    generation_config: any;
    num_decoder_layers: any;
    num_decoder_heads: any;
    decoder_dim_kv: number;
    num_encoder_layers: any;
    num_encoder_heads: any;
    encoder_dim_kv: number;
    /**
     * Generates outputs based on input and generation configuration.
     * @param {Object} inputs Input data for the model.
     * @param {Object} generation_config Configuration object for the generation process.
     * @param {Object} logits_processor Optional logits processor object.
     * @returns {Promise} Promise object represents the generated outputs.
     */
    generate(inputs: any, generation_config?: any, logits_processor?: any): Promise<any>;
    /**
     * Gets the start beams for generating outputs.
     * @param {Array} inputTokenIds Array of input token IDs.
     * @param {number} numOutputTokens Number of output tokens to generate.
     * @returns {Array} Array of start beams.
     */
    getStartBeams(inputTokenIds: any[], numOutputTokens: number, ...args: any[]): any[];
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the beam by appending the newly generated token ID to the list of output token IDs.
     * @param {any} beam The current beam being generated.
     * @param {number} newTokenId The ID of the newly generated token to append to the list of output token IDs.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
/**
 * Vision Encoder-Decoder model based on OpenAI's GPT architecture for image captioning and other vision tasks
 * @extends PreTrainedModel
 */
export class VisionEncoderDecoderModel extends PreTrainedModel {
    /**
     * Creates a new instance of the `VisionEncoderDecoderModel` class.
     * @param {Object} config The configuration object specifying the hyperparameters and other model settings.
     * @param {Object} session The ONNX session containing the encoder model.
     * @param {any} decoder_merged_session The ONNX session containing the merged decoder model.
     */
    constructor(config: any, session: any, decoder_merged_session: any);
    decoder_merged_session: any;
    num_layers: any;
    num_heads: any;
    dim_kv: number;
    /**
     * Generate beam search outputs for the given input pixels and number of output tokens.
     *
     * @param {array} inputs The input pixels as a Tensor.
     * @param {number} numOutputTokens The number of output tokens to generate.
     * @param {...*} args Optional additional arguments to pass to seq2seqStartBeams.
     * @returns {any} An array of Beam objects representing the top-K output sequences.
     */
    getStartBeams(inputs: any[], numOutputTokens: number, ...args: any[]): any;
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Update the given beam with the additional predicted token ID.
     *
     * @param {any} beam The current beam.
     * @param {number} newTokenId The new predicted token ID to add to the beam's output sequence.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class CLIPPreTrainedModel extends PreTrainedModel {
}
export class CLIPModel extends CLIPPreTrainedModel {
}
export class GPT2PreTrainedModel extends PreTrainedModel {
    num_heads: any;
    num_layers: any;
    dim_kv: number;
}
export class GPT2Model extends GPT2PreTrainedModel {
    /**
     * GPT2Model is not compatible with `.generate()`, as it doesn't have a language model head.
     * @param {...any} args
     * @throws {Error}
     * @returns {Promise}
     */
    generate(...args: any[]): Promise<any>;
}
/**
 * GPT-2 language model head on top of the GPT-2 base model. This model is suitable for text generation tasks.
 * @extends GPT2PreTrainedModel
 */
export class GPT2LMHeadModel extends GPT2PreTrainedModel {
    /**
     * Initializes and returns the beam for text generation task
     * @param {Tensor} inputTokenIds The input token ids.
     * @param {number} numOutputTokens The number of tokens to be generated.
     * @param {Tensor} inputs_attention_mask Optional input attention mask.
     * @returns {any} A Beam object representing the initialized beam.
     */
    getStartBeams(inputTokenIds: Tensor, numOutputTokens: number, inputs_attention_mask: Tensor): any;
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the given beam with the new generated token id.
     * @param {any} beam The Beam object representing the beam.
     * @param {number} newTokenId The new generated token id to be added to the beam.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class GPTNeoPreTrainedModel extends PreTrainedModel {
    num_heads: any;
    num_layers: any;
    dim_kv: number;
}
export class GPTNeoModel extends GPTNeoPreTrainedModel {
    /**
     *
     * @param {...any} args
     * @throws {Error}
     * @returns {Promise}
     */
    generate(...args: any[]): Promise<any>;
}
export class GPTNeoForCausalLM extends GPTNeoPreTrainedModel {
    /**
     * Initializes and returns the beam for text generation task
     * @param {Tensor} inputTokenIds The input token ids.
     * @param {number} numOutputTokens The number of tokens to be generated.
     * @param {Tensor} inputs_attention_mask Optional input attention mask.
     * @returns {any} A Beam object representing the initialized beam.
     */
    getStartBeams(inputTokenIds: Tensor, numOutputTokens: number, inputs_attention_mask: Tensor): any;
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the given beam with the new generated token id.
     * @param {any} beam The Beam object representing the beam.
     * @param {number} newTokenId The new generated token id to be added to the beam.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class CodeGenPreTrainedModel extends PreTrainedModel {
    num_heads: any;
    num_layers: any;
    dim_kv: number;
}
/**
 * CodeGenModel is a class representing a code generation model without a language model head.
 *
 * @extends CodeGenPreTrainedModel
 */
export class CodeGenModel extends CodeGenPreTrainedModel {
    /**
     * Throws an error indicating that the current model class is not compatible with `.generate()`,
     * as it doesn't have a language model head.
     *
     * @throws {Error} The current model class is not compatible with `.generate()`
     *
     * @param {...any} args Arguments passed to the generate function
     * @returns {Promise}
     */
    generate(...args: any[]): Promise<any>;
}
/**
 * CodeGenForCausalLM is a class that represents a code generation model based on the GPT-2 architecture. It extends the `CodeGenPreTrainedModel` class.
 * @extends CodeGenPreTrainedModel
 */
export class CodeGenForCausalLM extends CodeGenPreTrainedModel {
    /**
     * Initializes and returns the beam for text generation task
     * @param {Tensor} inputTokenIds The input token ids.
     * @param {number} numOutputTokens The number of tokens to be generated.
     * @param {Tensor} inputs_attention_mask Optional input attention mask.
     * @returns {any} A Beam object representing the initialized beam.
     */
    getStartBeams(inputTokenIds: Tensor, numOutputTokens: number, inputs_attention_mask: Tensor): any;
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * Updates the given beam with the new generated token id.
     * @param {any} beam The Beam object representing the beam.
     * @param {number} newTokenId The new generated token id to be added to the beam.
     */
    updateBeam(beam: any, newTokenId: number): void;
}
export class ViTPreTrainedModel extends PreTrainedModel {
}
export class ViTForImageClassification extends ViTPreTrainedModel {
    /**
     * @param {any} model_inputs
     */
    _call(model_inputs: any): Promise<any>;
}
export class MobileViTPreTrainedModel extends PreTrainedModel {
}
export class MobileViTForImageClassification extends MobileViTPreTrainedModel {
    /**
     * @param {any} model_inputs
     */
    _call(model_inputs: any): Promise<any>;
}
export class DetrPreTrainedModel extends PreTrainedModel {
}
export class DetrForObjectDetection extends DetrPreTrainedModel {
    /**
     * @param {any} model_inputs
     */
    _call(model_inputs: any): Promise<any>;
}
export class DetrForSegmentation extends DetrPreTrainedModel {
    /**
     * Runs the model with the provided inputs
     * @param {Object} model_inputs Model inputs
     * @returns {Promise} Object containing segmentation outputs
     */
    _call(model_inputs: any): Promise<any>;
}
export class DetrObjectDetectionOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Classification logits (including no-object) for all queries.
     * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height).
     * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding).
     */
    constructor({ logits, pred_boxes }: {
        logits: Tensor;
        pred_boxes: Tensor;
    });
    logits: Tensor;
    pred_boxes: Tensor;
}
export class DetrSegmentationOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits The output logits of the model.
     * @param {Tensor} output.pred_boxes Predicted boxes.
     * @param {Tensor} output.pred_masks Predicted masks.
     */
    constructor({ logits, pred_boxes, pred_masks }: {
        logits: Tensor;
        pred_boxes: Tensor;
        pred_masks: Tensor;
    });
    logits: Tensor;
    pred_boxes: Tensor;
    pred_masks: Tensor;
}
export class SamPreTrainedModel extends PreTrainedModel {
}
export class SamModel extends SamPreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.pixel_values Pixel values as a Tensor with shape `(batch_size, num_channels, height, width)`.
     * @param {Tensor} model_inputs.input_points Input 2D spatial points with shape `(batch_size, num_points, 2)`. This is used by the prompt encoder to encode the prompt.
     * @todo Add support for `input_labels`, `input_boxes`, `input_masks`, and `image_embeddings`.
     */
    _call(model_inputs: {
        pixel_values: Tensor;
        input_points: Tensor;
    }): Promise<any>;
}
/**
 * Base class for Segment-Anything model's output.
 */
export class SamImageSegmentationOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.iou_scores The output logits of the model.
     * @param {Tensor} output.pred_masks Predicted boxes.
     */
    constructor({ iou_scores, pred_masks }: {
        iou_scores: Tensor;
        pred_masks: Tensor;
    });
    iou_scores: Tensor;
    pred_masks: Tensor;
}
export class MarianPreTrainedModel extends PreTrainedModel {
}
export class MarianModel extends MarianPreTrainedModel {
    /**
     *
     * @param {...any} args
     * @throws {Error}
     * @returns {Promise}
     */
    generate(...args: any[]): Promise<any>;
}
export class MarianMTModel extends MarianPreTrainedModel {
    /**
     * Creates a new instance of the `MarianMTModel` class.
     * @param {Object} config The model configuration object.
     * @param {Object} session The ONNX session object.
     * @param {any} decoder_merged_session
     * @param {any} generation_config
     */
    constructor(config: any, session: any, decoder_merged_session: any, generation_config: any);
    decoder_merged_session: any;
    generation_config: any;
    num_decoder_layers: any;
    num_decoder_heads: any;
    decoder_dim_kv: number;
    num_encoder_layers: any;
    num_encoder_heads: any;
    encoder_dim_kv: number;
    /**
     * Initializes and returns the beam for text generation task
     * @param {any[]} inputs The input token ids.
     * @param {number} numOutputTokens The number of tokens to be generated.
     * @returns {any} A Beam object representing the initialized beam.
     * @param {any[]} args
     */
    getStartBeams(inputs: any[], numOutputTokens: number, ...args: any[]): any;
    /**
     * Runs a single step of the beam search generation algorithm.
     * @param {any} beam The current beam being generated.
     * @returns {Promise} The updated beam after a single generation step.
     */
    runBeam(beam: any): Promise<any>;
    /**
     * @param {any} beam
     * @param {any} newTokenId
     */
    updateBeam(beam: any, newTokenId: any): void;
    /**
     * @param {any} model_inputs
     * @returns {Promise}
     */
    forward(model_inputs: any): Promise<any>;
}
export class M2M100PreTrainedModel extends PreTrainedModel {
}
export class M2M100Model extends M2M100PreTrainedModel {
    /**
     *
     * @param {...any} args
     * @throws {Error}
     * @returns {Promise}
     */
    generate(...args: any[]): Promise<any>;
}
export class M2M100ForConditionalGeneration extends M2M100PreTrainedModel {
    /**
     * Creates a new instance of the `M2M100ForConditionalGeneration` class.
     * @param {Object} config The model configuration object.
     * @param {Object} session The ONNX session object.
* @param {any} decoder_merged_session * @param {any} generation_config */ constructor(config: any, session: any, decoder_merged_session: any, generation_config: any); decoder_merged_session: any; generation_config: any; num_decoder_layers: any; num_decoder_heads: any; decoder_dim_kv: number; num_encoder_layers: any; num_encoder_heads: any; encoder_dim_kv: number; /** * Initializes and returns the beam for text generation task * @param {any[]} inputs The input token ids. * @param {number} numOutputTokens The number of tokens to be generated. * @returns {any} A Beam object representing the initialized beam. * @param {any[]} args */ getStartBeams(inputs: any[], numOutputTokens: number, ...args: any[]): any; /** * Runs a single step of the beam search generation algorithm. * @param {any} beam The current beam being generated. * @returns {Promise} The updated beam after a single generation step. */ runBeam(beam: any): Promise; /** * @param {any} beam * @param {any} newTokenId */ updateBeam(beam: any, newTokenId: any): void; /** * @param {any} model_inputs * @returns {Promise} */ forward(model_inputs: any): Promise; } /** * Base class of all AutoModels. Contains the `from_pretrained` function * which is used to instantiate pretrained models. */ export class PretrainedMixin { /** * Mapping from model type to model class. * @type {Map[]} */ static MODEL_CLASS_MAPPINGS: Map[]; /** * Whether to attempt to instantiate the base class (`PretrainedModel`) if * the model type is not found in the mapping. */ static BASE_IF_FAIL: boolean; /** * Instantiate one of the model classes of the library from a pretrained model. * * The model class to instantiate is selected based on the `model_type` property of the config object * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) * * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. 
Can be either: * - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a * user or organization name, like `dbmdz/bert-base-german-cased`. * - A path to a *directory* containing model weights, e.g., `./my_model_directory/`. * @param {PretrainedOptions} options Additional options for loading the model. * * @returns {Promise} A new instance of the `PreTrainedModel` class. */ static from_pretrained(pretrained_model_name_or_path: string, { quantized, progress_callback, config, cache_dir, local_files_only, revision, }?: import("./utils/hub.js").PretrainedOptions): Promise; } /** * Helper class which is used to instantiate pretrained models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModel.from_pretrained('bert-base-uncased'); */ export class AutoModel extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained sequence classification models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForSequenceClassification.from_pretrained('distilbert-base-uncased-finetuned-sst-2-english'); */ export class AutoModelForSequenceClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained token classification models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. 
* * @example * let model = await AutoModelForTokenClassification.from_pretrained('Davlan/distilbert-base-multilingual-cased-ner-hrl'); */ export class AutoModelForTokenClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained sequence-to-sequence models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForSeq2SeqLM.from_pretrained('t5-small'); */ export class AutoModelForSeq2SeqLM extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained causal language models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForCausalLM.from_pretrained('gpt2'); */ export class AutoModelForCausalLM extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained masked language models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForMaskedLM.from_pretrained('bert-base-uncased'); */ export class AutoModelForMaskedLM extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained question answering models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForQuestionAnswering.from_pretrained('distilbert-base-cased-distilled-squad'); */ export class AutoModelForQuestionAnswering extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained vision-to-sequence models with the `from_pretrained` function. 
* The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForVision2Seq.from_pretrained('nlpconnect/vit-gpt2-image-captioning'); */ export class AutoModelForVision2Seq extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained image classification models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForImageClassification.from_pretrained('google/vit-base-patch16-224'); */ export class AutoModelForImageClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained image segmentation models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForImageSegmentation.from_pretrained('facebook/detr-resnet-50-panoptic'); */ export class AutoModelForImageSegmentation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained object detection models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForObjectDetection.from_pretrained('facebook/detr-resnet-50'); */ export class AutoModelForObjectDetection extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } /** * Helper class which is used to instantiate pretrained object detection models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. 
* * @example * let model = await AutoModelForMaskGeneration.from_pretrained('Xenova/sam-vit-base'); */ export class AutoModelForMaskGeneration extends PretrainedMixin { static MODEL_CLASS_MAPPINGS: Map[]; } export class Seq2SeqLMOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits The output logits of the model. * @param {Tensor} output.past_key_values An tensor of key/value pairs that represent the previous state of the model. * @param {Tensor} output.encoder_outputs The output of the encoder in a sequence-to-sequence model. */ constructor({ logits, past_key_values, encoder_outputs }: { logits: Tensor; past_key_values: Tensor; encoder_outputs: Tensor; }); logits: Tensor; past_key_values: Tensor; encoder_outputs: Tensor; } /** * Base class for outputs of sentence classification models. */ export class SequenceClassifierOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits classification (or regression if config.num_labels==1) scores (before SoftMax). */ constructor({ logits }: { logits: Tensor; }); logits: Tensor; } /** * Base class for outputs of token classification models. */ export class TokenClassifierOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Classification scores (before SoftMax). */ constructor({ logits }: { logits: Tensor; }); logits: Tensor; } /** * Base class for masked language models outputs. */ export class MaskedLMOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). */ constructor({ logits }: { logits: Tensor; }); logits: Tensor; } /** * Base class for outputs of question answering models. 
*/ export class QuestionAnsweringModelOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.start_logits Span-start scores (before SoftMax). * @param {Tensor} output.end_logits Span-end scores (before SoftMax). */ constructor({ start_logits, end_logits }: { start_logits: Tensor; end_logits: Tensor; }); start_logits: Tensor; end_logits: Tensor; } /** * Base class for causal language model (or autoregressive) outputs. */ export class CausalLMOutputWithPast extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax). * @param {Tensor} output.past_key_values Contains pre-computed hidden-states (key and values in the self-attention blocks) * that can be used (see `past_key_values` input) to speed up sequential decoding. */ constructor({ logits, past_key_values }: { logits: Tensor; past_key_values: Tensor; }); logits: Tensor; past_key_values: Tensor; } export type PretrainedOptions = import('./utils/hub.js').PretrainedOptions; import { GenerationConfig } from './utils/generation.js'; import { LogitsProcessorList } from './utils/generation.js'; import { Tensor } from './utils/tensor.js'; export {}; //# sourceMappingURL=models.d.ts.map