export { modelMappings } from "./mappings";
/** Common settings shape shared by the instruction/chat text-generation models below. */
type ChatModelSettings = {
    type: string;
    inputsDefaultsStream: {
        max_tokens: number;
    };
    inputsDefaults: {
        max_tokens: number;
    };
    preProcessingArgs: {
        promptTemplate: string;
        defaultContext: string;
        defaultPromptMessages: (context: string, prompt: string) => {
            role: string;
            content: string;
        }[];
    };
    postProcessingFunc: (r: any, inputs: any) => any;
    postProcessingFuncStream: (r: any, inputs: any, len: number) => any;
};
/** Post-processing output shape shared by the Whisper speech-to-text models below. */
type WhisperModelSettings = {
    postProcessingFunc: (response: any, inputs: any) => {
        text: any;
        word_count: number;
        words: any;
        vtt: string;
    };
};
/**
 * Per-model configuration, keyed by model ID: default inputs for streaming and
 * non-streaming calls, prompt pre-processing arguments, and response
 * post-processing hooks. Entries typed `any` carry no typed settings in this
 * declaration.
 */
export declare const modelSettings: {
    "@hf/thebloke/deepseek-coder-6.7b-instruct-awq": ChatModelSettings;
    "@hf/thebloke/deepseek-coder-6.7b-base-awq": ChatModelSettings;
    "@hf/thebloke/llamaguard-7b-awq": ChatModelSettings;
    "@hf/thebloke/openchat_3.5-awq": ChatModelSettings & {
        experimental: boolean;
    };
    "@hf/thebloke/openhermes-2.5-mistral-7b-awq": ChatModelSettings;
    "@hf/thebloke/neural-chat-7b-v3-1-awq": ChatModelSettings;
    "@hf/thebloke/llama-2-13b-chat-awq": ChatModelSettings;
    "@hf/thebloke/zephyr-7b-beta-awq": ChatModelSettings;
    "@hf/thebloke/mistral-7b-instruct-v0.1-awq": ChatModelSettings;
    "@hf/mistral/mistral-7b-instruct-v0.2": ChatModelSettings;
    "@hf/google/gemma-7b-it": ChatModelSettings;
    "@hf/nousresearch/hermes-2-pro-mistral-7b": ChatModelSettings;
    "@hf/nexusflow/starling-lm-7b-beta": ChatModelSettings;
    "@hf/thebloke/codellama-7b-instruct-awq": ChatModelSettings;
    "@cf/microsoft/phi-2": any;
    "@cf/defog/sqlcoder-7b-2": any;
    "@cf/deepseek-ai/deepseek-math-7b-instruct": any;
    "@cf/tiiuae/falcon-7b-instruct": any;
    "@cf/thebloke/discolm-german-7b-v1-awq": any;
    "@cf/qwen/qwen1.5-14b-chat-awq": any;
    "@cf/qwen/qwen1.5-0.5b-chat": any;
    "@cf/qwen/qwen1.5-1.8b-chat": any;
    "@cf/qwen/qwen1.5-7b-chat-awq": any;
    "@cf/tinyllama/tinyllama-1.1b-chat-v1.0": any;
    "@cf/openchat/openchat-3.5-0106": any;
    "@cf/google/gemma-2b-it-lora": any;
    "@cf/google/gemma-7b-it-lora": any;
    "@cf/mistral/mistral-7b-instruct-v0.2-lora": any;
    "@cf/mistral/mixtral-8x7b-instruct-v0.1-awq": any;
    "@cf/meta-llama/llama-2-7b-chat-hf-lora": any;
    "@cf/mistral/mistral-7b-instruct-v0.1-vllm": any;
    "@cf/meta/llama-3-8b-instruct": any;
    "@cf/llava-hf/llava-1.5-7b-hf": {
        inputsDefaults: {
            temperature: number;
        };
        preProcessingArgs: {
            defaultContext: string;
            promptTemplate: string;
            defaultPromptMessages: (context: string, prompt: string) => {
                role: string;
                content: string;
            }[];
        };
        generateTensorsFunc: (preProcessedInputs: any) => import("../tensor").Tensor[];
    };
    "@cf/nexaaidev/octopus-v2": any;
    "@cf/m-a-p/opencodeinterpreter-ds-6.7b": any;
    "@cf/fblgit/una-cybertron-7b-v2-bf16": any;
    "@cf/deepseek-ai/deepseek-coder-7b-instruct-v1.5": any;
    "@cf/unum/uform-gen2-qwen-500m": {
        postProcessingFunc: (response: any, inputs: any) => any;
    };
    "@cf/jpmorganchase/roberta-spam": {
        experimental: boolean;
    };
    "@cf/sven/test": {
        experimental: boolean;
    };
    "@cf/inml/inml-roberta-dga": {
        experimental: boolean;
        postProcessingFunc: (r: any, inputs: any) => {
            label: string;
            score: any;
        }[];
    };
    "@hf/sentence-transformers/all-minilm-l6-v2": {
        experimental: boolean;
    };
    "@cf/meta/llama-2-7b-chat-fp16": any;
    "@cf/meta/llama-2-7b-chat-int8": any;
    "@cf/openai/whisper": WhisperModelSettings;
    "@cf/openai/whisper-tiny-en": WhisperModelSettings;
    "@cf/openai/whisper-sherpa": WhisperModelSettings;
    "@cf/mistral/mistral-7b-instruct-v0.1": any;
    "@cf/stabilityai/stable-diffusion-xl-turbo": {
        experimental: boolean;
        postProcessingFunc: (r: any, input: any) => Uint8Array;
    };
};
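/*
 * Usage sketch for `modelSettings`, assuming the compiled module exposes these
 * values at runtime; the model ID and prompt below are illustrative only:
 *
 *   import { modelSettings } from "./index";
 *
 *   const settings = modelSettings["@hf/mistral/mistral-7b-instruct-v0.2"];
 *   const { defaultContext, defaultPromptMessages } = settings.preProcessingArgs;
 *   // Per the types above, this yields an array of { role, content } messages.
 *   const messages = defaultPromptMessages(defaultContext, "Explain AWQ quantization.");
 *   // settings.inputsDefaults and settings.inputsDefaultsStream carry the
 *   // default max_tokens for non-streaming and streaming calls respectively.
 */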
/** Alternate model names, keyed by the canonical model ID they resolve to. */
export declare const modelAliases: {
    "@hf/mistral/mistral-7b-instruct-v0.2": string[];
    "@cf/meta/llama-3-8b-instruct": string[];
    "@cf/mistral/mistral-7b-instruct-v0.1-vllm": string[];
};
//# sourceMappingURL=index.d.ts.map
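/*
 * Alias-resolution sketch for `modelAliases` above. `resolveModel` is a
 * hypothetical helper; the declaration only guarantees that each canonical
 * model ID maps to a string[] of alternate names:
 *
 *   import { modelAliases } from "./index";
 *
 *   function resolveModel(name: string): string {
 *     for (const [canonical, aliases] of Object.entries(modelAliases)) {
 *       if (canonical === name || aliases.includes(name)) {
 *         return canonical;
 *       }
 *     }
 *     return name; // not aliased; assume it is already canonical
 *   }
 */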