import { type Layer, Tensor } from '../tensor.ts';
import type { TqdmOnProgress } from '../helpers/tqdm.ts';
export * from './optim.ts';
export * from './state.ts';
export * from './datasets.ts';
/**
 * Abstract model to simplify calling, loading and saving different models.
 * You only need to implement the list of layers to create a new model.
 * ```ts
 * import { Layer, Tensor } from "@jsgrad/jsgrad"
 * export class MNIST extends Model {
 *   layers: Layer[] = [
 *     new Conv2d(1, 32, 5),
 *     Tensor.relu,
 *     // ...other layers
 *   ]
 * }
 * const mnist = new MNIST()
 *
 * // call the model with some input
 * mnist.call(Tensor.rand([1, 1, 28, 28]))
 *
 * // load weights from ./mnist.safetensors
 * await mnist.load("./mnist.safetensors")
 *
 * // save weights to ./mnist.safetensors
 * await mnist.save("./mnist.safetensors")
 * ```
 */
export declare abstract class Model {
  /** Optional default location used by `load()` when no path is given. */
  DEFAULT_LOAD?: string;
  /** Ordered list of layers the model applies to its input. */
  abstract layers: Layer[];
  /**
   * Call model with a Tensor input, returns a Tensor with output.
   * ```ts
   * import { MNIST, Tensor } from "@jsgrad/jsgrad"
   *
   * const model = new MNIST()
   * const res = model.call(Tensor.rand([1, 1, 28, 28]))
   * console.log(await res.tolist())
   * ```
   */
  call: (x: Tensor) => Tensor;
  /**
   * Load model weights from a .safetensors file at the given path or absolute URL.
   * ```ts
   * import { MNIST } from "@jsgrad/jsgrad"
   * const model = new MNIST()
   * await model.load("./model.safetensors")
   * ```
   */
  // Fix: the original declared a bare `Promise` (missing type argument), which
  // does not compile. NOTE(review): assuming the implementation resolves with
  // no value — confirm against the implementation and widen if it returns one.
  load: (path?: string | Tensor, onProgress?: TqdmOnProgress) => Promise<void>;
  /**
   * Save model weights to a .safetensors file at the given path.
   * ```ts
   * import { MNIST } from "@jsgrad/jsgrad"
   *
   * const model = new MNIST()
   * await model.save("./model.safetensors")
   * ```
   */
  // Fix: bare `Promise` → `Promise<void>` (same missing-type-argument error).
  save: (path: string) => Promise<void>;
}
/**
 * Applies Batch Normalization over a 2D or 3D input.
 *
 * - Described: https://paperswithcode.com/method/batch-normalization
 * - Paper: https://arxiv.org/abs/1502.03167v3
 *
 * See: `Tensor.batchnorm`
 *
 * ```ts
 * import { BatchNorm, Tensor } from "@jsgrad/jsgrad"
 *
 * const norm = new BatchNorm(3)
 * let t = Tensor.rand([2, 3, 4, 4])
 * t = norm.call(t)
 * ```
 */
export declare class BatchNorm {
  // Small constant added to the variance for numerical stability.
  eps: number;
  // When true, running mean/variance are tracked across calls.
  track_running_stats: boolean;
  momentum: number;
  // Learnable scale/shift; presumably undefined when `affine` is false — confirm in implementation.
  weight?: Tensor;
  bias?: Tensor;
  num_batches_tracked: Tensor;
  // Only meaningful when `track_running_stats` is enabled.
  running_mean?: Tensor;
  running_var?: Tensor;
  constructor(sz: number, eps?: number, affine?: boolean, track_running_stats?: boolean, momentum?: number);
  // Returns a pair of stats tensors for `x` — presumably [mean, variance]; verify against implementation.
  calc_stats: (x: Tensor) => [Tensor, Tensor];
  call: (x: Tensor) => Tensor;
}
/** Identical to `BatchNorm`; kept as a PyTorch-style name for 2D inputs. */
export declare class BatchNorm2d extends BatchNorm {
}
/** Identical to `BatchNorm`; kept as a PyTorch-style name for 3D inputs. */
export declare class BatchNorm3d extends BatchNorm {
}
/**
 * Applies a 2D convolution over an input signal composed of several input planes.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d
 *
 * ```ts
 * import { Conv2d, Tensor } from "@jsgrad/jsgrad"
 *
 * const conv = new Conv2d(1, 1, 3)
 * let t = Tensor.rand([1, 1, 4, 4])
 * t = conv.call(t)
 * ```
 */
export declare class Conv2d {
  // Kernel size per spatial dimension, e.g. [3, 3].
  kernel_size: number[];
  stride: number;
  dilation: number;
  groups: number;
  // A single number or per-dimension padding values.
  padding: number | number[];
  weight: Tensor;
  // Presumably undefined when constructed with `bias: false` — confirm in implementation.
  bias?: Tensor;
  constructor(in_channels: number, out_channels: number, kernel_size: number | number[], stride?: number, padding?: number | number[] | string, dilation?: number, groups?: number, bias?: boolean);
  call: (x: Tensor) => Tensor;
}
/**
 * Applies a 1D convolution over an input signal composed of several input planes.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.Conv1d
 *
 * ```ts
 * import { Conv1d, Tensor } from "@jsgrad/jsgrad"
 *
 * const conv = new Conv1d(1, 1, 3)
 * let t = Tensor.rand([1, 1, 4])
 * t = conv.call(t)
 * ```
 */
export declare class Conv1d extends Conv2d {
  constructor(in_channels: number, out_channels: number, kernel_size: number, stride?: number, padding?: number | string, dilation?: number, groups?: number, bias?: boolean);
}
/**
 * Applies a 2D transposed convolution operator over an input image.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d
 *
 * ```ts
 * import { ConvTranspose2d, Tensor } from "@jsgrad/jsgrad"
 *
 * const conv = new ConvTranspose2d(1, 1, 3)
 * let t = Tensor.rand([1, 1, 4, 4])
 * t = conv.call(t)
 * ```
 */
export declare class ConvTranspose2d extends Conv2d {
  // Extra size added to one side of the output shape (see the PyTorch docs above).
  output_padding: number;
  constructor(in_channels: number, out_channels: number, kernel_size: number | number[], stride?: number, padding?: number, output_padding?: number, dilation?: number, groups?: number, bias?: boolean);
  call: (x: Tensor) => Tensor;
}
/**
 * Applies a 1D transposed convolution operator over an input signal composed of several input planes.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose1d
 *
 * ```ts
 * import { ConvTranspose1d, Tensor } from "@jsgrad/jsgrad"
 *
 * const conv = new ConvTranspose1d(1, 1, 3)
 * let t = Tensor.rand([1, 1, 4])
 * t = conv.call(t)
 * ```
 */
export declare class ConvTranspose1d extends ConvTranspose2d {
  constructor(in_channels: number, out_channels: number, kernel_size: number, stride?: number, padding?: number, output_padding?: number, dilation?: number, groups?: number, bias?: boolean);
}
/**
 * Applies a linear transformation to the incoming data.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.Linear
 *
 * ```ts
 * import { Linear, Tensor } from "@jsgrad/jsgrad"
 *
 * const lin = new Linear(3, 4)
 * let t = Tensor.rand([2, 3])
 * t = lin.call(t)
 * ```
 */
export declare class Linear {
  weight: Tensor;
  // Presumably undefined when constructed with `bias: false` — confirm in implementation.
  bias?: Tensor;
  constructor(in_features: number, out_features: number, bias?: boolean);
  call: (x: Tensor) => Tensor;
}
/**
 * Applies Group Normalization over a mini-batch of inputs.
 *
 * - Described: https://paperswithcode.com/method/group-normalization
 * - Paper: https://arxiv.org/abs/1803.08494v3
 *
 * ```ts
 * import { GroupNorm, Tensor } from "@jsgrad/jsgrad"
 *
 * const norm = new GroupNorm(2, 12)
 * let t = Tensor.rand([2, 12, 4, 4])
 * t = norm.call(t)
 * ```
 */
export declare class GroupNorm {
  num_groups: number;
  num_channels: number;
  // Small constant added for numerical stability.
  eps: number;
  // Presumably undefined when `affine` is false — confirm in implementation.
  weight: Tensor | undefined;
  bias: Tensor | undefined;
  constructor(num_groups: number, num_channels: number, eps?: number, affine?: boolean);
  call: (x: Tensor) => Tensor;
}
/**
 * Applies Instance Normalization over a mini-batch of inputs.
 *
 * - Described: https://paperswithcode.com/method/instance-normalization
 * - Paper: https://arxiv.org/abs/1607.08022v3
 *
 * ```ts
 * import { InstanceNorm, Tensor } from "@jsgrad/jsgrad"
 *
 * const norm = new InstanceNorm(3)
 * let t = Tensor.rand([2, 3, 4, 4])
 * t = norm.call(t)
 * ```
 */
export declare class InstanceNorm {
  num_features: number;
  // Small constant added for numerical stability.
  eps: number;
  // Presumably undefined when `affine` is false — confirm in implementation.
  weight: Tensor | undefined;
  bias: Tensor | undefined;
  constructor(num_features: number, eps?: number, affine?: boolean);
  call: (x: Tensor) => Tensor;
}
/**
 * Applies Layer Normalization over a mini-batch of inputs.
 *
 * - Described: https://paperswithcode.com/method/layer-normalization
 * - Paper: https://arxiv.org/abs/1607.06450v1
 *
 * ```ts
 * import { LayerNorm, Tensor } from "@jsgrad/jsgrad"
 *
 * const norm = new LayerNorm(3)
 * let t = Tensor.rand([2, 5, 3])
 * t = norm.call(t)
 * ```
 */
export declare class LayerNorm {
  // Shape over which normalization is applied (constructor accepts a single number too).
  normalized_shape: number[];
  // Axes derived from `normalized_shape` that the normalization reduces over.
  axis: number[];
  eps: number;
  elementwise_affine: boolean;
  // Presumably undefined when `elementwise_affine` is false — confirm in implementation.
  weight?: Tensor;
  bias?: Tensor;
  constructor(normalized_shape: number | number[], eps?: number, elementwise_affine?: boolean);
  call(x: Tensor): Tensor;
}
/**
 * Applies Layer Normalization over a mini-batch of 2D inputs.
 *
 * See: `LayerNorm`
 *
 * ```ts
 * import { LayerNorm2d, Tensor } from "@jsgrad/jsgrad"
 *
 * const norm = new LayerNorm2d(3)
 * let t = Tensor.rand([2, 3, 4, 4])
 * t = norm.call(t)
 * ```
 */
export declare class LayerNorm2d extends LayerNorm {
  call(x: Tensor): Tensor;
}
/**
 * Applies Root Mean Square Normalization to input.
 *
 * - Described: https://paperswithcode.com/method/rmsnorm
 * - Paper: https://arxiv.org/abs/1910.07467
 *
 * ```ts
 * import { RMSNorm, Tensor } from "@jsgrad/jsgrad"
 *
 * const norm = new RMSNorm(4)
 * const t = norm.call(Tensor.arange(12).reshape(3, 4))
 * ```
 */
export declare class RMSNorm {
  // Small constant added for numerical stability.
  eps: number;
  // Learnable per-dimension scale.
  weight: Tensor;
  constructor(dim: number, eps?: number);
  // Internal helper: normalizes `x` before the weight is applied.
  _norm: (x: Tensor) => Tensor;
  call: (x: Tensor) => Tensor;
}
/**
 * A simple lookup table that stores embeddings of a fixed dictionary and size.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.Embedding
 *
 * ```ts
 * import { Embedding, Tensor } from "@jsgrad/jsgrad"
 *
 * const emb = new Embedding(10, 3)
 * const t = emb.call(new Tensor([1, 2, 3, 1]))
 * ```
 */
export declare class Embedding {
  vocab_size: number;
  embed_size: number;
  // Embedding table of all vocabulary entries.
  weight: Tensor;
  // Cached index tensor — presumably lazily built on first call; confirm in implementation.
  arange: Tensor | undefined;
  constructor(vocab_size: number, embed_size: number);
  call: (idx: Tensor) => Tensor;
}
/**
 * A long short-term memory (LSTM) cell.
 *
 * @param input_size The number of expected features in the input `x`
 * @param hidden_size The number of features in the hidden state `h`
 * @param bias If `false`, then the layer does not use bias weights `b_ih` and `b_hh`
 */
export declare class LSTMCell {
  // Input-to-hidden weights.
  weight_ih: Tensor;
  // Hidden-to-hidden weights.
  weight_hh: Tensor;
  // Presumably undefined when constructed with `bias: false` — confirm in implementation.
  bias_ih: Tensor | undefined;
  bias_hh: Tensor | undefined;
  constructor(input_size: number, hidden_size: number, bias?: boolean);
  // Steps the cell once; `hc` is the previous [hidden, cell] state pair, and the
  // returned pair is the updated [hidden, cell] state.
  call: (x: Tensor, hc?: [Tensor, Tensor]) => [Tensor, Tensor];
}