import { type ConstType, DType, type DTypeLike } from './dtype.ts'; import { Metadata, type Slice } from './helpers/helpers.ts'; import { MathTrait, Ops, type sint, UOp, type Variable } from './ops.ts'; import { type ScheduleItem } from './engine/schedule.ts'; import { MemoryView } from './helpers/memoryview.ts'; type ReplaceUOpsWithTensor = { [K in keyof Args]: Args[K] extends UOp ? Tensor : Args[K]; }; declare function CreateFunction(): { new (device: string | string[], tensors: Tensor[], metadata?: Metadata | undefined): { needs_input_grad: (boolean | undefined)[]; requires_grad?: boolean; parents?: Tensor[]; device: string | string[]; metadata?: Metadata | undefined; forward: (..._args: Args) => UOp; backward: (_grad_output: UOp) => UOp | (UOp | undefined)[]; }; apply(...args: ReplaceUOpsWithTensor): Tensor; }; declare const ReductionStr: string[]; type ReductionStr = typeof ReductionStr[number]; export type TensorOptions = { device?: string | string[]; dtype?: DType; requires_grad?: boolean; }; export type TensorIndice = number | boolean | Tensor | UOp | undefined | '...' | Slice | (number | boolean | UOp | Tensor | undefined | '...' | Slice)[]; export type Layer = ((x: Tensor) => Tensor) | { call: (x: Tensor) => Tensor; }; export type LayerAsync = ((x: Tensor) => Tensor) | { call: (x: Tensor) => Tensor; } | ((x: Tensor) => Promise) | { call: (x: Tensor) => Promise; }; /** * A `Tensor` is a multi-dimensional matrix containing elements of a single data type. 
* * ```python exec="true" session="tensor" * from tinygrad import Tensor, dtypes, nn * import numpy as np * import math * np.set_printoptions(precision=4) * ``` */ export declare class Tensor extends MathTrait { static registry: FinalizationRegistry; lazydata: UOp; requires_grad?: boolean; grad?: Tensor; _ctx?: InstanceType>; static training: boolean; static no_grad: boolean; _id: bigint; constructor(data?: ConstType | UOp | Uint8Array | any[] | UOp | Tensor | string, { device, dtype, requires_grad }?: TensorOptions, skip_constructor?: boolean); requires_grad_: (requires_grad: boolean | undefined) => Tensor; toString: () => string; static train: (fn: () => Promise | any) => Promise; static test: (fn: () => Promise | any) => Promise; get length(): sint; get device(): string | string[]; get shape(): sint[]; get shape_num(): number[]; get dtype(): DType; /** * Creates the schedule needed to realize these Tensor(s), with Variables. * * NOTE: A Tensor can only be scheduled once. */ schedule_with_vars: (lst?: Tensor[]) => [ScheduleItem[], Map]; _debug_ast: () => UOp[]; /** * Creates the schedule needed to realize these Tensor(s). */ schedule: (...lst: Tensor[]) => ScheduleItem[]; realize: (lst?: Tensor[], do_update_stats?: boolean) => Promise; static realize: (lst: Tensor[], do_update_stats?: boolean) => Promise; /** * Replaces the data of this tensor with the data of another tensor. Only the shape of the tensors must match. */ replace: (x: Tensor) => Tensor; assign_disk: (x: Tensor | number[] | string | Uint8Array) => Promise; assign: (x: Tensor | number[] | number | string | Uint8Array) => Tensor; /** * Returns a new tensor with the same data as this tensor, but detached from the autograd graph. */ detach: () => Tensor; _data: () => Promise; /** * Returns the data of this tensor as a memoryview. 
* * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([1, 2, 3, 4]) * console.log(np.frombuffer(t.data(), dtype=np.int32)) * ``` */ data: () => Promise>; /** * Returns the value of this tensor as a standard Python number. * * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor(42) * console.log(t.item()) * ``` */ item: () => Promise; /** * Returns the value of this tensor as a nested list. * * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([1, 2, 3, 4]) * console.log(t.tolist()) * ``` */ tolist: () => Promise; /** * Creates a clone of this tensor allocating a separate buffer for the data. */ clone: () => Tensor; /** * Moves the tensor to the given device. */ to: (device?: string | string[]) => Tensor; /** * Moves the tensor to the given device in place. */ to_: (device?: string | string[]) => Tensor; /** * Shards the tensor across the given devices. Optionally specify which axis to shard on. * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.empty(2, 4) * print(t.shard((t.device, t.device), axis=1).lazydata) * ``` */ shard: (devices: string[], axis?: number) => Tensor; /** * Shards the tensor across the given devices in place. */ shard_: (devices: string[], axis?: number) => Tensor; static from_uop: (y: UOp, opts?: TensorOptions) => Tensor; static _metaop: (op: Ops, shape: sint[], { dtype, device, ...opts }: TensorOptions, arg?: any) => Tensor; /** * Creates an empty tensor with the given shape. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. 
* * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.empty(2, 3) * print(t.shape) * ``` * """ */ static empty: (shape: number[], opts?: TensorOptions) => Tensor; /** * Exposes the pointer as a Tensor without taking ownership of the original data. * The pointer must remain valid for the entire lifetime of the created Tensor. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. */ static from_blob: (ptr: bigint, shape: number[], opts: TensorOptions) => Tensor; /** * Create a Tensor from a URL. * * This === the preferred way to access Internet resources. * It currently returns a DISK Tensor, but in the future it may return an HTTP Tensor. * This also will soon become lazy (when possible) && !print progress without DEBUG. * * THe `gunzip` flag will gzip extract the resource && return an extracted Tensor. */ static from_url: (url: string, opts?: TensorOptions) => Promise; static from_file: (path: string, opts?: TensorOptions) => Promise; static _seed: number; static _device_seeds: Record; static _device_rng_counters: Record; /** * Sets the seed for random operations. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * console.log(Tensor.rand(5).numpy()) * console.log(Tensor.rand(5).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) // reset to the same seed * console.log(Tensor.rand(5).numpy()) * console.log(Tensor.rand(5).numpy()) * ``` */ static manual_seed: (seed?: number) => void; static _threefry_random_bits: (key: Tensor, counts0: Tensor, counts1: Tensor) => Tensor; /** * Creates a tensor with the given shape, filled with random values from a uniform distribution over the interval `[0, 1)`. 
* * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * t = Tensor.rand(2, 3) * console.log(t.numpy()) * ``` */ static rand: (shape: number[], contiguous?: boolean, { device, dtype, ...opts }?: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with the given value. * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.full((2, 3), 42).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.full((2, 3), false).numpy()) * ``` */ static full: (shape: sint[], fill_value: ConstType, opts?: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with zeros. * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.zeros(2, 3).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.zeros(2, 3, dtype=dtypes.int32).numpy()) * ``` */ static zeros: (shape: sint[], opts?: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with ones. * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. 
* * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.ones(2, 3).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.ones(2, 3, dtype=dtypes.int32).numpy()) * ``` */ static ones: (shape: sint[], opts?: TensorOptions) => Tensor; /** * Returns a 1-D tensor of size `ceil((stop - start) / step)` with values from `[start, stop)`, with spacing between values given by `step`. * * If `stop` !== specified, values are generated from `[0, start)` with the given `step`. * * If `stop` === specified, values are generated from `[start, stop)` with the given `step`. * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.arange(5).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.arange(5, 10).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.arange(5, 10, 2).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor.arange(5.5, 10, 2).numpy()) * ``` */ static arange: (start: number, stop?: number, step?: number, opts?: TensorOptions) => Tensor; /** * Returns a 1-D tensor of `steps` evenly spaced values from `start` to `stop`, inclusive. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. 
* * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.linspace(0, 10, 5).numpy()) * ``` * * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.linspace(-1, 1, 5).numpy()) * ``` */ static linspace: (start: Tensor | number, stop: Tensor | number, steps: number, { dtype, ...opts }?: TensorOptions) => Tensor; /** * Returns a 2-D tensor with `n` rows and `m` columns, with ones on the diagonal and zeros elsewhere. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.eye(3).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.eye(2, 4).numpy()) * ``` */ static eye: (n: number, m?: number, opts?: TensorOptions) => Tensor; /** * Creates a tensor with the same shape as `this`, filled with the given value. * If `dtype` !== specified, the dtype of `this` === used. * * You can pass in the `device` keyword argument to control device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.ones(2, 3) * console.log(Tensor.full_like(t, 42).numpy()) * ``` */ full_like: (fill_value: ConstType, opts?: TensorOptions) => Tensor; /** * Creates a tensor with the same shape as `self`, filled with zeros. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. 
* * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.ones(2, 3) * print(Tensor.zeros_like(t).numpy()) * ``` */ zeros_like: (opts: TensorOptions) => Tensor; /** * Creates a tensor with the same shape as `this`, filled with ones. * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.zeros(2, 3) * console.log(Tensor.ones_like(t).numpy()) * ``` */ ones_like: (opts?: TensorOptions) => Tensor; /** * Creates a tensor with the same shape and sharding as `self`, filled with random values from a uniform distribution over the interval `[0, 1)`. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.ones(2, 3) * print(Tensor.rand_like(t).numpy()) * ``` */ rand_like: ({ dtype, contiguous, ...opts }?: TensorOptions & { contiguous?: boolean; }) => Tensor; /** * Creates a tensor with the given shape, filled with random values from a normal distribution with mean `0` and standard deviation `1`. * If `dtype` is not specified, the default type is used. * * You can pass in the `device` keyword argument to control device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * print(Tensor.randn(2, 3).numpy()) * ``` */ static randn: (shape: number[], { dtype, requires_grad, ...opts }?: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with random integer values generated uniformly from the interval `[low, high)`. 
* If `dtype` !== specified, the default type === used. * * You can pass in the `device` keyword argument to control device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * console.log(Tensor.randint(2, 3, low=5, high=10).numpy()) * ``` */ static randint: (shape: number[], low?: number, high?: number, { dtype, ...opts }?: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with random values from a normal distribution with the given `mean` and standard deviation `std`. * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * print(Tensor.normal(2, 3, mean=10, std=2).numpy()) * ``` */ static normal: (shape: number[], mean: number | undefined, std: number | undefined, { requires_grad, ...opts }: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with random values from a uniform distribution over the interval `[low, high)`. * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * console.log(Tensor.uniform(2, 3, low=2, high=10).numpy()) * ``` */ static uniform: (shape: number[], low?: number, high?: number, { dtype, requires_grad, ...opts }?: TensorOptions) => Tensor; /** * Creates a tensor with the given shape, filled with random values from a uniform distribution * over the interval `[-prod(shape)**-0.5, prod(shape)**-0.5)`. 
* * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * print(Tensor.scaled_uniform(2, 3).numpy()) * ``` */ static scaled_uniform: (shape: number[], opts: TensorOptions) => Tensor; /** * * * You can pass in `dtype` && `device` keyword arguments to control the data type && device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * console.log(Tensor.glorot_uniform(2, 3).numpy()) * ``` */ static glorot_uniform: (shape: number[], opts?: TensorOptions) => Tensor; /** * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * print(Tensor.kaiming_uniform(2, 3).numpy()) * ``` */ static kaiming_uniform: (shape: number[], a: number | undefined, opts: TensorOptions) => Tensor; /** * * * You can pass in `dtype` and `device` keyword arguments to control the data type and device of the tensor. * Additionally, all other keyword arguments are passed to the constructor of the tensor. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * print(Tensor.kaiming_normal(2, 3).numpy()) * ``` */ static kaiming_normal: (shape: number[], a: number | undefined, opts: TensorOptions) => Tensor; multinomial: (num_samples?: number, replacement?: boolean) => Tensor; /** * Compute the gradient of the targets with respect to self. 
* ```python exec="true" source="above" session="tensor" result="python" * x = Tensor.eye(3) * y = Tensor([[2.0,0,-2.0]]) * z = y.matmul(x).sum() * dx, dy = z.gradient(x, y) * * print(dx.tolist()) // dz/dx * print(dy.tolist()) // dz/dy * ``` */ gradient: (targets: Tensor[], gradient?: Tensor) => Tensor[]; _deepwalk: () => Tensor[]; /** * Propagates the gradient of a tensor backwards through the computation graph. * If the 'gradient' argument !== provided, the tensor must be a scalar, && the gradient === implicitly set to 1.0. * If 'retain_graph' === false, the graph used to compute the grads will be freed. Otherwise, it will be kept. Keeping it can increase memory usage. * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([1.0, 2.0, 3.0, 4.0], requires_grad=true) * t.sum().backward() * console.log(t.grad.numpy()) * ``` */ backward: (gradient?: Tensor, retain_graph?: boolean) => Tensor; /** * `.view` === an alias for `.reshape`. */ view: (...shape: sint[]) => Tensor; /** * Returns a tensor with the same data as the original tensor but with a different shape. * `shape` can be passed as a tuple || as separate arguments. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(6) * console.log(t.reshape(2, 3).numpy()) * ``` */ reshape: (...shape: (sint | undefined)[]) => Tensor; /** * Returns a tensor that is expanded to the shape that is specified. * Expand can also increase the number of dimensions that a tensor has. * * Passing a `-1` or `undefined` to a dimension means that its size will not be changed. * * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([1, 2, 3]) * console.log(t.expand(4, -1).numpy()) * ``` */ expand: (...shape: sint[]) => Tensor; /** * Returns a tensor that is a permutation of the original tensor. * The new tensor has the same data as the original tensor but with the dimensions permuted according to the order specified. 
* `order` can be passed as a tuple or as separate arguments. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(6).reshape(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.permute(1, 0).numpy()) * ``` */ permute: (...args: number[]) => Tensor; /** * Returns a tensor that reverses the order of the original tensor along given `axis`. * `axis` can be passed as a tuple || as separate arguments. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(6).reshape(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.flip(0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.flip((0, 1)).numpy()) * ``` */ flip: (...axis: number[]) => Tensor; /** * Returns a tensor that shrinks the each axis based on input arg. * `arg` must have the same length as `this.ndim`. * For each axis, it can be `undefined`, which means no shrink, || a tuple `(start, end)` that works the same as Python slice. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(9).reshape(3, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.shrink(((undefined, (1, 3)))).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.shrink((((0, 2), (0, 2)))).numpy()) * ``` */ shrink: (...arg: ([sint, sint] | undefined)[]) => Tensor; /** * Returns a tensor with padding applied based on the input `padding`. * * `padding` supports two padding structures: * * 1. Flat padding: `(padding_left, padding_right, padding_top, padding_bottom, ...)` * - This structure matches PyTorch's pad. * - `padding` length must be even. * * 2. 
Group padding: `(..., (padding_top, padding_bottom), (padding_left, padding_right))` * - This structure matches pad for JAX, NumPy, TensorFlow and others. * - For each axis, padding can be `undefined`, meaning no padding, || a tuple `(start, end)`. * - `padding` must have the same length as `this.ndim`. * * Padding values can be negative, resulting in dimension shrinks that work similarly to Python negative slices. * Padding modes === selected with `mode` which supports `constant`, `reflect` && `replicate`. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(9).reshape(1, 1, 3, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.pad((1, 2, 0, -1)).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.pad(((undefined, undefined, (0, -1), (1, 2)))).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.pad((1, 2, 0, -1), value=-number('inf')).numpy()) * ``` */ pad: (padding: sint[] | ([sint, sint] | undefined)[], mode?: "constant" | "reflect" | "replicate" | "circular", value?: number | bigint | boolean) => Tensor; _getitem: (indices: TensorIndice[], v?: Tensor) => Tensor; /** * Retrieve a sub-tensor using indexing. * * Supported Index Types: `int | slice | Tensor | None | List | Tuple | Ellipsis` * * Examples: * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(12).reshape(3, 4) * print(t.numpy()) * ``` * * - Int Indexing: Select an element or sub-tensor using integers for each dimension. * ```python exec="true" source="above" session="tensor" result="python" * print(t[1, 2].numpy()) * ``` * * - Slice Indexing: Select a range of elements using slice notation (`start:end:stride`). 
* ```python exec="true" source="above" session="tensor" result="python" * print(t[0:2, ::2].numpy()) * ``` * * - Tensor Indexing: Use another tensor as indices for advanced indexing. Using `tuple` or `list` here also works. * ```python exec="true" source="above" session="tensor" result="python" * print(t[Tensor([2, 0, 1]), Tensor([1, 2, 3])].numpy()) * ``` * * - `None` Indexing: Add a new dimension to the tensor. * ```python exec="true" source="above" session="tensor" result="python" * print(t[:, None].shape) * ``` * * NOTE: Out-of-bounds indexing results in a value of `0`. * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor([1, 2, 3]) * print(t[Tensor([4, 3, 2])].numpy()) * ``` */ get: (...indices: TensorIndice[]) => Tensor; set: (indices: TensorIndice[], v: Tensor | number) => Promise; /** * Gathers values along an axis specified by `dim`. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor([[1, 2], [3, 4]]) * print(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(t.gather(1, Tensor([[0, 0], [1, 0]])).numpy()) * ``` * """ */ gather: (dim: number, index: Tensor) => Tensor; /** * Concatenates this with other `Tensor` in `args` along an axis specified by `dim`. * All tensors must have the same shape except in the concatenating dimension. * * ```python exec="true" source="above" session="tensor" result="python" * t0, t1, t2 = new Tensor([[1, 2]]), Tensor([[3, 4]]), Tensor([[5, 6]]) * console.log(t0.cat(t1, t2, dim=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t0.cat(t1, t2, dim=1).numpy()) * ``` */ cat: (args: Tensor[], dim?: number) => Tensor; static cat: (tensors: Tensor[], dim?: number) => Tensor; /** * Concatenates self with other `Tensor` in `args` along a new dimension specified by `dim`. 
* * ```python exec="true" source="above" session="tensor" result="python" * t0, t1, t2 = Tensor([1, 2]), Tensor([3, 4]), Tensor([5, 6]) * print(t0.stack(t1, t2, dim=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(t0.stack(t1, t2, dim=1).numpy()) * ``` */ static stack: (args: Tensor[], dim?: number) => Tensor; stack: (args: Tensor[], dim?: number) => Tensor; /** * Repeat elements of a tensor. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor([1, 2, 3]) * print(t.repeat_interleave(2).numpy()) * ``` */ repeat_interleave: (repeats: number, dim?: number) => Tensor; /** * Repeats tensor number of times along each dimension specified by `repeats`. * `repeats` can be passed as a tuple || as separate arguments. * * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([1, 2, 3]) * console.log(t.repeat(4, 2).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.repeat(4, 2, 1).shape) * ``` */ repeat: (...repeats: sint[]) => Tensor; _resolve_dim: (dim: number, extra?: boolean) => number; /** * Splits the tensor into chunks along the dimension specified by `dim`. * If `sizes` is an integer, it splits into equally sized chunks if possible, otherwise the last chunk will be smaller. * If `sizes` is a list, it splits into `len(sizes)` chunks with size in `dim` according to `size`. 
* * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(10).reshape(5, 2) * print(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * split = t.split(2) * print("\\n".join([repr(x.numpy()) for x in split])) * ``` * ```python exec="true" source="above" session="tensor" result="python" * split = t.split([1, 4]) * print("\\n".join([repr(x.numpy()) for x in split])) * ``` */ split: (sizes: number | number[], dim?: number) => Tensor[]; /** * Splits the tensor into `chunks` number of chunks along the dimension `dim`. * If the tensor size along `dim` is not divisible by `chunks`, all returned chunks will be the same size except the last one. * The function may return fewer than the specified number of chunks. * * ```python exec="true" source="above" session="tensor" result="python" * chunked = Tensor.arange(11).chunk(6) * print("\\n".join([repr(x.numpy()) for x in chunked])) * ``` * ```python exec="true" source="above" session="tensor" result="python" * chunked = Tensor.arange(12).chunk(6) * print("\\n".join([repr(x.numpy()) for x in chunked])) * ``` * ```python exec="true" source="above" session="tensor" result="python" * chunked = Tensor.arange(13).chunk(6) * print("\\n".join([repr(x.numpy()) for x in chunked])) * ``` */ chunk: (chunks: number, dim?: number) => Tensor[]; /** * Generates coordinate matrices from coordinate vectors. * Input tensors can be scalars or 1D tensors. * * `indexing` determines how the output grids are aligned. * `ij` indexing follows matrix-style indexing and `xy` indexing follows Cartesian-style indexing. 
* * ```python exec="true" source="above" session="tensor" result="python" * x, y = Tensor([1, 2, 3]), Tensor([4, 5, 6]) * grid_x, grid_y = x.meshgrid(y) * print(grid_x.numpy()) * print(grid_y.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * grid_x, grid_y = x.meshgrid(y, indexing="xy") * print(grid_x.numpy()) * print(grid_y.numpy()) * ``` */ meshgrid: (args: Tensor[], indexing?: "ij" | "xy") => Tensor[]; /** * Returns a tensor with specified dimensions of input of size 1 removed. * If `dim` is not specified, all dimensions with size 1 are removed. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.zeros(2, 1, 2, 1, 2) * print(t.squeeze().shape) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(t.squeeze(0).shape) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(t.squeeze(1).shape) * ``` */ squeeze: (dim?: number) => Tensor; /** * Returns a tensor with a new dimension of size 1 inserted at the specified `dim`. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor([1, 2, 3, 4]) * print(t.unsqueeze(0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(t.unsqueeze(1).numpy()) * ``` */ unsqueeze: (dim: number) => Tensor; /** * `.T` === an alias for `.transpose()`. */ get T(): Tensor; /** * Returns a tensor that === a transposed version of the original tensor. * The given dimensions `dim0` && `dim1` are swapped. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(6).reshape(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.transpose(0, 1).numpy()) * ``` */ transpose: (dim0?: number, dim1?: number) => Tensor; /** * Flattens the tensor by reshaping it into a one-dimensional tensor. 
* If `start_dim` || `end_dim` are passed, only dimensions starting with `start_dim` && ending with `end_dim` are flattened. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(8).reshape(2, 2, 2) * console.log(t.flatten().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.flatten(start_dim=1).numpy()) * ``` */ flatten: (start_dim?: number, end_dim?: number) => Tensor; /** * Unflattens dimension `dim` of the tensor into multiple dimensions specified by `sizes`. `Tensor.flatten()` is the inverse of this function. * * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.ones(3, 4, 1).unflatten(1, (2, 2)).shape) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.ones(3, 4, 1).unflatten(1, (-1, 2)).shape) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(Tensor.ones(5, 12, 3).unflatten(-2, (2, 2, 3, 1, 1)).shape) * ``` */ unflatten: (dim: number, sizes: number[]) => Tensor; /** * Rolls the tensor along specified dimension(s). * The rolling operation is circular, meaning that elements that go beyond the edge are wrapped around to the beginning of the dimension. 
* * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(4) * print(t.roll(shifts=1, dims=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * print(t.roll(shifts=-1, dims=0).numpy()) * ``` */ roll: (shifts: number | number[], dims: number | number[]) => Tensor; /** * Rearranges input according to formula * * See: https://einops.rocks/api/rearrange/ * * ```python exec="true" source="above" session="tensor" result="python" * x = Tensor([[1, 2], [3, 4]]) * print(Tensor.rearrange(x, "batch channel -> (batch channel)").numpy()) * ``` */ rearrange: (formula: string, sizes: any) => Tensor; _reduce: (fxn: ReturnType, axis?: number | number[], keepdim?: boolean) => Tensor; /** * Returns the sum of the elements of the tensor along the specified axis || axes. * * You can pass in `axis` && `keepdim` keyword arguments to control the axis along * which the maximum === computed && whether the reduced dimensions are retained. * * You can pass in `acc_dtype` keyword argument to control the data type of the accumulation. * If !specified, the accumulation data type === chosen based on the input tensor's data type. * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.arange(6).reshape(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.sum().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.sum(axis=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.sum(axis=1).numpy()) * ``` */ sum: (axis?: number | number[], keepdim?: boolean, acc_dtype?: DTypeLike) => Tensor; /** * Returns the product of the elements of the tensor along the specified axis || axes. 
* * You can pass in `axis` && `keepdim` keyword arguments to control the axis along * which the maximum === computed && whether the reduced dimensions are retained. * * You can pass in `acc_dtype` keyword argument to control the data type of the accumulation. * If !specified, the accumulation data type === chosen based on the input tensor's data type. * * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([-1, -2, -3, 1, 2, 3])).reshape(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.prod().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.prod(axis=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.prod(axis=1).numpy()) * ``` */ prod: (axis?: number | number[], keepdim?: boolean, acc_dtype?: DTypeLike) => Tensor; /** * Returns the maximum value of the tensor along the specified axis || axes. * * You can pass in `axis` && `keepdim` keyword arguments to control the axis along * which the maximum === computed && whether the reduced dimensions are retained. * * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([[1, 0, 2], [5, 4, 3]]) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.max().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.max(axis=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.max(axis=1, keepdim=true).numpy()) * ``` */ max: (axis?: number | number[], keepdim?: boolean) => Tensor; _inverse: () => Tensor; /** * Returns the minimum value of the tensor along the specified axis || axes. 
* `axis` and `keepdim` control the reduction axis and whether reduced
 * dimensions are retained.
 */
min: (axis?: number | number[], keepdim?: boolean) => Tensor;
/**
 * Tests whether any element evaluates to `true` along the specified axis or
 * axes. `axis` and `keepdim` control the reduction axis and whether reduced
 * dimensions are retained.
 */
any: (axis?: number | number[], keepdim?: boolean) => Tensor;
/**
 * Tests whether all elements evaluate to `true` along the specified axis or
 * axes. `axis` and `keepdim` control the reduction axis and whether reduced
 * dimensions are retained.
 */
all: (axis?: number | number[], keepdim?: boolean) => Tensor;
/**
 * Returns the mean value of the tensor along the specified axis or axes.
 * `axis` and `keepdim` control the reduction axis and whether reduced
 * dimensions are retained.
 */
mean: (axis?: number | number[], keepdim?: boolean) => Tensor;
/**
 * Returns the variance of the tensor along the specified axis or axes.
 * `axis`, `keepdim` and `correction` control the reduction axis, whether
 * reduced dimensions are retained, and the Bessel correction applied.
 */
var: (axis?: number | number[], keepdim?: boolean, correction?: number) => Tensor;
/**
 * Returns the standard deviation of the tensor along the specified axis or
 * axes. `axis`, `keepdim` and `correction` behave as in `var`.
 */
std: (axis?: number | number[], keepdim?: boolean, correction?: number) => Tensor;
/**
 * Computes the standard deviation and mean over the specified dimensions.
 * Syntactic sugar around `Tensor.std` and `Tensor.mean`, matching
 * `torch.std_mean`.
* * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * t = Tensor.normal(2, 3, mean=2.5, std=0.5) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * std, mean = t.std_mean() * console.log(std.numpy(), mean.numpy()) * ``` */ std_mean: (axis?: number | number[], keepdim?: boolean, correction?: number) => Tensor; _softmax: (axis: number | number[], dtype?: DTypeLike) => [Tensor, Tensor, Tensor]; /** * Applies the softmax function to the tensor along the specified axis. * * Rescales the elements of the tensor such that they lie in the range [0, 1] && sum to 1. * * You can pass in the `axis` keyword argument to control the axis along which the softmax === computed. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * t = Tensor.randn(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.softmax().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.softmax(axis=0).numpy()) * ``` */ softmax: (axis?: number, dtype?: DTypeLike) => Tensor; /** * Applies the log-softmax function to the tensor along the specified axis. * * The log-softmax function === a numerically stable alternative to the softmax function in log space. * * You can pass in the `axis` keyword argument to control the axis along which the log-softmax === computed. 
* * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * t = Tensor.randn(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.log_softmax().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.log_softmax(axis=0).numpy()) * ``` */ log_softmax: (axis?: number, dtype?: DTypeLike) => Tensor; /** * Computes the log-sum-exp of the tensor along the specified axis || axes. * * The log-sum-exp function === a numerically stable way to compute the logarithm of the sum of exponentials. * * You can pass in `axis` && `keepdim` keyword arguments to control the axis along * which the log-sum-exp === computed && whether the reduced dimensions are retained. * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * t = Tensor.randn(2, 3) * console.log(t.numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.logsumexp().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.logsumexp(axis=0).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(t.logsumexp(axis=1).numpy()) * ``` */ logsumexp: (axis?: undefined, keepdim?: boolean) => Tensor; /** * Computes the log-cumsum-exp of the tensor along the specified axis || axes. * * The log-cumsum-exp function === a numerically stable way to compute the logarithm of the cumulative sum of exponentials. * * You can pass in the `axis` keyword argument to control the axis along which * the log-cum-sum-exp === computed. 
* The `axis` keyword argument controls the axis along which it is computed.
 */
logcumsumexp: (axis?: number) => Tensor;
/**
 * Returns the indices of the maximum value along the specified axis.
 * With no `axis`, the index is into the flattened tensor. `keepdim` controls
 * whether the reduced dimension is retained.
 */
argmax: (axis?: number, keepdim?: boolean) => Tensor;
/**
 * Returns the indices of the minimum value along the specified axis.
 * With no `axis`, the index is into the flattened tensor. `keepdim` controls
 * whether the reduced dimension is retained.
 */
argmin: (axis?: number, keepdim?: boolean) => Tensor;
/**
 * Sums the product of the operands' elements according to an Einstein
 * summation formula.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.einsum.html
 */
static einsum: (formula: string, operands: Tensor | Tensor[], acc_dtype?: DTypeLike) => Tensor;
// Internal pooling helper: windows the tensor by kernel `k_` with the given
// stride/dilation. Used by avg_pool2d/max_pool2d/conv2d.
_pool: (k_: sint[], stride?: number[] | number, dilation?: number[] | number) => Tensor;
// Internal: normalizes the `padding` argument (int or list forms) to one
// explicit pad value per side, for `dims` spatial dimensions.
_resolve_pool_pads: (padding: number | number[], dims: number) => number[];
// Internal: adjusts pads for ceil_mode output-shape rounding.
_apply_ceil_mode: (pads: number[], k_: sint[], s_: number[] | number, d_: number | number[]) => number[];
/**
 * Applies average pooling over a tensor. See the padding/ceil_mode notes in
 * the comment below (which documents this method, despite its placement).
 */
avg_pool2d: (kernel_size?: number[], stride?: number, dilation?: number, padding?: number, ceil_mode?: boolean, count_include_pad?: boolean) => Tensor;
/**
 * Applies average pooling over a tensor (documentation for `avg_pool2d`,
 * declared above). The `padding` argument accepts three forms:
 * 1. a single int, applied uniformly to every spatial dimension;
 * 2.
* one entry per spatial dimension (`[padding_height, padding_width, ...]`);
 * 3. two entries per spatial dimension
 *    (`[padding_left, padding_right, padding_top, padding_bottom, ...]`).
 * With `ceil_mode` set, the output shape uses ceil division; with
 * `count_include_pad=false`, zero padding is excluded from the average.
 * NOTE: unlike PyTorch, this is not limited to 2-D pooling — it works for any
 * number of spatial dimensions.
 * NOTE(review): this comment block documents `avg_pool2d` above; `max_pool2d`
 * is declared next.
 */
max_pool2d: (kernel_size?: number[] | number, stride?: number, dilation?: number, padding?: number, ceil_mode?: boolean) => Tensor;
static max_pool2d: (t: Tensor) => Tensor;
/**
 * Applies a convolution over a tensor with the given `weight` and optional
 * `bias`. `padding` accepts the same three forms as pooling: a single int,
 * one entry per spatial dimension, or two entries (left/right) per spatial
 * dimension. NOTE: unlike PyTorch, this is not limited to 2-D convolutions.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
 */
conv2d: (weight: Tensor, bias?: Tensor, groups?: number, stride?: number, dilation?: number | number[], padding?: number | number[], acc_dtype?: DTypeLike) => Tensor;
/**
 * Applies a transposed convolution over a tensor with the given `weight` and
 * optional `bias`. `padding_` accepts the same three forms as `conv2d`.
 * NOTE: unlike PyTorch, this is not limited to 2-D transposed convolutions.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
 */
conv_transpose2d: (weight: Tensor, bias?: Tensor, groups?: number, stride_?: number, dilation_?: number, padding_?: number | number[], output_padding_?: number) => Tensor;
/**
 * Performs a dot product between two tensors.
 * If `w` is 1-D, this is a sum product over the last axis of `this` and `w`.
 * If `w` is N-D (N >= 2), it is a sum product over the last axis of `this`
 * and the second-to-last axis of `w`. `acc_dtype` controls the accumulation
 * data type.
 */
dot: (w: Tensor, acc_dtype?: DTypeLike) => Tensor;
/**
 * Performs matrix multiplication between two tensors. `reverse` swaps the
 * operand order; `acc_dtype` controls the accumulation data type.
 */
matmul: (x: Tensor, reverse?: boolean, acc_dtype?: DTypeLike) => Tensor;
// Internal cumulative-reduction helper for `op` along `axis`.
_cumalu: (axis: number, op: Ops, _include_initial?: boolean) => Tensor;
// Internal: chunked variant of _cumalu for long axes.
_split_cumalu: (axis: number, op: Ops) => Tensor;
/**
 * Computes the cumulative sum of the tensor along the specified `axis`.
*
 * ```ts
 * Tensor.ones(2, 3).cumsum(1)  // [[1, 2, 3], [1, 2, 3]]
 * ```
 */
cumsum: (axis?: number) => Tensor;
/**
 * Computes the cumulative maximum of the tensor along the specified `axis`.
 */
cummax: (axis?: number) => Tensor;
// Internal: builds an r x c triangular mask offset by `diagonal`;
// shared by triu/tril.
static _tri: (r: sint, c: sint, diagonal?: number, opts?: TensorOptions) => Tensor;
/**
 * Returns the upper triangular part of the tensor; other elements are set
 * to 0. `diagonal = 0` keeps the main diagonal; positive values move the
 * boundary above it, negative values below it.
 */
triu: (diagonal?: number) => Tensor;
/**
 * Returns the lower triangular part of the tensor; other elements are set
 * to 0. `diagonal` behaves as in `triu`.
 */
tril: (diagonal?: number) => Tensor;
/**
 * Downsamples or upsamples to the given `size`; accepts 0 to N batch
 * dimensions. `mode` currently supports "linear", "nearest" and
 * "nearest-exact"; pass a 2-D or 3-D size for bilinear/trilinear behavior.
 */
interpolate: (size: number[], mode?: "linear" | "nearest" | "nearest-exact", align_corners?: boolean) => Tensor;
/**
 * Scatters `src` values along the axis specified by `dim`, using `index` to
 * select target positions. `reduce` optionally applies an "add" or
 * "multiply" reduction instead of plain assignment.
 *
 * ```ts
 * Tensor.zeros(3, 5).scatter(0, new Tensor([[0, 1, 2, 0]]), src)
 * ```
 */
scatter: (dim: number, index: Tensor, src: Tensor | ConstType, reduce?: "multiply" | "add") => Tensor;
/**
 * Computes the logical NOT of the tensor element-wise.
 */
logical_not: () => Tensor;
/**
 * Negates the tensor element-wise.
 */
neg: () => Tensor;
/**
 * Returns a contiguous tensor.
 */
contiguous: () => Tensor;
/**
 * Inserts a contiguous operation in the backward pass.
 */
contiguous_backward: () => Tensor;
/**
 * Computes the natural logarithm element-wise.
 *
 * See: https://en.wikipedia.org/wiki/Logarithm
 */
log: () => Tensor;
/**
 * Computes the base-2 logarithm element-wise.
*
 * See: https://en.wikipedia.org/wiki/Logarithm
 */
log2: () => Tensor;
/**
 * Computes the exponential function element-wise.
 */
exp: () => Tensor;
/**
 * Computes the base-2 exponential function element-wise.
 */
exp2: () => Tensor;
/**
 * Applies the Rectified Linear Unit (ReLU) function element-wise.
 *
 * Described: https://paperswithcode.com/method/relu
 */
relu: () => Tensor;
static relu: (t: Tensor) => Tensor;
/**
 * Applies the sigmoid function element-wise.
 *
 * See: https://en.wikipedia.org/wiki/Sigmoid_function
 */
sigmoid: () => Tensor;
/**
 * Applies the hard-sigmoid function element-wise.
 * NOTE: default `alpha` and `beta` values are taken from torch.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.functional.hardsigmoid.html
 */
hardsigmoid: (alpha?: number, beta?: number) => Tensor;
/**
 * Computes the square root of the tensor element-wise.
 */
sqrt: () => Tensor;
/**
 * Computes the reciprocal of the square root element-wise.
 */
rsqrt: () => Tensor;
/**
 * Computes the sine of the tensor element-wise.
 */
sin: () => Tensor;
/**
 * Computes the cosine of the tensor element-wise.
 */
cos: () => Tensor;
/**
 * Computes the tangent of the tensor element-wise.
 */
tan: () => Tensor;
/**
 * Computes the inverse sine (arcsine) element-wise.
 */
asin: () => Tensor;
/**
 * Computes the inverse cosine (arccosine) element-wise.
 */
acos: () => Tensor;
/**
 * Computes the inverse tangent (arctangent) element-wise.
 */
atan: () => Tensor;
/**
 * Truncates the tensor element-wise (rounds toward zero).
 */
trunc: () => Tensor;
/**
 * Rounds the tensor element-wise toward positive infinity.
 */
ceil: () => Tensor;
/**
 * Rounds the tensor element-wise toward negative infinity.
 */
floor: () => Tensor;
/**
 * Rounds the tensor element-wise, rounding half to even.
 */
round: () => Tensor;
/**
 * Returns `true` element-wise where the element is infinite, else `false`.
 * `detect_positive` / `detect_negative` restrict which infinities match.
 */
isinf: (detect_positive?: boolean, detect_negative?: boolean) => Tensor;
/**
 * Returns `true` element-wise where the element is NaN, else `false`.
 */
isnan: () => Tensor;
/**
 * Linearly interpolates between `this` and `end` by `weight`.
 */
lerp: (end: Tensor, weight: Tensor | number) => Tensor;
/**
 * Squares the tensor element-wise; equivalent to `this * this`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
 * ```
 */
square: () => Tensor;
/**
 * Clips (clamps) the values in the tensor between `min_` and `max_` element-wise.
 * If `min_` is `undefined`, there is no lower bound. If `max_` is `undefined`, there is no upper bound.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
 * ```
 */
clamp: (min_?: number, max_?: number) => Tensor;
/**
 * Alias for `Tensor.clamp`.
 */
clip: (min_?: number, max_?: number) => Tensor;
/**
 * Returns the sign of the tensor element-wise.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
 * ```
 */
sign: () => Tensor;
/**
 * Computes the absolute value of the tensor element-wise.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
 * ```
 */
abs: () => Tensor;
/**
 * Compute `1/x` element-wise.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
 * ```
 */
reciprocal: () => Tensor;
/**
 * Applies the Exponential Linear Unit (ELU) function element-wise.
 *
 * - Described: https://paperswithcode.com/method/elu
 * - Paper: https://arxiv.org/abs/1511.07289v5
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
 * ```
 */
elu: (alpha?: number) => Tensor;
/**
 * Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.
 *
 * - Described: https://paperswithcode.com/method/celu
 * - Paper: https://arxiv.org/abs/1704.07483
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
 * ```
 */
celu: (alpha?: number) => Tensor;
/**
 * Applies the Scaled Exponential Linear Unit (SELU) function element-wise.
 *
 * - Described: https://paperswithcode.com/method/selu
 * - Paper: https://arxiv.org/abs/1706.02515v5
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
 * ```
 */
selu: (alpha?: number, gamma?: number) => Tensor;
/**
 * See `.silu()`
 *
 * - Paper: https://arxiv.org/abs/1710.05941v1
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
 * ```
 */
swish: () => Tensor;
/**
 * Applies the Sigmoid Linear Unit (SiLU) function element-wise.
 *
 * - Described: https://paperswithcode.com/method/silu
 * - Paper: https://arxiv.org/abs/1606.08415
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
 * ```
 */
silu: () => Tensor;
static silu: (x: Tensor) => Tensor;
/**
 * Applies the ReLU6 function element-wise.
 *
 * - Described: https://paperswithcode.com/method/relu6
 * - Paper: https://arxiv.org/abs/1704.04861v1
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
 * ```
 */
relu6: () => Tensor;
/**
 * Applies the Hardswish function element-wise.
 *
 * - Described: https://paperswithcode.com/method/hard-swish
 * - Paper: https://arxiv.org/abs/1905.02244v5
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
 * ```
 */
hardswish: () => Tensor;
/**
 * Applies the Hyperbolic Tangent (tanh) function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Tanh
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
 * ```
 */
tanh: () => Tensor;
/**
 * Applies the Hyperbolic Sine (sinh) function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Sinh
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
 * ```
 */
sinh: () => Tensor;
/**
 * Applies the Hyperbolic Cosine (cosh) function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Cosh
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
 * ```
 */
cosh: () => Tensor;
/**
 * Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#atanh
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
 * ```
 */
atanh: () => Tensor;
/**
 * Applies the Inverse Hyperbolic Sine (asinh) function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#asinh
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
 * ```
 */
asinh: () => Tensor;
/**
 * Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#acosh
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
 * ```
 */
acosh: () => Tensor;
/**
 * Applies the Hardtanh function element-wise.
 *
 * - Described: https://paperswithcode.com/method/hardtanh-activation
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
 * ```
 */
hardtanh: (min_val?: number, max_val?: number) => Tensor;
/**
 * Applies error function element-wise.
 *
 * - Described: https://en.wikipedia.org/wiki/Error_function
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
 * ```
 */
erf: () => Tensor;
/**
 * Applies the Gaussian Error Linear Unit (GELU) function element-wise.
 *
 * - Described: https://paperswithcode.com/method/gelu
 * - Paper: https://arxiv.org/abs/1606.08415v5
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
 * ```
 */
gelu: () => Tensor;
static gelu: (x: Tensor) => Tensor;
/**
 * Applies the Sigmoid GELU approximation element-wise.
 *
 * - Described: https://paperswithcode.com/method/gelu
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
 * ```
 */
quick_gelu: () => Tensor;
/**
 * Applies the Leaky ReLU function element-wise.
 *
 * - Described: https://paperswithcode.com/method/leaky-relu
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu().numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu(neg_slope=0.42).numpy())
 * ```
 */
leakyrelu: (neg_slope?: number) => Tensor;
/**
 * Applies the Mish function element-wise.
 *
 * - Described: https://paperswithcode.com/method/mish
 * - Paper: https://arxiv.org/abs/1908.08681v3
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
 * ```
 */
mish: () => Tensor;
/**
 * Applies the Softplus function element-wise.
 *
 * - Described: https://paperswithcode.com/method/softplus
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
 * ```
 */
softplus: (beta?: number) => Tensor;
/**
 * Applies the Softsign function element-wise.
 *
 * - Described: https://paperswithcode.com/method/softsign
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
 * ```
 */
softsign: () => Tensor;
_broadcast_to: (new_shape: sint[]) => Tensor;
_broadcasted: (y: ConstType, reverse?: boolean, match_dtype?: boolean) => [Tensor, Tensor];
_to_const_val: (x: ConstType) => ConstType;
/**
 * Adds `this` and `x`.
 * Equivalent to `this + x`.
 * Supports broadcasting to a common shape, type promotion, and integer, number, boolean inputs.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * Tensor.manual_seed(42)
 * t = Tensor.randn(4)
 * console.log(t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.add(20).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.add(Tensor([[2.0], [3.5]])).numpy())
 * ```
 */
add: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Subtracts `x` from `this`.
 * Equivalent to `this - x`.
 * Supports broadcasting to a common shape, type promotion, and integer, number, boolean inputs.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * Tensor.manual_seed(42)
 * t = Tensor.randn(4)
 * console.log(t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.sub(20).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.sub(Tensor([[2.0], [3.5]])).numpy())
 * ```
 */
sub: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Multiplies `this` and `x`.
 * Equivalent to `this * x`.
 * Supports broadcasting to a common shape, type promotion, and integer, number, boolean inputs.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * Tensor.manual_seed(42)
 * t = Tensor.randn(4)
 * console.log(t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.mul(3).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.mul(Tensor([[-1.0], [2.0]])).numpy())
 * ```
 */
mul: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Divides `this` by `x`.
 * Equivalent to `this // x`.
 * Supports broadcasting to a common shape, type promotion, and integer inputs.
 * `idiv` performs integer division (truncate towards zero).
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * print(Tensor([-4, 7, 5, 4, -7, 8]).idiv(Tensor([2, -3, 8, -2, 3, 5])).numpy())
 * ```
 */
idiv: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Divides `this` by `x`.
 * Equivalent to `this / x`.
 * Supports broadcasting to a common shape, type promotion, and integer, number, boolean inputs.
 * `div` performs true division.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * Tensor.manual_seed(42)
 * t = Tensor.randn(4)
 * console.log(t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.div(3).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
 * ```
 */
div: (x: ConstType | sint, reverse?: boolean) => Tensor;
/**
 * Mod `self` by `x`.
 * Equivalent to `self % x`.
 * Supports broadcasting to a common shape, type promotion, and integer inputs.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * print(Tensor([-4, 7, 5, 4, -7, 8]).mod(Tensor([2, -3, 8, -2, 3, 5])).numpy())
 * ```
 */
mod: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Computes bitwise xor of `this` and `x`.
 * Equivalent to `this ^ x`.
 * Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, -2, 3]).xor(Tensor([1, 0, 3])).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([true, true, false, false]).xor(Tensor([true, false, true, false])).numpy())
 * ```
 */
xor: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Compute the bit-wise AND of `this` and `x`.
 * Equivalent to `this & x`.
 * Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.
* ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([2, 5, 255]).bitwise_and(Tensor([3, 14, 16])).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([true, true, false, false]).bitwise_and(Tensor([true, false, true, false])).numpy()) * ``` */ bitwise_and: (x: ConstType, reverse?: boolean) => Tensor; /** * Compute the bit-wise OR of `this` && `x`. * Equivalent to `this | x`. * Supports broadcasting to a common shape, type promotion, && integer, boolean inputs. * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([2, 5, 255]).bitwise_or(Tensor([4, 4, 4])).numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([true, true, false, false]).bitwise_or(Tensor([true, false, true, false])).numpy()) * ``` */ bitwise_or: (x: ConstType, reverse?: boolean) => Tensor; /** * Compute the bit-wise NOT of `this`. * Equivalent to `~this`. * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([0, 2, 5, 255], dtype="int8").bitwise_not().numpy()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([true, false]).bitwise_not().numpy()) * ``` */ bitwise_not: () => Tensor; /** * Computes left arithmetic shift of `this` by `x` bits. `this` must have unsigned dtype. * Equivalent to `this << x`. * * ```python exec="true" source="above" session="tensor" result="python" * console.log(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy()) * ``` */ lshift: (x: ConstType) => Tensor; /** * Computes right arithmetic shift of `this` by `x` bits. `this` must have unsigned dtype. * Equivalent to `this >> x`. 
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
 * ```
 */
rshift: (x: ConstType) => Tensor;
/**
 * Computes power of `this` with `x`.
 * Equivalent to `this ** x`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, 2, 3]).pow(2).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log((2 ** Tensor([-1, 2, 3])).numpy())
 * ```
 */
pow: (x: ConstType, reverse?: boolean) => Tensor;
/**
 * Computes element-wise maximum of `this` and `x`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, 2, 3]).maximum(1).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
 * ```
 */
maximum: (x: ConstType) => Tensor;
/**
 * Computes element-wise minimum of `this` and `x`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, 2, 3]).minimum(1).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
 * ```
 */
minimum: (x: ConstType) => Tensor;
/**
 * Return a tensor of elements selected from either `x` or `y`, depending on `this`.
 * `output_i = x_i if this_i else y_i`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * cond = new Tensor([[true, true, false], [true, false, false]])
 * console.log(cond.where(1, 3).numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * Tensor.manual_seed(42)
 * cond = Tensor.randn(2, 3)
 * console.log(cond.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log((cond > 0).where(cond, -Infinity).numpy())
 * ```
 */
where: (x: ConstType, y: ConstType) => Tensor;
masked_fill: (mask: Tensor, value: ConstType) => Tensor;
lt: (x: ConstType) => Tensor;
gt: (x: ConstType) => Tensor;
ne: (x: ConstType) => Tensor;
/**
 * Applies a linear transformation to `this` using `weight` and `bias`.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([[1, 2], [3, 4]])
 * weight = new Tensor([[1, 2], [3, 4]])
 * bias = new Tensor([1, 2])
 * console.log(t.linear(weight, bias).numpy())
 * ```
 */
linear: (weight: Tensor, bias?: Tensor) => Tensor;
/**
 * Applies a sequence of functions to `this` chaining the output of each function to the input of the next.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([1, 2, 3])
 * console.log(t.sequential([lambda x: x * 2, lambda x: x + 1]).numpy())
 * ```
 */
sequential: (ll: Layer[]) => Tensor;
sequentialAsync: (ll: LayerAsync[]) => Promise;
/**
 * Applies Layer Normalization over a mini-batch of inputs.
* * - Described: https://paperswithcode.com/method/layer-normalization * - Paper: https://arxiv.org/abs/1607.06450v1 * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.randn(8, 10, 16) * 2 + 8 * console.log(t.mean().item(), t.std().item()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * t = t.layernorm() * console.log(t.mean().item(), t.std().item()) * ``` */ layernorm: (axis: number | number[] | undefined, eps: number) => Tensor; /** * Applies Batch Normalization over a mini-batch of inputs. * * - Described: https://paperswithcode.com/method/batch-normalization * - Paper: https://arxiv.org/abs/1502.03167 * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor.randn(8, 4, 16, 16) * 2 + 8 * console.log(t.mean().item(), t.std().item()) * ``` * ```python exec="true" source="above" session="tensor" result="python" * t = t.batchnorm(undefined, undefined, t.mean(axis=(0,2,3)), t.var(axis=(0,2,3)).add(1e-5).rsqrt()) * console.log(t.mean().item(), t.std().item()) * ``` */ batchnorm: (weight: undefined | Tensor, bias: undefined | Tensor, mean: Tensor, invstd: Tensor, axis?: number | number[]) => Tensor; /** * Applies dropout to `this`. * * NOTE: dropout === only applied when `Tensor.training` === `true`. * * - Described: https://paperswithcode.com/method/dropout * - Paper: https://jmlr.org/papers/v15/srivastava14a.html * * ```python exec="true" source="above" session="tensor" result="python" * Tensor.manual_seed(42) * t = Tensor.randn(2, 2) * with Tensor.train(): * console.log(t.dropout().numpy()) * ``` */ dropout: (p?: number) => Tensor; _one_hot_along_dim: (num_classes: number, dim?: number) => Tensor; /** * Converts `this` to a one-hot tensor. * * `num_classes` defaults to -1, which means num_classes will be inferred as max(this) + 1. 
* * ```python exec="true" source="above" session="tensor" result="python" * t = new Tensor([0, 1, 3, 3, 4]) * console.log(t.one_hot(5).numpy()) * ``` */ one_hot: (num_classes?: number) => Promise; /** * * Computes scaled dot-product attention. * `self` is the query tensor, `key` is the key tensor, and `value` is the value tensor. * * - Described: https://paperswithcode.com/method/scaled * - Paper: https://arxiv.org/abs/1706.03762v7 * ```python exec="true" source="above" session="tensor" result="python" * q = Tensor.randn(2, 4, 8) * k = Tensor.randn(2, 4, 8) * v = Tensor.randn(2, 4, 8) * print(q.scaled_dot_product_attention(k, v).numpy()) * ``` */ scaled_dot_product_attention: (key: Tensor, value: Tensor, attn_mask?: Tensor, dropout_p?: number, is_causal?: boolean) => Tensor; _do_reduction: (reduction?: ReductionStr) => Tensor; /** * Computes the binary cross-entropy loss between `self` and `Y`. * * See: https://pytorch.org/docs/stable/generated/torch.nn.BCELoss.html * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor([0.1, 0.9, 0.2]) * Y = Tensor([0, 1, 0]) * print(t.binary_crossentropy(Y).item()) * ``` */ binary_crossentropy: (Y: Tensor, reduction?: ReductionStr) => Tensor; /** * Computes the binary cross-entropy loss between `self` and `Y` where `self` is logits. * * See: https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html * * ```python exec="true" source="above" session="tensor" result="python" * t = Tensor([-1, 2, -3]) * Y = Tensor([0, 1, 0]) * print(t.binary_crossentropy_logits(Y).item()) * ``` */ binary_crossentropy_logits: (Y: Tensor, reduction?: ReductionStr) => Tensor; /** * Computes the sparse categorical cross-entropy loss between `this` && `Y`. * * NOTE: `this` === logits && `Y` === the target labels. 
 * NOTE: unlike PyTorch, this function expects the class axis to be -1
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([[-1, 2, -3], [1, -2, 3]])
 * Y = new Tensor([1, 2])
 * console.log(t.sparse_categorical_crossentropy(Y).item())
 * ```
 */
sparse_categorical_crossentropy: (Y: Tensor, ignore_index?: number, label_smoothing?: number, reduction?: ReductionStr) => Tensor;
/**
 * Compute the cross entropy loss between input logits and target.
 *
 * NOTE: `self` are logits and `Y` are the target labels or class probabilities.
 * See: https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = Tensor([[-1, 2, -3], [1, -2, 3]])
 * Y = Tensor([1, 2])
 * print(t.cross_entropy(Y).item())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = Tensor([[-1, 2, -3], [1, -2, 3]])
 * Y = Tensor([1, 2])
 * ```
 */
cross_entropy: (Y: Tensor, reduction?: ReductionStr, label_smoothing?: number) => Tensor;
/**
 * Compute the negative log likelihood loss between log-probabilities and target labels.
 *
 * NOTE: `self` is log-probabilities and `Y` is the Y labels or class probabilities.
 *
 * See: https://pytorch.org/docs/stable/generated/torch.nn.functional.nll_loss.html
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = Tensor([[-1, 2, -3], [1, -2, 3]])
 * Y = Tensor([1, 2])
 * print(t.log_softmax().nll_loss(Y).item())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = Tensor([[-1, 2, -3], [1, -2, 3]])
 * Y = Tensor([1, 2])
 * print(t.log_softmax().nll_loss(Y, reduction='none').numpy())
 * ```
 */
nll_loss: (Y: Tensor, weight?: Tensor, ignore_index?: number, reduction?: ReductionStr) => Tensor;
/**
 * Returns the number of dimensions in the tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([[1, 2], [3, 4]])
 * console.log(t.ndim)
 * ```
 */
get ndim(): number;
/**
 * Returns the total number of elements in the tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
 * console.log(t.numel())
 * ```
 */
numel: () => sint;
/**
 * Returns the size in bytes of an individual element in the tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([5], dtype=dtypes.int16)
 * console.log(t.element_size())
 * ```
 */
element_size: () => number;
/**
 * Returns the total number of bytes of all elements in the tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([8, 9], dtype=dtypes.float)
 * console.log(t.nbytes())
 * ```
 */
nbytes: () => number;
/**
 * Returns `true` if the tensor contains floating point types, i.e. is one of `dtype.float64`, `dtype.float32`,
 * `dtype.float16`, `dtype.bfloat16`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([8, 9], dtype=dtypes.float32)
 * console.log(t.is_floating_point())
 * ```
 */
is_floating_point: () => boolean;
/**
 * Return the size of the tensor. If `dim` is specified, return the length along dimension `dim`. Otherwise return the shape of the tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([[4, 5, 6], [7, 8, 9]])
 * console.log(t.size())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * console.log(t.size(dim=1))
 * ```
 */
size: (dim?: number) => sint | sint[];
llvm_bf16_cast: (dtype: DTypeLike) => Tensor;
/**
 * Casts `this` to the given `dtype`.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([-1, 2.5, 3], dtype=dtypes.float)
 * console.log(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.cast(dtypes.int32)
 * print(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.cast(dtypes.uint8)
 * console.log(t.dtype, t.numpy())
 * ```
 */
cast: (dtype: DTypeLike) => Tensor;
/**
 * Bitcasts `this` to the given `dtype` of the same itemsize.
 *
 * `this` must not require a gradient.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([-1, 2, 3], dtype=dtypes.int32)
 * console.log(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.bitcast(dtypes.uint32)
 * console.log(t.dtype, t.numpy())
 * ```
 */
bitcast: (dtype: DTypeLike) => Tensor;
/**
 * Convenience method to cast `this` to a `float32` Tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([-1, 2, 3], dtype=dtypes.int32)
 * console.log(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.float()
 * console.log(t.dtype, t.numpy())
 * ```
 */
float: () => Tensor;
/**
 * Convenience method to cast `this` to a `float16` Tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([-1, 2, 3], dtype=dtypes.int32)
 * console.log(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.half()
 * console.log(t.dtype, t.numpy())
 * ```
 */
half: () => Tensor;
/**
 * Convenience method to cast `this` to a `int32` Tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
 * console.log(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.int()
 * console.log(t.dtype, t.numpy())
 * ```
 */
int: () => Tensor;
/**
 * Convenience method to cast `this` to a `boolean` Tensor.
 *
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = new Tensor([-1, 0, 1])
 * console.log(t.dtype, t.numpy())
 * ```
 * ```python exec="true" source="above" session="tensor" result="python"
 * t = t.bool()
 * console.log(t.dtype, t.numpy())
 * ```
 */
bool: () => Tensor;
image_dot: (w: Tensor, acc_dtype?: DType) => Tensor;
image_conv2d: (weight: Tensor, bias?: Tensor, groups?: number, stride?: number, dilation?: number | number[], padding?: number | number[], acc_dtype?: DType) => Tensor;
}
export {};