import { vec4, vec3, mat4 } from 'gl-matrix';
import { NIFTI1, NIFTI2, NIFTIEXTENSION } from 'nifti-reader-js';

/** Colormap scaling modes (how values below cal_min are rendered). */
declare enum COLORMAP_TYPE {
  MIN_TO_MAX = 0,
  ZERO_TO_MAX_TRANSPARENT_BELOW_MIN = 1,
  ZERO_TO_MAX_TRANSLUCENT_BELOW_MIN = 2
}
/** Parallel-array colormap: R/G/B/A samples at intensity indices I. */
type ColorMap = {
  R: number[];
  G: number[];
  B: number[];
  A: number[];
  I: number[];
  min?: number;
  max?: number;
  labels?: string[];
};
/** Flattened RGBA lookup table derived from a ColorMap. */
type LUT = {
  lut: Uint8ClampedArray;
  min?: number;
  max?: number;
  labels?: string[];
};
declare class ColorTables {
  gamma: number;
  version: number;
  // NOTE(review): generic arguments were stripped during extraction; restored as a
  // registry of colormaps keyed by name — confirm against upstream niivue typings.
  cluts: Record<string, ColorMap>;
  /**
   * Sets cluts to alphabetically sorted cmaps
   */
  constructor();
  addColormap(key: string, cmap: ColorMap): void;
  colormaps(): Array<string>;
  colorMaps(): Array<string>;
  colormapFromKey(name: string): ColorMap;
  colormap(key?: string, isInvert?: boolean): Uint8ClampedArray;
  makeLabelLut(cm: ColorMap, alphaFill?: number, maxIdx?: number): LUT;
  makeLabelLutFromUrl(name: string, alphaFill?: number, maxIdx?: number): Promise<LUT>;
  makeDrawLut(name: string | ColorMap): LUT;
  makeLut(Rsi: number[], Gsi: number[], Bsi: number[], Asi: number[], Isi: number[], isInvert: boolean): Uint8ClampedArray;
}
declare const cmapper: ColorTables;
declare enum LabelTextAlignment {
  LEFT = "left",
  RIGHT = "right",
  CENTER = "center"
}
declare enum LabelLineTerminator {
  NONE = "none",
  CIRCLE = "circle",
  RING = "ring"
}
declare enum LabelAnchorPoint {
  NONE = 0,
  TOPLEFT = 9,
  TOPCENTER = 10,
  TOPRIGHT = 12,
  MIDDLELEFT = 17,
  MIDDLECENTER = 18,
  MIDDLERIGHT = 20,
  BOTTOMLEFT = 33,
  BOTTOMCENTER = 34,
  BOTTOMRIGHT = 36
}
/**
 * Class representing label style.
 * @ignore
 */
declare class NVLabel3DStyle {
  textColor: number[];
  textScale: number;
  textAlignment?: LabelTextAlignment;
  lineWidth: number;
  lineColor: number[];
  lineTerminator: LabelLineTerminator;
  bulletScale?: number;
  bulletColor?: number[];
  backgroundColor?: number[];
  /**
   * @param textColor - Color of text
   * @param textScale - Text Size (0.0..1.0)
   * @param lineWidth - Line width
   * @param lineColor - Line color
   * @param bulletScale - Bullet size respective of text
   * @param bulletColor - Bullet color
   * @param backgroundColor - Background color of label
   */
  constructor(textColor?: number[], textScale?: number, textAlignment?: LabelTextAlignment, lineWidth?: number, lineColor?: number[], lineTerminator?: LabelLineTerminator, bulletScale?: number, bulletColor?: number[], backgroundColor?: number[]);
}
/**
 * Label class
 * @ignore
 */
declare class NVLabel3D {
  text: string;
  style: NVLabel3DStyle;
  points?: number[] | number[][];
  anchor: LabelAnchorPoint;
  onClick?: (label: NVLabel3D, e?: MouseEvent) => void;
  /**
   * @param text - The text of the label
   * @param style - The style of the label
   * @param points - An array of points label for label lines
   */
  constructor(text: string, style: NVLabel3DStyle, points?: number[] | number[][], anchor?: LabelAnchorPoint, onClick?: (label: NVLabel3D, e?: MouseEvent) => void);
}
/** Parsed NIfTI header fields (see nifti1.h for field semantics). */
type NiftiHeader = {
  littleEndian: boolean;
  dim_info: number;
  dims: number[];
  pixDims: number[];
  intent_p1: number;
  intent_p2: number;
  intent_p3: number;
  intent_code: number;
  datatypeCode: number;
  numBitsPerVoxel: number;
  slice_start: number;
  vox_offset: number;
  scl_slope: number;
  scl_inter: number;
  slice_end: number;
  slice_code: number;
  xyzt_units: number;
  cal_max: number;
  cal_min: number;
  slice_duration: number;
  toffset: number;
  description: string;
  aux_file: string;
  qform_code: number;
  sform_code: number;
  quatern_b: number;
  quatern_c: number;
  quatern_d: number;
  qoffset_x: number;
  qoffset_y: number;
  qoffset_z: number;
  affine: number[][];
  intent_name: string;
  magic: string;
};
// TODO(review): generic arguments lost during extraction; `Record<string, unknown>`
// is a conservative reconstruction — restore the exact type from upstream niivue.
type Volume = Record<string, unknown>;
type Point = {
  comments: Array<{
    text: string;
    prefilled?: string;
  }>;
  coordinates: {
    x: number;
    y: number;
    z: number;
  };
};
/**
 * Represents the vertices of a connectome
 * @ignore
 */
type NVConnectomeNode = {
  name: string;
  x: number;
  y: number;
  z: number;
  colorValue: number;
  sizeValue: number;
  label?: NVLabel3D;
};
/**
 * Represents edges between connectome nodes
 * @ignore
 */
type NVConnectomeEdge = {
  first: number;
  second: number;
  colorValue: number;
};
type ConnectomeOptions = {
  name: string;
  nodeColormap: string;
  nodeColormapNegative: string;
  nodeMinColor: number;
  nodeMaxColor: number;
  nodeScale: number;
  edgeColormap: string;
  edgeColormapNegative: string;
  edgeMin: number;
  edgeMax: number;
  edgeScale: number;
  legendLineThickness?: number;
  showLegend?: boolean;
};
type Connectome = ConnectomeOptions & {
  nodes: NVConnectomeNode[];
  edges: NVConnectomeEdge[];
};
/** Legacy struct-of-arrays node representation. */
type LegacyNodes = {
  names: string[];
  prefilled: unknown[];
  X: number[];
  Y: number[];
  Z: number[];
  Color: number[];
  Size: number[];
};
type LegacyConnectome = Partial<ConnectomeOptions> & {
  nodes: LegacyNodes;
  edges: number[];
};
type DragReleaseParams = {
  fracStart: vec3;
  fracEnd: vec3;
  voxStart: vec3;
  voxEnd: vec3;
  mmStart: vec4;
  mmEnd: vec4;
  mmLength: number;
  tileIdx: number;
  axCorSag: SLICE_TYPE;
};
type NiiVueLocationValue = {
  id: string;
  mm: vec4;
  name: string;
  value: number;
  vox: vec3;
};
type NiiVueLocation = {
  axCorSag: number;
  frac: vec3;
  mm: vec4;
  string: string;
  values: NiiVueLocationValue[];
  vox: vec3;
  xy: [number, number];
};
/** Which view properties are synchronized between linked Niivue instances. */
type SyncOpts = {
  '3d'?: boolean;
  '2d'?: boolean;
  zoomPan?: boolean;
  cal_min?: boolean;
  cal_max?: boolean;
  gamma?: boolean;
  useSliceOffset?: boolean;
  sliceType?: boolean;
  crosshair?: boolean;
  clipPlane?: boolean;
};
/** Transient mouse/touch interaction state. */
type UIData = {
  mousedown: boolean;
  touchdown: boolean;
  mouseButtonLeftDown: boolean;
  mouseButtonCenterDown: boolean;
  mouseButtonRightDown: boolean;
  mouseDepthPicker: boolean;
  clickedTile: number;
  pan2DxyzmmAtMouseDown: vec4;
  prevX: number;
  prevY: number;
  currX: number;
  currY: number;
  currentTouchTime: number;
  lastTouchTime: number;
  touchTimer: NodeJS.Timeout | null;
  doubleTouch: boolean;
  isDragging: boolean;
  dragStart: number[];
  dragEnd: number[];
  dragClipPlaneStartDepthAziElev: number[];
  lastTwoTouchDistance: number;
  multiTouchGesture: boolean;
  dpr?: number;
  max2D?: number;
  max3D?: number;
  windowX: number;
  windowY: number;
  activeDragMode: DRAG_MODE | null;
  activeDragButton: number | null;
  angleFirstLine: number[];
  angleState: 'none' | 'drawing_first_line' | 'drawing_second_line' | 'complete';
  activeClipPlaneIndex: number;
};
type FontMetrics = {
  distanceRange: number;
  size: number;
  // TODO(review): generic arguments lost during extraction; per-glyph metrics
  // keyed by codepoint — restore the exact value type from upstream niivue.
  mets: Record<number, unknown>;
};
type ColormapListEntry = {
  name: string;
  min: number;
  max: number;
  isColorbarFromZero: boolean;
  negative: boolean;
  visible: boolean;
  invert: boolean;
};
type Graph = {
  LTWH: number[];
  plotLTWH?: number[];
  opacity: number;
  vols: number[];
  autoSizeMultiplanar: boolean;
  normalizeValues: boolean;
  isRangeCalMinMax: boolean;
  backColor?: number[];
  lineColor?: number[];
  textColor?: number[];
  lineThickness?: number;
  gridLineThickness?: number;
  lineAlpha?: number;
  lines?: number[][];
  selectedColumn?: number;
  lineRGB?: number[][];
};
/** Descriptive statistics for a volume (or masked region). */
type Descriptive = {
  mean: number;
  stdev: number;
  nvox: number;
  volumeMM3: number;
  volumeML: number;
  min: number;
  max: number;
  meanNot0: number;
  stdevNot0: number;
  nvoxNot0: number;
  minNot0: number;
  maxNot0: number;
  cal_min: number;
  cal_max: number;
  robust_min: number;
  robust_max: number;
  area: number | null;
};
type SliceScale = {
  volScale: number[];
  vox: number[];
  longestAxis: number;
  dimsMM: vec3;
};
type MvpMatrix2D = {
  modelViewProjectionMatrix: mat4;
  modelMatrix: mat4;
  normalMatrix: mat4;
  leftTopMM: number[];
  fovMM: number[];
};
type MM = {
  mnMM: vec3;
  mxMM: vec3;
  rotation: mat4;
  fovMM: vec3;
};
type SaveImageOptions = {
  filename: string;
  isSaveDrawing: boolean;
  volumeByIndex: number;
};
/**
 * Enum for NIfTI datatype codes
 * // https://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h
 */
declare enum NiiDataType { DT_NONE = 0, DT_BINARY = 1, DT_UINT8 = 2, DT_INT16 = 4, DT_INT32 = 8, DT_FLOAT32 = 16, DT_COMPLEX64 = 32, DT_FLOAT64 = 64, DT_RGB24 = 128, DT_INT8 = 256, DT_UINT16 = 512, DT_UINT32 = 768, DT_INT64 = 1024, DT_UINT64 = 1280, DT_FLOAT128 = 1536, DT_COMPLEX128 = 1792, DT_COMPLEX256 = 2048, DT_RGBA32 = 2304 } /** * Enum for supported image types (e.g. NII, NRRD, DICOM) */ declare enum ImageType { UNKNOWN = 0, NII = 1, DCM = 2, DCM_MANIFEST = 3, MIH = 4, MIF = 5, NHDR = 6, NRRD = 7, MHD = 8, MHA = 9, MGH = 10, MGZ = 11, V = 12, V16 = 13, VMR = 14, HEAD = 15, DCM_FOLDER = 16, SRC = 17, FIB = 18, BMP = 19, ZARR = 20, NPY = 21, NPZ = 22, HDR = 23 } type ImageFromUrlOptions = { url: string; urlImageData?: string; headers?: Record; name?: string; colorMap?: string; colormap?: string; opacity?: number; cal_min?: number; cal_max?: number; trustCalMinMax?: boolean; percentileFrac?: number; useQFormNotSForm?: boolean; alphaThreshold?: boolean; colormapNegative?: string; colorMapNegative?: string; cal_minNeg?: number; cal_maxNeg?: number; colorbarVisible?: boolean; ignoreZeroVoxels?: boolean; imageType?: ImageType; frame4D?: number; colormapLabel?: LUT | null; pairedImgData?: null; limitFrames4D?: number; isManifest?: boolean; urlImgData?: string; buffer?: ArrayBuffer; zarrLevel?: number; zarrMaxVolumeSize?: number; zarrChannel?: number; /** Convert OME spatial units to millimeters for NIfTI compatibility (default: true) */ zarrConvertUnits?: boolean; /** World-space center [x, y, z] in mm where the zarr volume center should be positioned */ zarrCenterMM?: [number, number, number]; }; type ImageFromFileOptions = { file: File | File[]; name?: string; colormap?: string; opacity?: number; urlImgData?: File | null | FileSystemEntry; cal_min?: number; cal_max?: number; trustCalMinMax?: boolean; percentileFrac?: number; ignoreZeroVoxels?: boolean; useQFormNotSForm?: boolean; colormapNegative?: string; imageType?: ImageType; frame4D?: number; limitFrames4D?: 
number; }; type ImageFromBase64 = { base64: string; name?: string; colormap?: string; opacity?: number; cal_min?: number; cal_max?: number; trustCalMinMax?: boolean; percentileFrac?: number; ignoreZeroVoxels?: boolean; useQFormNotSForm?: boolean; colormapNegative?: string; frame4D?: number; imageType?: ImageType; cal_minNeg?: number; cal_maxNeg?: number; colorbarVisible?: boolean; colormapLabel?: LUT | null; }; type ImageMetadata = { id: string; datatypeCode: number; nx: number; ny: number; nz: number; nt: number; dx: number; dy: number; dz: number; dt: number; bpv: number; }; declare const NVImageFromUrlOptions: (url: string, urlImageData?: string, name?: string, colormap?: string, opacity?: number, cal_min?: number, cal_max?: number, trustCalMinMax?: boolean, percentileFrac?: number, ignoreZeroVoxels?: boolean, useQFormNotSForm?: boolean, colormapNegative?: string, frame4D?: number, imageType?: ImageType, cal_minNeg?: number, cal_maxNeg?: number, colorbarVisible?: boolean, alphaThreshold?: boolean, colormapLabel?: any) => ImageFromUrlOptions; /** * ZarrChunkCache - LRU cache for zarr chunks (TypedArrays). * * LRU cache that stores TypedArrays. * TypedArrays are garbage collected automatically, so no explicit cleanup needed. */ type TypedArray = Uint8Array | Uint16Array | Int16Array | Int32Array | Uint32Array | Float32Array | Float64Array; declare class ZarrChunkCache { private cache; private loadingSet; private maxChunks; constructor(maxChunks?: number); /** * Generate a unique key for a chunk. * Format: "name:level/x/y" for 2D or "name:level/x/y/z" for 3D */ static getKey(name: string, level: number, x: number, y: number, z?: number): string; /** * Check if a chunk is in the cache */ has(key: string): boolean; /** * Get a chunk from the cache. * Also moves the entry to the end (most recently used). */ get(key: string): TypedArray | undefined; /** * Store a chunk in the cache. * Evicts oldest entries if capacity is exceeded. 
*/ set(key: string, chunk: TypedArray): void; /** * Check if a chunk is currently being loaded */ isLoading(key: string): boolean; /** * Mark a chunk as loading (to prevent duplicate requests) */ startLoading(key: string): void; /** * Mark a chunk as done loading */ doneLoading(key: string): void; /** * Get the number of cached chunks */ get size(): number; /** * Get the number of chunks currently loading */ get loadingCount(): number; /** * Clear the entire cache */ clear(): void; /** * Delete a specific chunk from cache */ delete(key: string): boolean; /** * Get all cached keys */ keys(): IterableIterator; } /** * ZarrChunkClient - HTTP client for fetching zarr array data using zarrita.js. * * Handles pyramid discovery and chunk fetching for OME-ZARR and regular zarr stores. */ interface ZarrChunkClientConfig { /** Base URL for zarr store (e.g., "http://localhost:8090/lightsheet.zarr") */ baseUrl: string; } interface ZarrPyramidLevel { /** Level index (0 = highest resolution) */ index: number; /** Path to this level in the zarr hierarchy (e.g., "/0", "/1") */ path: string; /** Spatial-only shape in OME metadata order (non-spatial dims stripped) */ shape: number[]; /** Spatial-only chunk dimensions matching shape order */ chunks: number[]; /** Data type (e.g., "uint8", "uint16", "float32") */ dtype: string; /** Physical scale factors per spatial axis in OME metadata order from coordinateTransformations */ scales?: number[]; /** Physical translation offsets per spatial axis in OME metadata order from coordinateTransformations */ translations?: number[]; } /** * Mapping from spatial chunk coordinates to full zarr array chunk coordinates. * Handles non-spatial dimensions like channel (c) and time (t). 
*/ interface AxisMapping { /** Total number of dimensions in the original zarr array */ originalNdim: number; /** Indices of spatial axes in the original array, in OME metadata order */ spatialIndices: number[]; /** Names of spatial axes in OME metadata order (e.g., ['x', 'y', 'z'] or ['z', 'y', 'x']) */ spatialAxisNames: string[]; /** Non-spatial axes: their index in the original array, chunk size, and default chunk coord */ nonSpatialAxes: Array<{ index: number; name: string; chunkSize: number; defaultChunkCoord: number; }>; } interface ZarrPyramidInfo { /** Name/URL of the zarr store */ name: string; /** Pyramid levels (index 0 = highest resolution) */ levels: ZarrPyramidLevel[]; /** Whether this is a 3D dataset (based on spatial dimensions) */ is3D: boolean; /** Number of spatial dimensions (2 or 3) */ ndim: number; /** Mapping from spatial to full array coordinates */ axisMapping: AxisMapping; /** Units for spatial axes in OME metadata order (e.g., "micrometer", "millimeter") */ spatialUnits?: string[]; } interface ChunkCoord { /** Pyramid level */ level: number; /** Chunk X index */ x: number; /** Chunk Y index */ y: number; /** Chunk Z index (for 3D) */ z?: number; } declare class ZarrChunkClient { private store; private baseUrl; private arrays; /** Maps level index to actual path in the zarr store */ private levelPaths; /** Axis mapping for coordinate translation */ private axisMapping; constructor(config: ZarrChunkClientConfig); /** * Discover pyramid structure by reading OME-ZARR multiscales metadata, * or falling back to probing for arrays at /0, /1, /2, etc. */ fetchInfo(): Promise; /** * Build axis mapping from OME axes metadata or infer from array dimensions. * Identifies spatial (x, y, z) vs non-spatial (c, t) dimensions and returns * indices for extracting spatial-only shape/chunks. * Spatial indices are kept in the original OME metadata order (NOT reordered). */ private buildAxisMapping; /** * Open a zarr array at a specific pyramid level. 
* Uses cached arrays when available. */ private openLevel; /** * Fetch a single chunk by spatial coordinates. * Uses the axis mapping to build full chunk coordinates including non-spatial dims. * Returns the spatial-only decoded TypedArray data. * * @param level - Pyramid level * @param x - Spatial X chunk index * @param y - Spatial Y chunk index * @param z - Spatial Z chunk index (for 3D) * @param nonSpatialCoords - Optional overrides for non-spatial dimensions (e.g., channel index) */ fetchChunk(level: number, x: number, y: number, z?: number, nonSpatialCoords?: Record, signal?: AbortSignal): Promise; /** * Fetch multiple chunks in parallel. * Returns a Map from chunk key to TypedArray. */ fetchChunks(name: string, level: number, coords: ChunkCoord[]): Promise>; /** * Fetch a rectangular region using zarr.get with slices. * Useful for fetching exact viewport regions rather than whole chunks. * Uses axis mapping to handle non-spatial dimensions. */ fetchRegion(level: number, region: { xStart: number; xEnd: number; yStart: number; yEnd: number; zStart?: number; zEnd?: number; }): Promise<{ data: TypedArray; shape: number[]; } | null>; /** * Get the zarr store URL */ getUrl(): string; /** * Clear cached array references */ clearArrayCache(): void; } /** * NVZarrHelper - Simplified zarr chunk management for NVImage. * * Attaches to a host NVImage and manages chunked loading of OME-Zarr data. * No zoom, no prefetching - just pan and level switching. * All coordinates are in current-level pixel space. * * Spatial dimensions are kept in OME metadata order throughout. * The mapping to NIfTI layout is: * - OME dim[0] (slowest in C-order) → NIfTI dim 3 (depth, slowest in Fortran-order) * - OME dim[1] → NIfTI dim 2 (height) * - OME dim[2] (fastest in C-order) → NIfTI dim 1 (width, fastest in Fortran-order) * This means chunk data can be copied directly without stride remapping. 
* The affine matrix maps NIfTI (i, j, k) indices to physical (x, y, z) space * using the OME axis names. */ interface NVZarrHelperOptions { url: string; level: number; maxVolumeSize?: number; maxTextureSize?: number; channel?: number; cacheSize?: number; /** Convert OME spatial units to millimeters for NIfTI compatibility (default: true) */ convertUnitsToMm?: boolean; } declare class NVZarrHelper { private hostImage; private chunkClient; private chunkCache; private pyramidInfo; private datatypeCode; private pyramidLevel; /** Level dimensions in OME metadata order: depth=dim[0], height=dim[1], width=dim[2] */ private levelDims; private volumeDims; private chunkSize; /** Voxel scales in OME metadata order: depth=dim[0], height=dim[1], width=dim[2] */ private voxelScales; /** Voxel translations in OME metadata order */ private voxelTranslations; private hasTranslations; private convertUnitsToMm; private worldOffsetMM; private centerX; private centerY; private centerZ; private channel; private nonSpatialCoords; private isUpdating; private needsUpdate; private currentAbortController; private runningMin; private runningMax; private calibrationDone; private updateDebounceTimer; private readonly UPDATE_DEBOUNCE_MS; private pendingChunkCount; private lastRenderedChunkCount; centerAtDragStart: { x: number; y: number; z: number; } | null; onChunksUpdated?: () => void; onAllChunksLoaded?: () => void; private constructor(); static create(hostImage: NVImage, url: string, options: NVZarrHelperOptions): Promise; loadInitialChunks(): Promise; private updateLevelInfo; private configureHostImage; /** Get unit-converted voxel scales */ private getConvertedScales; /** Get unit-converted voxel translations */ private getConvertedTranslations; /** * Build the NIfTI affine from OME axis names, scales, and translations. 
* * NIfTI dimensions map to OME spatial dimensions as: * i (dim 1, width) = OME spatial[-1] (last, fastest in C-order) * j (dim 2, height) = OME spatial[-2] * k (dim 3, depth) = OME spatial[-3] (first, slowest in C-order) * * The affine maps (i, j, k) → physical (x, y, z): * physical_axis = scale * nifti_dim + translation * where nifti_dim is the column index (0=i, 1=j, 2=k) and * physical_axis row is determined by the OME axis name. */ private updateAffine; beginDrag(): void; endDrag(): void; panBy(dx: number, dy: number, dz?: number): Promise; panTo(newCenterX: number, newCenterY: number, newCenterZ?: number): Promise; setPyramidLevel(level: number): Promise; getViewportState(): { centerX: number; centerY: number; centerZ: number; level: number; }; getPyramidInfo(): ZarrPyramidInfo; getPyramidLevel(): number; getLevelDims(): { width: number; height: number; depth: number; }; getVolumeDims(): { width: number; height: number; depth: number; }; getWorldOffset(): [number, number, number]; /** * Set the world-space offset so the full level's center maps to targetMM in world space. * Computes the native physical center of the zarr level, then sets worldOffsetMM * so that center aligns with targetMM. Also centers the viewport on the level center. */ setWorldCenter(targetMM: [number, number, number]): void; /** * Convert physical (mm) coordinates back to real zarr level pixel coordinates. * Inverts the affine: levelPixel = (mm - OME_translation) / scale */ mmToLevelCoords(mmX: number, mmY: number, mmZ: number): { width: number; height: number; depth: number; level: number; levelDims: { width: number; height: number; depth: number; }; }; private clampCenter; private getVisibleChunks; private updateVolume; private clearVolumeData; private assembleVisibleChunks; private assembleChunkIntoVolume; private updateCalibration; /** * Schedule a debounced chunks update callback. * Batches multiple chunk arrivals within UPDATE_DEBOUNCE_MS into a single GPU update. 
*/ private scheduleChunksUpdated; clearCache(): void; refresh(): Promise; } /** * Represents an affine transformation in decomposed form. */ interface AffineTransform { translation: [number, number, number]; rotation: [number, number, number]; scale: [number, number, number]; } /** * Identity transform with no translation, rotation, or scale change. */ declare const identityTransform: AffineTransform; /** * Convert degrees to radians. */ declare function degToRad(degrees: number): number; /** * Create a rotation matrix from Euler angles (XYZ order). * Angles are in degrees. */ declare function eulerToRotationMatrix(rx: number, ry: number, rz: number): mat4; /** * Create a 4x4 transformation matrix from decomposed transform components. * Order: Scale -> Rotate -> Translate */ declare function createTransformMatrix(transform: AffineTransform): mat4; /** * Convert a 2D array (row-major, as used by NIfTI) to gl-matrix mat4 (column-major). */ declare function arrayToMat4(arr: number[][]): mat4; /** * Convert gl-matrix mat4 (column-major) to 2D array (row-major, as used by NIfTI). */ declare function mat4ToArray(m: mat4): number[][]; /** * Multiply a transformation matrix by an affine matrix (as 2D array). * Returns the result as a 2D array. * * The transform is applied to the left: result = transform * original * This means the transform happens in world coordinate space. */ declare function multiplyAffine(original: number[][], transform: mat4): number[][]; /** * Deep copy a 2D affine matrix array. */ declare function copyAffine(affine: number[][]): number[][]; /** * Check if two transforms are approximately equal. 
*/ declare function transformsEqual(a: AffineTransform, b: AffineTransform, epsilon?: number): boolean; type TypedVoxelArray = Float32Array | Uint8Array | Int16Array | Float64Array | Uint16Array | Int32Array | Uint32Array; /** * a NVImage encapsulates some image data and provides methods to query and operate on images */ declare class NVImage { name: string; id: string; url?: string; headers?: Record; _colormap: string; _opacity: number; percentileFrac: number; ignoreZeroVoxels: boolean; trustCalMinMax: boolean; colormapNegative: string; colormapLabel: LUT | null; colormapInvert?: boolean; nFrame4D?: number; frame4D: number; nTotalFrame4D?: number; cal_minNeg: number; cal_maxNeg: number; colorbarVisible: boolean; modulationImage: number | null; modulateAlpha: number; series: any; nVox3D?: number; oblique_angle?: number; maxShearDeg?: number; useQFormNotSForm: boolean; colormapType?: number; pixDims?: number[]; matRAS?: mat4; pixDimsRAS?: number[]; obliqueRAS?: mat4; dimsRAS?: number[]; permRAS?: number[]; img2RASstep?: number[]; img2RASstart?: number[]; toRAS?: mat4; toRASvox?: mat4; frac2mm?: mat4; frac2mmOrtho?: mat4; extentsMinOrtho?: number[]; extentsMaxOrtho?: number[]; mm2ortho?: mat4; hdr: NIFTI1 | NIFTI2 | null; extensions?: NIFTIEXTENSION[]; imageType?: ImageType; img?: TypedVoxelArray; imaginary?: Float32Array; v1?: Float32Array; fileObject?: File | File[]; dims?: number[]; onColormapChange: (img: NVImage) => void; onOpacityChange: (img: NVImage) => void; zarrHelper: NVZarrHelper | null; _hasExplicitZarrCenter: boolean; mm000?: vec3; mm100?: vec3; mm010?: vec3; mm001?: vec3; cal_min?: number; cal_max?: number; robust_min?: number; robust_max?: number; global_min?: number; global_max?: number; urlImgData?: string; isManifest?: boolean; limitFrames4D?: number; originalAffine?: number[][]; constructor(dataBuffer?: ArrayBuffer | ArrayBuffer[] | ArrayBufferLike | null, name?: string, colormap?: string, opacity?: number, pairedImgData?: ArrayBuffer | null, 
cal_min?: number, cal_max?: number, trustCalMinMax?: boolean, percentileFrac?: number, ignoreZeroVoxels?: boolean, useQFormNotSForm?: boolean, colormapNegative?: string, frame4D?: number, imageType?: ImageType, cal_minNeg?: number, cal_maxNeg?: number, colorbarVisible?: boolean, colormapLabel?: LUT | null, colormapType?: number); init(dataBuffer?: ArrayBuffer | ArrayBuffer[] | ArrayBufferLike | null, name?: string, colormap?: string, opacity?: number, _pairedImgData?: ArrayBuffer | null, cal_min?: number, cal_max?: number, trustCalMinMax?: boolean, percentileFrac?: number, ignoreZeroVoxels?: boolean, useQFormNotSForm?: boolean, colormapNegative?: string, frame4D?: number, imageType?: ImageType, cal_minNeg?: number, cal_maxNeg?: number, colorbarVisible?: boolean, colormapLabel?: LUT | null, colormapType?: number, imgRaw?: ArrayBuffer | ArrayBufferLike | null): void; static new(dataBuffer: ArrayBuffer | ArrayBuffer[] | ArrayBufferLike | null, name: string, colormap: string, opacity: number, pairedImgData: ArrayBuffer | null, cal_min: number, cal_max: number, trustCalMinMax: boolean, percentileFrac: number, ignoreZeroVoxels: boolean, useQFormNotSForm: boolean, colormapNegative: string, frame4D: number, imageType: ImageType, cal_minNeg: number, cal_maxNeg: number, colorbarVisible: boolean, colormapLabel: LUT | null, colormapType: number, zarrData: null | unknown): Promise; computeObliqueAngle(mtx44: mat4): number; float32V1asRGBA(inImg: Float32Array): Uint8Array; loadImgV1(isFlipX?: boolean, isFlipY?: boolean, isFlipZ?: boolean): boolean; calculateOblique(): void; readECAT(buffer: ArrayBuffer): ArrayBuffer; readV16(buffer: ArrayBuffer): ArrayBuffer; readNPY(buffer: ArrayBuffer): Promise; readNPZ(buffer: ArrayBuffer): Promise; imageDataFromArrayBuffer(buffer: ArrayBuffer): Promise; readBMP(buffer: ArrayBuffer): Promise; readZARR(buffer: ArrayBuffer, zarrData: unknown): Promise; readVMR(buffer: ArrayBuffer): ArrayBuffer; readFIB(buffer: ArrayBuffer): 
Promise<[ArrayBuffer, Float32Array]>; readSRC(buffer: ArrayBuffer): Promise; readHEAD(dataBuffer: ArrayBuffer, pairedImgData: ArrayBuffer | null): Promise; readMHA(buffer: ArrayBuffer, pairedImgData: ArrayBuffer | null): Promise; readMIF(buffer: ArrayBuffer, pairedImgData: ArrayBuffer | null): Promise; calculateRAS(): void; /** * Get a deep copy of the current affine matrix. * @returns A 4x4 affine matrix as a 2D array (row-major) */ getAffine(): number[][]; /** * Set a new affine matrix and recalculate all derived RAS matrices. * Call updateGLVolume() on the Niivue instance after this to update rendering. * @param affine - A 4x4 affine matrix as a 2D array (row-major) */ setAffine(affine: number[][]): void; /** * Apply a transform (translation, rotation, scale) to the current affine matrix. * The transform is applied in world coordinate space: newAffine = transform * currentAffine * Call updateGLVolume() on the Niivue instance after this to update rendering. * @param transform - Transform to apply with translation (mm), rotation (degrees), and scale */ applyTransform(transform: AffineTransform): void; /** * Reset the affine matrix to its original state when the image was first loaded. * Call updateGLVolume() on the Niivue instance after this to update rendering. 
*/ resetAffine(): void; hdr2RAS(nVolumes?: number): Promise; img2RAS(nVolume?: number): TypedVoxelArray; vox2mm(XYZ: number[], mtx: mat4): vec3; mm2vox(mm: number[], frac?: boolean): Float32Array | vec3; arrayEquals(a: unknown[], b: unknown[]): boolean; setColormap(cm: string): void; setColormapLabel(cm: ColorMap): void; setColormapLabelFromUrl(url: string): Promise; get colormap(): string; get colorMap(): string; set colormap(cm: string); set colorMap(cm: string); get opacity(): number; set opacity(opacity: number); /** * set contrast/brightness to robust range (2%..98%) * @param vol - volume for estimate (use -1 to use estimate on all loaded volumes; use INFINITY for current volume) * @param isBorder - if true (default) only center of volume used for estimate * @returns volume brightness and returns array [pct2, pct98, mnScale, mxScale] * @see {@link https://niivue.com/demos/features/timeseries2.html | live demo usage} */ calMinMax(vol?: number, isBorder?: boolean): number[]; intensityRaw2Scaled(raw: number): number; intensityScaled2Raw(scaled: number): number; /** * Converts NVImage to NIfTI compliant byte array, potentially compressed. * Delegates to ImageWriter.saveToUint8Array. */ saveToUint8Array(fnm: string, drawing8?: Uint8Array | null): Promise; /** * save image as NIfTI volume and trigger download. * Delegates to ImageWriter.saveToDisk. 
*/ saveToDisk(fnm?: string, drawing8?: Uint8Array | null): Promise; static fetchDicomData(url: string, headers?: Record): Promise>; static readFirstDecompressedBytes(stream: ReadableStream, minBytes: number): Promise; static extractFilenameFromUrl(url: string): string | null; static loadInitialVolumesGz(url?: string, headers?: {}, limitFrames4D?: number): Promise; static loadInitialVolumes(url?: string, headers?: {}, limitFrames4D?: number): Promise; /** * factory function to load and return a new NVImage instance from a given URL */ static loadFromUrl({ url, urlImgData, headers, name, colormap, opacity, cal_min, cal_max, trustCalMinMax, percentileFrac, ignoreZeroVoxels, useQFormNotSForm, colormapNegative, frame4D, isManifest, limitFrames4D, imageType, colorbarVisible, buffer, zarrLevel, zarrMaxVolumeSize, zarrChannel, zarrConvertUnits, zarrCenterMM }?: Partial> & { url?: string | Uint8Array | ArrayBuffer; }): Promise; /** * Factory method: create a chunked zarr NVImage with an attached NVZarrHelper. */ static createChunkedZarr(url: string, options: { level: number; maxVolumeSize?: number; maxTextureSize?: number; channel?: number; cacheSize?: number; convertUnitsToMm?: boolean; colormap?: string; opacity?: number; zarrCenterMM?: [number, number, number]; }): Promise; static readFileAsync(file: File, bytesToLoad?: number): Promise; /** * factory function to load and return a new NVImage instance from a file in the browser */ static loadFromFile({ file, // file can be an array of file objects or a single file object name, colormap, opacity, urlImgData, cal_min, cal_max, trustCalMinMax, percentileFrac, ignoreZeroVoxels, useQFormNotSForm, colormapNegative, frame4D, limitFrames4D, imageType }: ImageFromFileOptions): Promise; /** * Creates a Uint8Array representing a NIFTI file (header + optional image data). * Delegates to ImageWriter.createNiftiArray. 
*/ static createNiftiArray(dims?: number[], pixDims?: number[], affine?: number[], datatypeCode?: NiiDataType, img?: TypedVoxelArray | Uint8Array): Uint8Array; /** * Creates a NIFTI1 header object with basic properties. * Delegates to ImageWriter.createNiftiHeader. */ static createNiftiHeader(dims?: number[], pixDims?: number[], affine?: number[], datatypeCode?: NiiDataType): NIFTI1; /** * read a 3D slab of voxels from a volume * @see {@link https://niivue.com/demos/features/slab_selection.html | live demo usage} */ /** * read a 3D slab of voxels from a volume, specified in RAS coordinates. * Delegates to VolumeUtils.getVolumeData. */ getVolumeData(voxStart?: number[], voxEnd?: number[], dataType?: string): [TypedVoxelArray, number[]]; /** * write a 3D slab of voxels from a volume * @see {@link https://niivue.com/demos/features/slab_selection.html | live demo usage} */ /** * write a 3D slab of voxels from a volume, specified in RAS coordinates. * Delegates to VolumeUtils.setVolumeData. * Input slabData is assumed to be in the correct raw data type for the target image. 
*/ setVolumeData(voxStart?: number[], voxEnd?: number[], img?: TypedVoxelArray): void; /** * factory function to load and return a new NVImage instance from a base64 encoded string * @example * myImage = NVImage.loadFromBase64('SomeBase64String') */ static loadFromBase64({ base64, name, colormap, opacity, cal_min, cal_max, trustCalMinMax, percentileFrac, ignoreZeroVoxels, useQFormNotSForm, colormapNegative, frame4D, imageType, cal_minNeg, cal_maxNeg, colorbarVisible, colormapLabel }: ImageFromBase64): Promise; /** * make a clone of a NVImage instance and return a new NVImage * @example * myImage = NVImage.loadFromFile(SomeFileObject) // files can be from dialogs or drag and drop * clonedImage = myImage.clone() */ clone(): NVImage; /** * fill a NVImage instance with zeros for the image data * @example * myImage = NVImage.loadFromFile(SomeFileObject) // files can be from dialogs or drag and drop * clonedImageWithZeros = myImage.clone().zeroImage() */ zeroImage(): void; /** * get nifti specific metadata about the image */ getImageMetadata(): ImageMetadata; /** * a factory function to make a zero filled image given a NVImage as a reference * @example * myImage = NVImage.loadFromFile(SomeFileObject) // files can be from dialogs or drag and drop * newZeroImage = NVImage.zerosLike(myImage) */ static zerosLike(nvImage: NVImage, dataType?: string): NVImage; /** * Returns voxel intensity at specific native coordinates. * Delegates to VolumeUtils.getValue. */ getValue(x: number, y: number, z: number, frame4D?: number, isReadImaginary?: boolean): number; /** * Returns voxel intensities at specific native coordinates. * Delegates to VolumeUtils.getValue. */ getValues(x: number, y: number, z: number, frame4D?: number, isReadImaginary?: boolean): number[]; /** * Update options for image */ applyOptionsUpdate(options: ImageFromUrlOptions): void; getImageOptions(): ImageFromUrlOptions; /** * Converts NVImage to NIfTI compliant byte array. 
* Handles potential re-orientation of drawing data. * Delegates to ImageWriter.toUint8Array. */ toUint8Array(drawingBytes?: Uint8Array | null): Uint8Array;
// Coordinate-space conversion helpers between voxel indices, fractional (0..1)
// volume coordinates and millimeter coordinates (naming follows convert<From>2<To>).
convertVox2Frac(vox: vec3): vec3; convertFrac2Vox(frac: vec3): vec3; convertFrac2MM(frac: vec3, isForceSliceMM?: boolean): vec4; convertMM2Frac(mm: vec3 | vec4, isForceSliceMM?: boolean): vec3; }
// A list of named scalar series, each with optional global and calibration ranges.
type ValuesArray = Array<{ id: string; vals: Float32Array; global_min?: number; global_max?: number; cal_min?: number; cal_max?: number; }>;
// Any plain or typed numeric array accepted for per-voxel / per-vertex values.
type AnyNumberArray = number[] | Float64Array | Float32Array | Uint32Array | Uint16Array | Uint8Array | Int32Array | Int16Array | Int8Array;
// Generic triangulated mesh: vertex positions, triangle indices, optional colors.
type DefaultMeshType = { positions: Float32Array; indices: Uint32Array; colors?: Float32Array; };
// Parsed tractography/mesh file formats. `pts` holds packed coordinates;
// `offsetPt0` presumably indexes each streamline's first point -- TODO confirm.
type TRACT = { pts: Float32Array; offsetPt0: Uint32Array; dps: ValuesArray; }; type TT = { pts: Float32Array; offsetPt0: Uint32Array; }; type TRX = { pts: Float32Array; offsetPt0: Uint32Array; dpg: ValuesArray; dps: ValuesArray; dpv: ValuesArray; groups: ValuesArray; header: unknown; }; type TRK = { pts: Float32Array; offsetPt0: Uint32Array; dps: ValuesArray; dpv: ValuesArray; }; type TCK = { pts: Float32Array; offsetPt0: Uint32Array; }; type VTK = DefaultMeshType | { pts: Float32Array; offsetPt0: Uint32Array; }; type ANNOT = Uint32Array | { scalars: Float32Array; colormapLabel: LUT; }; type MZ3 = { positions: Float32Array | null; indices: Uint32Array | null; scalars: Float32Array; colors: Float32Array | null; } | { scalars: Float32Array; colormapLabel: LUT; } | { scalars: Float32Array; }; type GII = { scalars: Float32Array; positions?: Float32Array; indices?: Uint32Array; colormapLabel?: LUT; anatomicalStructurePrimary: string; }; type MGH = AnyNumberArray | { scalars: AnyNumberArray; colormapLabel: LUT; }; type X3D = { positions: Float32Array; indices: Uint32Array; rgba255: Uint8Array; };
/** Kind of renderable surface data (NOTE(review): original comment said "Enum for text alignment", which does not match these members). */ declare enum MeshType { MESH = "mesh", CONNECTOME = "connectome", FIBER = "fiber" }
// A scalar overlay layer draped over a mesh.
type NVMeshLayer = { name?: string; key?:
string; url?: string; headers?: Record; opacity: number; colormap: string; colormapNegative?: string; colormapInvert?: boolean; colormapLabel?: ColorMap | LUT; useNegativeCmap?: boolean; global_min?: number; global_max?: number; cal_min: number; cal_max: number; cal_minNeg: number; cal_maxNeg: number; isAdditiveBlend?: boolean; frame4D: number; nFrame4D: number; values: AnyNumberArray; outlineBorder?: number; isTransparentBelowCalMin?: boolean; colormapType?: number; base64?: string; colorbarVisible?: boolean; showLegend?: boolean; labels?: NVLabel3D[]; atlasValues?: AnyNumberArray; };
// NOTE(review): the bare `Record` above lost its generic arguments when this
// declaration bundle was generated (likely Record<string, string>) -- regenerate.
// Default values for NVMeshLayer fields.
declare const NVMeshLayerDefaults: { colormap: string; opacity: number; nFrame4D: number; frame4D: number; outlineBorder: number; cal_min: number; cal_max: number; cal_minNeg: number; cal_maxNeg: number; colormapType: COLORMAP_TYPE; values: number[]; useNegativeCmap: boolean; showLegend: boolean; };
// Option object consumed by NVMesh.loadFromUrl.
declare class NVMeshFromUrlOptions { url: string; gl: WebGL2RenderingContext | null; name: string; opacity: number; rgba255: Uint8Array; visible: boolean; layers: NVMeshLayer[]; colorbarVisible: boolean; meshShaderIndex: number; constructor(url?: string, gl?: any, name?: string, opacity?: number, rgba255?: Uint8Array, visible?: boolean, layers?: any[], colorbarVisible?: boolean, meshShaderIndex?: number); }
/** * Parameters for loading a base mesh or volume. */ type BaseLoadParams = { /** WebGL rendering context. */ gl: WebGL2RenderingContext; /** Name for this image. Default is an empty string. */ name: string; /** Opacity for this image. Default is 1. */ opacity: number; /** Base color of the mesh in RGBA [0-255]. Default is white. */ rgba255: number[] | Uint8Array; /** Whether this image is visible. */ visible: boolean; /** Layers of the mesh to load. */ layers: NVMeshLayer[]; /** Shader index for mesh rendering. Default is 0 (Phong).
*/ meshShaderIndex: number; }; type LoadFromUrlParams = Partial & { url: string; headers?: Record; buffer?: ArrayBuffer; }; type LoadFromFileParams = BaseLoadParams & { file: Blob; }; type LoadFromBase64Params = BaseLoadParams & { base64: string; }; /** * a NVMesh encapsulates some mesh data and provides methods to query and operate on meshes */ declare class NVMesh { id: string; name: string; anatomicalStructurePrimary: string; colorbarVisible: boolean; furthestVertexFromOrigin: number; extentsMin: number | number[]; extentsMax: number | number[]; opacity: number; visible: boolean; meshShaderIndex: number; offsetPt0: Uint32Array | null; colormapInvert: boolean; fiberGroupColormap: ColorMap | null; indexBuffer: WebGLBuffer; vertexBuffer: WebGLBuffer; vao: WebGLVertexArrayObject; vaoFiber: WebGLVertexArrayObject; pts: Float32Array; tris?: Uint32Array; layers: NVMeshLayer[]; type: MeshType; data_type?: string; rgba255: Uint8Array; fiberLength?: number; fiberLengths?: Uint32Array; fiberDensity?: Float32Array; fiberDither: number; fiberColor: string; fiberDecimationStride: number; fiberSides: number; fiberRadius: number; fiberOcclusion: number; f32PerVertex: number; dpsThreshold: number; fiberMask?: unknown[]; colormap?: ColorMap | LegacyConnectome | string | null; dpg?: ValuesArray | null; dps?: ValuesArray | null; dpv?: ValuesArray | null; groups?: ValuesArray | null; hasConnectome: boolean; connectome?: LegacyConnectome | string; indexCount?: number; vertexCount: number; nodeScale: number; edgeScale: number; legendLineThickness: number; showLegend: boolean; nodeColormap: string; edgeColormap: string; nodeColormapNegative?: string; edgeColormapNegative?: string; nodeMinColor?: number; nodeMaxColor?: number; edgeMin?: number; edgeMax?: number; nodes?: LegacyNodes | NVConnectomeNode[]; edges?: number[] | NVConnectomeEdge[]; points?: Point[]; /** * @param pts - a 3xN array of vertex positions (X,Y,Z coordinates). 
* @param tris - a 3xN array of triangle indices (I,J,K; indexed from zero). Each triangle generated from three vertices. * @param name - a name for this image. Default is an empty string * @param rgba255 - the base color of the mesh. RGBA values from 0 to 255. Default is white * @param opacity - the opacity for this mesh. default is 1 * @param visible - whether or not this image is to be visible * @param gl - WebGL rendering context * @param connectome - specify connectome edges and nodes. Default is null (not a connectome). * @param dpg - Data per group for tractography, see TRK format. Default is null (not tractography) * @param dps - Data per streamline for tractography, see TRK format. Default is null (not tractography) * @param dpv - Data per vertex for tractography, see TRK format. Default is null (not tractography) * @param groups - Groups for tractography, see TRK format. Default is null (not tractography) * @param colorbarVisible - does this mesh display a colorbar * @param anatomicalStructurePrimary - region for mesh.
Default is an empty string */ constructor(pts: Float32Array, tris: Uint32Array, name: string, rgba255: Uint8Array, opacity: number, visible: boolean, gl: WebGL2RenderingContext, connectome?: LegacyConnectome | string | null, dpg?: ValuesArray | null, dps?: ValuesArray | null, dpv?: ValuesArray | null, groups?: ValuesArray | null, colorbarVisible?: boolean, anatomicalStructurePrimary?: string);
initValuesArray(va: ValuesArray): ValuesArray; linesToCylinders(gl: WebGL2RenderingContext, posClrF32: Float32Array, indices: number[]): void; createFiberDensityMap(): void; updateFibers(gl: WebGL2RenderingContext): void; indexNearestXYZmm(Xmm: number, Ymm: number, Zmm: number): number[]; unloadMesh(gl: WebGL2RenderingContext): void; scalars2RGBA(rgba: Uint8ClampedArray, layer: NVMeshLayer, scalars: AnyNumberArray, isNegativeCmap?: boolean): Uint8ClampedArray; blendColormap(u8: Uint8Array, additiveRGBA: Uint8Array, layer: NVMeshLayer, mn: number, mx: number, lut: Uint8ClampedArray, invert?: boolean): void; updateMesh(gl: WebGL2RenderingContext): void; reverseFaces(gl: WebGL2RenderingContext): void; hierarchicalOrder(): number; decimateFaces(n: number, ntarget: number): void; decimateHierarchicalMesh(gl: WebGL2RenderingContext, order?: number): boolean;
// NOTE(review): the bare `Promise` return types below lost their generic
// arguments during bundle generation (e.g. Promise<void>, Promise<NVMesh>) --
// regenerate from source rather than hand-patching.
setLayerProperty(id: number, key: keyof NVMeshLayer, val: number | string | boolean, gl: WebGL2RenderingContext): Promise; setProperty(key: keyof this, val: number | string | boolean | Uint8Array | number[] | ColorMap | LegacyConnectome | Float32Array, gl: WebGL2RenderingContext): void; generatePosNormClr(pts: Float32Array, tris: Uint32Array, rgba255: Uint8Array): Float32Array; static readMesh(buffer: ArrayBuffer, name: string, gl: WebGL2RenderingContext, opacity?: number, rgba255?: Uint8Array, visible?: boolean): Promise; static loadLayer(layer: NVMeshLayer, nvmesh: NVMesh): Promise; /** * factory function to load and return a new NVMesh instance from a given URL */ static loadFromUrl({ url, headers, gl, name, opacity,
rgba255, visible, layers, buffer, meshShaderIndex }?: Partial): Promise; static readFileAsync(file: Blob): Promise; /** * factory function to load and return a new NVMesh instance from a file in the browser * * @returns NVMesh instance */ static loadFromFile({ file, gl, name, opacity, rgba255, visible, layers }?: Partial): Promise; /** * load and return a new NVMesh instance from a base64 encoded string */ loadFromBase64({ base64, gl, name, opacity, rgba255, visible, layers }?: Partial): Promise; }
// NOTE(review): the bare `Partial`/`Promise` above lost their generic arguments
// in generation (likely Partial<LoadFromUrlParams> etc. and Promise<NVMesh>) --
// regenerate from source.
// FreeSurfer JSON connectome: a list of points carrying XYZ coordinates.
type FreeSurferConnectome = { data_type: string; points: Array<{ comments?: Array<{ text: string; }>; coordinates: { x: number; y: number; z: number; }; }>; };
/** * Represents a connectome */ declare class NVConnectome extends NVMesh { gl: WebGL2RenderingContext; nodesChanged: EventTarget; constructor(gl: WebGL2RenderingContext, connectome: LegacyConnectome); static convertLegacyConnectome(json: LegacyConnectome): Connectome; static convertFreeSurferConnectome(json: FreeSurferConnectome, colormap?: string): Connectome; updateLabels(): void; addConnectomeNode(node: NVConnectomeNode): void; deleteConnectomeNode(node: NVConnectomeNode): void; updateConnectomeNodeByIndex(index: number, updatedNode: NVConnectomeNode): void; updateConnectomeNodeByPoint(point: [number, number, number], updatedNode: NVConnectomeNode): void; addConnectomeEdge(first: number, second: number, colorValue: number): NVConnectomeEdge; deleteConnectomeEdge(first: number, second: number): NVConnectomeEdge; findClosestConnectomeNode(point: number[], distance: number): NVConnectomeNode | null; updateConnectome(gl: WebGL2RenderingContext): void; updateMesh(gl: WebGL2RenderingContext): void; json(): Connectome; /** * Factory method to create connectome from options */ static loadConnectomeFromUrl(gl: WebGL2RenderingContext, url: string): Promise; }
/** * Represents a completed measurement between two points */ interface CompletedMeasurement { startMM: vec3; endMM: vec3; distance: number; sliceIndex: number;
sliceType: SLICE_TYPE; slicePosition: number; }
/** * Represents a completed angle measurement between two lines */ interface CompletedAngle { firstLineMM: { start: vec3; end: vec3; }; secondLineMM: { start: vec3; end: vec3; }; angle: number; sliceIndex: number; sliceType: SLICE_TYPE; slicePosition: number; }
/** * Slice Type * @ignore */ declare enum SLICE_TYPE { AXIAL = 0, CORONAL = 1, SAGITTAL = 2, MULTIPLANAR = 3, RENDER = 4 }
declare enum PEN_TYPE { PEN = 0, RECTANGLE = 1, ELLIPSE = 2 }
declare enum SHOW_RENDER { NEVER = 0, ALWAYS = 1, AUTO = 2 }
/** * Multi-planar layout * @ignore */ declare enum MULTIPLANAR_TYPE { AUTO = 0, COLUMN = 1, GRID = 2, ROW = 3 }
/** * Drag mode * @ignore */ declare enum DRAG_MODE { none = 0, contrast = 1, measurement = 2, pan = 3, slicer3D = 4, callbackOnly = 5, roiSelection = 6, angle = 7, crosshair = 8, windowing = 9 }
// Per-button mapping from mouse / touch gestures to a DRAG_MODE action.
interface MouseEventConfig { leftButton: { primary: DRAG_MODE; withShift?: DRAG_MODE; withCtrl?: DRAG_MODE; }; rightButton: DRAG_MODE; centerButton: DRAG_MODE; }
interface TouchEventConfig { singleTouch: DRAG_MODE; doubleTouch: DRAG_MODE; }
/** * NVConfigOptions */ type NVConfigOptions = { textHeight: number; fontSizeScaling: number; fontMinPx: number; colorbarHeight: number; colorbarWidth: number; showColorbarBorder: boolean; crosshairWidth: number; crosshairWidthUnit: 'voxels' | 'mm' | 'percent'; crosshairGap: number; rulerWidth: number; show3Dcrosshair: boolean; backColor: number[]; crosshairColor: number[]; fontColor: Float32List; selectionBoxColor: number[]; clipPlaneColor: number[]; isClipPlanesCutaway: boolean; paqdUniforms: number[]; rulerColor: number[]; colorbarMargin: number; trustCalMinMax: boolean; clipPlaneHotKey: string; cycleClipPlaneHotKey: string; viewModeHotKey: string; doubleTouchTimeout: number; longTouchTimeout: number; keyDebounceTime: number; isNearestInterpolation: boolean; atlasOutline: number; atlasActiveIndex: number; isRuler: boolean; isColorbar: boolean; isOrientCube: boolean;
// (continued NVConfigOptions fields: layout, interaction, drawing, measurement
// and rendering switches; DEFAULT_OPTIONS supplies the shipped values)
tileMargin: number; multiplanarPadPixels: number; multiplanarForceRender: boolean; multiplanarEqualSize: boolean; multiplanarShowRender: SHOW_RENDER; isRadiologicalConvention: boolean; meshThicknessOn2D: number | string; dragMode: DRAG_MODE; dragModePrimary: DRAG_MODE; mouseEventConfig?: MouseEventConfig; touchEventConfig?: TouchEventConfig; yoke3Dto2DZoom: boolean; isDepthPickMesh: boolean; isCornerOrientationText: boolean; isOrientationTextVisible: boolean; showAllOrientationMarkers: boolean; heroImageFraction: number; heroSliceType: SLICE_TYPE; sagittalNoseLeft: boolean; isSliceMM: boolean; isV1SliceShader: boolean; forceDevicePixelRatio: number; logLevel: 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'silent'; loadingText: string; isForceMouseClickToVoxelCenters: boolean; dragAndDropEnabled: boolean; drawingEnabled: boolean; penValue: number; penType: PEN_TYPE; floodFillNeighbors: number; isFilledPen: boolean; thumbnail: string; maxDrawUndoBitmaps: number; sliceType: SLICE_TYPE; isAntiAlias: boolean | null; isAdditiveBlend: boolean; isResizeCanvas: boolean; meshXRay: number; limitFrames4D: number; showLegend: boolean; legendBackgroundColor: number[]; legendTextColor: number[]; multiplanarLayout: MULTIPLANAR_TYPE; renderOverlayBlend: number; sliceMosaicString: string; centerMosaic: boolean; interactive: boolean; penSize: number; clickToSegment: boolean; clickToSegmentRadius: number; clickToSegmentBright: boolean; clickToSegmentAutoIntensity: boolean; clickToSegmentIntensityMax: number; clickToSegmentIntensityMin: number; clickToSegmentPercent: number; clickToSegmentMaxDistanceMM: number; clickToSegmentIs2D: boolean; selectionBoxLineThickness: number; selectionBoxIsOutline: boolean; scrollRequiresFocus: boolean; showMeasureUnits: boolean; measureTextJustify: 'start' | 'center' | 'end'; measureTextColor: number[]; measureLineColor: number[]; measureTextHeight: number; isAlphaClipDark: boolean; gradientOrder: number; gradientOpacity: number; renderSilhouette:
number; gradientAmount: number; invertScrollDirection: boolean; is2DSliceShader: boolean; bounds: [[number, number], [number, number]] | null; showBoundsBorder?: boolean; boundsBorderColor?: number[]; /** Chunk cache size for zarr viewing (default 500) */ zarrCacheSize: number; /** Number of chunk rings to prefetch around the visible region for zarr viewing (0 disables, default 1) */ zarrPrefetchRings: number; };
/** Baseline configuration used when an option is not supplied. */
declare const DEFAULT_OPTIONS: NVConfigOptions;
/**
 * Recursively widens every `number` in T to `number | string`, so numeric
 * options can round-trip through JSON/string encodings.
 * (Restored generic parameters -- the generated bundle had stripped `<T>`,
 * `<infer U>` and the recursive type arguments, leaving invalid TypeScript.)
 */
type EncodeNumbersIn<T> = T extends number ? number | string : T extends Array<infer U> ? Array<EncodeNumbersIn<U>> : T extends object ? {
    [K in keyof T]: EncodeNumbersIn<T[K]>;
} : T;
type EncodedNVConfigOptions = EncodeNumbersIn<NVConfigOptions>;
declare const DEFAULT_SCENE_DATA: {};
/** Serializable camera / crosshair / clip-plane state for a scene. */
type SceneData = { gamma: number; azimuth: number; elevation: number; crosshairPos: vec3; clipPlanes: number[][]; clipPlaneDepthAziElevs: number[][]; volScaleMultiplier: number; pan2Dxyzmm: vec4; };
declare const INITIAL_SCENE_DATA: { gamma: number; azimuth: number; elevation: number; crosshairPos: vec3; clipPlanes: number[][]; clipPlaneDepthAziElevs: number[][]; volScaleMultiplier: number; pan2Dxyzmm: vec4; };
type Scene = { onAzimuthElevationChange: (azimuth: number, elevation: number) => void; onZoom3DChange: (scale: number) => void; sceneData: SceneData; renderAzimuth: number; renderElevation: number; volScaleMultiplier: number; crosshairPos: vec3; clipPlane: number[]; clipPlanes: number[][]; clipPlaneDepthAziElevs: number[][]; pan2Dxyzmm: vec4; _elevation?: number; _azimuth?: number; gamma?: number; };
/** * DocumentData / ExportDocumentData types (kept minimal here) */
// Restored stripped generic arguments below. `opts` is reconstructed as
// raw-or-encoded options (the original had two bare `Partial`s) -- confirm
// against the serializer before relying on it.
type DocumentData = { title?: string; imageOptionsArray?: ImageFromUrlOptions[]; meshOptionsArray?: unknown[]; opts?: Partial<NVConfigOptions> | Partial<EncodedNVConfigOptions>; previewImageDataURL?: string; labels?: NVLabel3D[]; encodedImageBlobs?: string[]; encodedDrawingBlob?: string; meshesString?: string; sceneData?: Partial<SceneData>; connectomes?: string[]; customData?: string; completedMeasurements?: CompletedMeasurement[]; completedAngles?:
CompletedAngle[]; };
// NOTE(review): the bare `Map`, `Partial`, `Array` and `Promise` types in this
// block lost their generic arguments when the bundle was generated (e.g.
// `sceneData: Partial<SceneData>`, `Promise<NVDocument>`) -- regenerate.
type ExportDocumentData = { title?: string; encodedImageBlobs: string[]; encodedDrawingBlob: string; previewImageDataURL: string; imageOptionsMap: Map; imageOptionsArray: ImageFromUrlOptions[]; sceneData: Partial; opts: EncodedNVConfigOptions | Partial; meshesString: string; meshOptionsArray?: unknown[]; labels: NVLabel3D[]; connectomes: string[]; customData: string; completedMeasurements: CompletedMeasurement[]; completedAngles: CompletedAngle[]; };
/** * Returns a partial configuration object containing only the fields in the provided * options that differ from the DEFAULT_OPTIONS. */ /** * NVDocument class (main) */ declare class NVDocument { data: DocumentData; scene: Scene; volumes: NVImage[]; meshDataObjects?: Array; meshes: Array; drawBitmap: Uint8Array | null; imageOptionsMap: Map; meshOptionsMap: Map; completedMeasurements: CompletedMeasurement[]; completedAngles: CompletedAngle[]; private _optsProxy; private _optsChangeCallback; constructor(); /** * Title of the document */ get title(): string; /** * Gets preview image blob * @returns dataURL of preview image */ get previewImageDataURL(): string; /** * Sets preview image blob * @param dataURL - encoded preview image */ set previewImageDataURL(dataURL: string); /** * @param title - title of document */ set title(title: string); get imageOptionsArray(): ImageFromUrlOptions[]; /** * Gets the base 64 encoded blobs of associated images */ get encodedImageBlobs(): string[]; /** * Gets the base 64 encoded blob of the associated drawing */ get encodedDrawingBlob(): string; /** * Gets the options of the {@link Niivue} instance */ get opts(): NVConfigOptions; /** * Sets the options of the {@link Niivue} instance */ set opts(opts: NVConfigOptions); /** * Gets the 3D labels of the {@link Niivue} instance */ get labels(): NVLabel3D[]; /** * Sets the 3D labels of the {@link Niivue} instance */ set labels(labels: NVLabel3D[]); get customData(): string | undefined; set customData(data: string); /** *
Checks if document has an image by id */ hasImage(image: NVImage): boolean; /** * Checks if document has an image by url */ hasImageFromUrl(url: string): boolean; /** * Adds an image and the options an image was created with */ addImageOptions(image: NVImage, imageOptions: ImageFromUrlOptions): void; /** * Removes image from the document as well as its options */ removeImage(image: NVImage): void; /** * Fetch any image data that is missing from this document. */ fetchLinkedData(): Promise; /** * Returns the options for the image if it was added by url */ getImageOptions(image: NVImage): ImageFromUrlOptions | null; /** * Serialise the document by delegating to NVSerializer. */ json(embedImages?: boolean, embedDrawing?: boolean): ExportDocumentData; download(fileName: string, compress: boolean, opts?: { embedImages: boolean; }): Promise; /** * Factory method to return an instance of NVDocument from a URL */ static loadFromUrl(url: string): Promise; static loadFromFile(file: Blob): Promise; /** * Factory method to return an instance of NVDocument from JSON. * Delegates the main parsing to NVSerializer, then applies NVDocument-specific * post-processing (opts decode, scene defaults, clone measurements/angles).
*/ static loadFromJSON(data: DocumentData): Promise; /** * Sets the callback function to be called when opts properties change */ setOptsChangeCallback(callback: (propertyName: keyof NVConfigOptions, newValue: NVConfigOptions[keyof NVConfigOptions], oldValue: NVConfigOptions[keyof NVConfigOptions]) => void): void; /** * Removes the opts change callback */ removeOptsChangeCallback(): void; /** * Creates a Proxy wrapper around the opts object to detect changes */ private _createOptsProxy; }
// Public surface of this declaration bundle. The single-letter aliases are
// generator output (rollup-plugin-dts style); NOTE(review): keep them stable,
// since downstream entry points presumably re-export by these names.
export { ColorTables as $, type ANNOT as A, type Descriptive as B, type CompletedMeasurement as C, type DragReleaseParams as D, type ExportDocumentData as E, type FreeSurferConnectome as F, type GII as G, NiiDataType as H, type ImageFromUrlOptions as I, type SliceScale as J, type MouseEventConfig as K, type LoadFromUrlParams as L, type MZ3 as M, NVImage as N, type TouchEventConfig as O, NVLabel3D as P, type MvpMatrix2D as Q, type MM as R, SLICE_TYPE as S, type TypedVoxelArray as T, type UIData as U, type ValuesArray as V, NVLabel3DStyle as W, type X3D as X, LabelAnchorPoint as Y, NVMeshFromUrlOptions as Z, NVMeshLayerDefaults as _, NVMesh as a, cmapper as a0, NVImageFromUrlOptions as a1, NVZarrHelper as a2, ZarrChunkClient as a3, ZarrChunkCache as a4, type NVZarrHelperOptions as a5, type ZarrPyramidInfo as a6, type ZarrPyramidLevel as a7, type ChunkCoord as a8, LabelTextAlignment as a9, LabelLineTerminator as aa, identityTransform as ab, degToRad as ac, eulerToRotationMatrix as ad, createTransformMatrix as ae, arrayToMat4 as af, mat4ToArray as ag, multiplyAffine as ah, copyAffine as ai, transformsEqual as aj, PEN_TYPE as ak, SHOW_RENDER as al, MULTIPLANAR_TYPE as am, DEFAULT_OPTIONS as an, DEFAULT_SCENE_DATA as ao, INITIAL_SCENE_DATA as ap, type DocumentData as aq, type Volume as ar, type Point as as, type NVConnectomeEdge as at, type ConnectomeOptions as au, type LegacyNodes as av, type NiiVueLocationValue as aw, type NiiVueLocation as ax, type FontMetrics as ay, type CompletedAngle as b, NVDocument as c, type NVConfigOptions as d, type TRACT as e, type TT as f, type TRX as g, type TCK as h, type TRK as i, type VTK as j, type NVMeshLayer as k, type DefaultMeshType as l, type MGH as m, type LUT as n, type ColormapListEntry as o, type SyncOpts as p, type Graph as q, DRAG_MODE as r, type Scene as s, type SaveImageOptions as t, type ColorMap as u, type LegacyConnectome as v, type AffineTransform as w, type NVConnectomeNode as x, type Connectome as y, type NiftiHeader as z };