/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/// <amd-module name="@tensorflow/tfjs-backend-webgpu/dist/backend_webgpu" />
/// <reference types="@webgpu/types/dist/index" />
import './flags_webgpu';
import { backend_util, BackendValues, DataStorage, DataType, GPUData, KernelBackend, Rank, RecursiveArray, Tensor, TensorBuffer, TensorInfo, TimingInfo, WebGPUData } from '@tensorflow/tfjs-core';
import { AdapterInfo } from './adapter_info';
import { BufferManager } from './buffer_manager';
import { TextureManager } from './texture_manager';
import * as webgpu_program from './webgpu_program';
/** Memory report for the WebGPU backend, extending the core MemoryInfo. */
export interface WebGPUMemoryInfo extends backend_util.MemoryInfo {
// Bytes of tensor data currently resident on the GPU.
numBytesInGPU: number;
// Bytes reserved by the backend's allocators — presumably includes pooled
// buffers not currently bound to a tensor; confirm against BufferManager.
numBytesAllocatedInGPU: number;
// True when the reported numbers cannot be trusted to be exact.
unreliable: boolean;
}
/** Descriptor for a GPU buffer backing a tensor's data. */
export declare type BufferInfo = {
// Buffer size in bytes.
size: number;
usage: GPUBufferUsageFlags;
buffer: GPUBuffer;
};
/** Descriptor for a GPU texture backing a tensor's data. */
export declare type TextureInfo = {
width: number;
height: number;
format: GPUTextureFormat;
usage: GPUTextureUsageFlags;
// Either a regular texture or an imported external texture (e.g. video).
texture: GPUTexture | GPUExternalTexture;
};
/** Backend-side record of a tensor's values and its GPU resources. */
declare type TensorData = {
// CPU-side values, when the data is (also) held on the host.
values: BackendValues;
dtype: DataType;
shape: number[];
// Live reference count; managed via WebGPUBackend.incRef/decRef and
// checked by disposeData before releasing the memory.
refCount: number;
// GPU resource backing this tensor, if it has been uploaded.
resourceInfo?: BufferInfo | TextureInfo;
// True when the GPU resource is externally owned (see
// createTensorFromGPUData) — presumably not released by this backend;
// confirm in releaseResource.
external?: boolean;
// For complex dtypes, the separate real/imaginary component tensors.
complexTensorInfos?: {
real: TensorInfo;
imag: TensorInfo;
};
};
// Opaque handle identifying a tensor's data; compared by object identity.
interface DataId {
}
/** Timing record for a single dispatched WebGPU kernel. */
export declare type WebGPUKernelInfo = {
// Kernel name.
name: string;
// Resolves to the kernel's measured GPU time — presumably milliseconds,
// matching getTimeFromQuerySet/getQueryTime; confirm against implementation.
query: Promise<number>;
};
/** A timer entry: one kernel record or an arbitrarily nested list of them. */
export declare type TimerNode = RecursiveArray<WebGPUKernelInfo> | WebGPUKernelInfo;
/** Core TimingInfo extended with host<->GPU transfer wait times. */
export interface WebGPUTimingInfo extends TimingInfo {
// Milliseconds spent waiting on uploads to the GPU.
uploadWaitMs: number;
// Milliseconds spent waiting on downloads from the GPU.
downloadWaitMs: number;
}
/** Uniform values passed to a WebGPU program: one {type, data} entry each. */
declare type ProgramUniform = Array<{
// Uniform's scalar element type name (e.g. an int/float variant).
type: string;
data: number[];
}>;
export declare class WebGPUBackend extends KernelBackend {
bufferManager: BufferManager;
adapterInfo: AdapterInfo;
device: GPUDevice;
queue: GPUQueue;
tensorMap: DataStorage;
textureManager: TextureManager;
thresholdToIncreaseWorkgroups: number;
private activeTimers;
private currentCommandEncoder;
private currentComputePass;
private commandQueueOwnedIds;
private dispatchNumberInEncoder;
private disposed;
private downloadWaitMs;
private dummyCanvas;
private dummyContext;
private tensorDataPendingDisposal;
private static nextDataId;
private pipelineCache;
private programTimersStack;
private querySet;
private stagingPendingDisposal;
private supportTimeQuery;
private uniformPendingDisposal;
private uploadWaitMs;
private nextDataId;
constructor(device: GPUDevice, adapterInfo?: GPUAdapterInfo);
floatPrecision(): 32;
defaultGpuBufferUsage(): number;
/**
* Dispose the memory if the dataId has 0 refCount. Return true if the memory
* is released or memory is not managed in this backend, false if memory is
* not cleared.
* @param dataId
* @oaram force Optional, remove the data regardless of refCount
*/
disposeData(dataId: DataId, force?: boolean): boolean;
memory(): WebGPUMemoryInfo;
releaseResource(dataId: DataId): void;
/** Return refCount of a `TensorData`. */
refCount(dataId: DataId): number;
/** Increase refCount of a `TensorData`. */
incRef(dataId: DataId): void;
/** Decrease refCount of a `TensorData`. */
decRef(dataId: DataId): void;
write(values: BackendValues, shape: number[], dtype: DataType): DataId;
move(dataId: DataId, values: BackendValues, shape: number[], dtype: DataType, refCount: number): void;
submitQueue(): void;
ensureCommandEncoderReady(): void;
ensureComputePassEnded(): void;
getComputePass(): GPUComputePassEncoder;
getBufferData(buffer: GPUBuffer, size: number): Promise;
private convertAndCacheOnCPU;
readSync(dataId: object): BackendValues;
read(dataId: object): Promise;
private copyBuffer;
/**
* Create a TF.js tensor out of an existing WebGPU buffer.
*/
createTensorFromGPUData(values: WebGPUData, shape: number[], dtype: DataType): Tensor;
/**
* Read tensor to a new GPUBuffer.
* @param dataId The source tensor.
*/
readToGPU(dataId: DataId): GPUData;
bufferSync(t: TensorInfo): TensorBuffer;
time(f: () => void): Promise;
makeTensorInfo(shape: number[], dtype: DataType, values?: BackendValues | string[]): TensorInfo;
private tensorToBinding;
getQueryTime(query: GPUQuerySet): Promise;
uploadToGPU(dataId: DataId): void;
private makeUniforms;
runWebGPUProgram(program: webgpu_program.WebGPUProgram, inputs: TensorInfo[], outputDtype: DataType, programDefinedUniform?: ProgramUniform, output?: TensorInfo): TensorInfo;
getTimeFromQuerySet(querySet: GPUQuerySet): Promise;
shouldExecuteOnCPU(inputs: TensorInfo[], sizeThreshold?: number): boolean;
numDataIds(): number;
dispose(): void;
}
// Keeps this declaration file a module (no global scope pollution).
export {};