
onnxruntime-common

Comparing version 1.18.0-dev.20240309-efad5bbc5a to 1.18.0-dev.20240424-218b6b0a73

__commit.txt

@@ -1,1 +1,1 @@

efad5bbc5aed1717200d3e8f6ddd253394af4b99
218b6b0a734f6233845313d2c61531b7558e0b5f
import { Backend } from './backend.js';
import { InferenceSession } from './inference-session.js';
/**

@@ -14,10 +15,11 @@ * Register a backend.

/**
* Resolve backend by specified hints.
* Resolve execution providers from the specific session options.
*
* @param backendHints - a list of execution provider names to lookup. If omitted use registered backends as list.
* @returns a promise that resolves to the backend.
* @param options - the session options object.
* @returns a promise that resolves to a tuple of an initialized backend instance and a session options object with
* filtered EP list.
*
* @ignore
*/
export declare const resolveBackend: (backendHints: readonly string[]) => Promise<Backend>;
export declare const resolveBackendAndExecutionProviders: (options: InferenceSession.SessionOptions) => Promise<[backend: Backend, options: InferenceSession.SessionOptions]>;
//# sourceMappingURL=backend-impl.d.ts.map

@@ -5,3 +5,3 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
exports.resolveBackend = exports.registerBackend = void 0;
exports.resolveBackendAndExecutionProviders = exports.registerBackend = void 0;
const backends = new Map();

@@ -53,44 +53,96 @@ const backendsSortedByPriority = [];

/**
* Resolve backend by specified hints.
* Try to resolve and initialize a backend.
*
* @param backendHints - a list of execution provider names to lookup. If omitted use registered backends as list.
* @returns a promise that resolves to the backend.
* @param backendName - the name of the backend.
* @returns the backend instance if resolved and initialized successfully, or an error message if failed.
*/
const tryResolveAndInitializeBackend = async (backendName) => {
const backendInfo = backends.get(backendName);
if (!backendInfo) {
return 'backend not found.';
}
if (backendInfo.initialized) {
return backendInfo.backend;
}
else if (backendInfo.aborted) {
return backendInfo.error;
}
else {
const isInitializing = !!backendInfo.initPromise;
try {
if (!isInitializing) {
backendInfo.initPromise = backendInfo.backend.init(backendName);
}
await backendInfo.initPromise;
backendInfo.initialized = true;
return backendInfo.backend;
}
catch (e) {
if (!isInitializing) {
backendInfo.error = `${e}`;
backendInfo.aborted = true;
}
return backendInfo.error;
}
finally {
delete backendInfo.initPromise;
}
}
};
/**
* Resolve execution providers from the specific session options.
*
* @param options - the session options object.
* @returns a promise that resolves to a tuple of an initialized backend instance and a session options object with
* filtered EP list.
*
* @ignore
*/
const resolveBackend = async (backendHints) => {
const resolveBackendAndExecutionProviders = async (options) => {
// extract backend hints from session options
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backendNames = backendHints.length === 0 ? backendsSortedByPriority : backendHints;
// try to resolve and initialize all requested backends
let backend;
const errors = [];
const availableBackendNames = new Set();
for (const backendName of backendNames) {
const backendInfo = backends.get(backendName);
if (backendInfo) {
if (backendInfo.initialized) {
return backendInfo.backend;
const resolveResult = await tryResolveAndInitializeBackend(backendName);
if (typeof resolveResult === 'string') {
errors.push({ name: backendName, err: resolveResult });
}
else {
if (!backend) {
backend = resolveResult;
}
else if (backendInfo.aborted) {
continue; // current backend is unavailable; try next
if (backend === resolveResult) {
availableBackendNames.add(backendName);
}
const isInitializing = !!backendInfo.initPromise;
try {
if (!isInitializing) {
backendInfo.initPromise = backendInfo.backend.init(backendName);
}
}
// if no backend is available, throw error.
if (!backend) {
throw new Error(`no available backend found. ERR: ${errors.map(e => `[${e.name}] ${e.err}`).join(', ')}`);
}
// for each explicitly requested backend, if it's not available, output warning message.
for (const { name, err } of errors) {
if (backendHints.includes(name)) {
// eslint-disable-next-line no-console
console.warn(`removing requested execution provider "${name}" from session options because it is not available: ${err}`);
}
}
const filteredEps = eps.filter(i => availableBackendNames.has(typeof i === 'string' ? i : i.name));
return [
backend, new Proxy(options, {
get: (target, prop) => {
if (prop === 'executionProviders') {
return filteredEps;
}
await backendInfo.initPromise;
backendInfo.initialized = true;
return backendInfo.backend;
return Reflect.get(target, prop);
}
catch (e) {
if (!isInitializing) {
errors.push({ name: backendName, err: e });
}
backendInfo.aborted = true;
}
finally {
delete backendInfo.initPromise;
}
}
}
throw new Error(`no available backend found. ERR: ${errors.map(e => `[${e.name}] ${e.err}`).join(', ')}`);
})
];
};
exports.resolveBackend = resolveBackend;
exports.resolveBackendAndExecutionProviders = resolveBackendAndExecutionProviders;
//# sourceMappingURL=backend-impl.js.map
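Aside: the `new Proxy(options, ...)` construct above hands the caller back its own options object with only `executionProviders` overridden. A minimal standalone sketch of that technique, in plain TypeScript and not the library's API:

```ts
// Wrap an options object so reads of `executionProviders` see a filtered
// list while every other property passes through to the original object.
const options = { executionProviders: ['webgpu', 'wasm'], logSeverityLevel: 2 };
const filteredEps = ['wasm']; // pretend 'webgpu' failed to initialize

const validated = new Proxy(options, {
  get: (target, prop) =>
    prop === 'executionProviders' ? filteredEps : Reflect.get(target, prop),
});

console.log(validated.executionProviders); // ['wasm']
console.log(validated.logSeverityLevel); // 2 (read through unchanged)
```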

@@ -51,3 +51,3 @@ import { InferenceSession } from './inference-session.js';

getParametersSize(trainableOnly: boolean): Promise<number>;
loadParametersBuffer(array: Uint8Array, trainableOnly: boolean): Promise<void>;
loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise<void>;
getContiguousParameters(trainableOnly: boolean): Promise<OnnxValue>;

@@ -66,5 +66,5 @@ }

createInferenceSessionHandler(uriOrBuffer: string | Uint8Array, options?: InferenceSession.SessionOptions): Promise<InferenceSessionHandler>;
createTrainingSessionHandler?(checkpointStateUriOrBuffer: TrainingSession.URIorBuffer, trainModelUriOrBuffer: TrainingSession.URIorBuffer, evalModelUriOrBuffer: TrainingSession.URIorBuffer, optimizerModelUriOrBuffer: TrainingSession.URIorBuffer, options: InferenceSession.SessionOptions): Promise<TrainingSessionHandler>;
createTrainingSessionHandler?(checkpointStateUriOrBuffer: TrainingSession.UriOrBuffer, trainModelUriOrBuffer: TrainingSession.UriOrBuffer, evalModelUriOrBuffer: TrainingSession.UriOrBuffer, optimizerModelUriOrBuffer: TrainingSession.UriOrBuffer, options: InferenceSession.SessionOptions): Promise<TrainingSessionHandler>;
}
export { registerBackend } from './backend-impl.js';
//# sourceMappingURL=backend.d.ts.map

@@ -130,8 +130,47 @@ export declare namespace Env {

/**
* Set or get the power preference.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as options for `navigator.gpu.requestAdapter()`.
*
* See {@link https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions} for more details.
*
* @defaultValue `undefined`
*/
powerPreference?: 'low-power' | 'high-performance';
/**
* Set or get the force fallback adapter flag.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as options for `navigator.gpu.requestAdapter()`.
*
* See {@link https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions} for more details.
*
* @defaultValue `undefined`
*/
forceFallbackAdapter?: boolean;
/**
* Set or get the adapter for WebGPU.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as the GPU adapter for the underlying WebGPU backend to create GPU device.
*
* If this property is not set, it will become readable after the first WebGPU inference session is created. The
* value will be the GPU adapter created by the underlying WebGPU backend.
*
* When used with TypeScript, the type of this property is `GPUAdapter` defined in "@webgpu/types".
* Use `const adapter = env.webgpu.adapter as GPUAdapter;` in TypeScript to access this property with correct type.
*
* see comments on {@link Tensor.GpuBufferType}
*/
adapter: unknown;
/**
* Get the device for WebGPU.
*
* This property is only available after the first WebGPU inference session is created.
*
* When used with TypeScript, the type of this property is `GPUDevice` defined in "@webgpu/types".
* Use `const device = env.webgpu.device as GPUDevice;` in TypeScript to access this property with correct type.
*
* see comments on {@link GpuBufferType} for more details about why not use types defined in "@webgpu/types".
* see comments on {@link Tensor.GpuBufferType} for more details about why not use types defined in "@webgpu/types".
*/

@@ -138,0 +177,0 @@ readonly device: unknown;
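These WebGPU flags are plain assignments on `env.webgpu`. A hedged usage sketch (assuming `env` is the object exported by this package; the WebGPU backend itself ships in onnxruntime-web):

```ts
import { env } from 'onnxruntime-common';

// Both settings only take effect before the first WebGPU session is created:
env.webgpu.powerPreference = 'high-performance';
env.webgpu.forceFallbackAdapter = false;

// After the first WebGPU session exists, the adapter/device become readable.
// With "@webgpu/types" installed, cast to the proper types:
// const adapter = env.webgpu.adapter as GPUAdapter;
// const device = env.webgpu.device as GPUDevice;
```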

@@ -11,3 +11,3 @@ /**

* See also:
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript.html)
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript/)
* - [Inference examples](https://github.com/microsoft/onnxruntime-inference-examples/tree/main/js)

@@ -21,5 +21,8 @@ *

export * from './tensor.js';
export * from './tensor-conversion.js';
export * from './tensor-factory.js';
export * from './trace.js';
export * from './onnx-model.js';
export * from './onnx-value.js';
export * from './training-session.js';
//# sourceMappingURL=index.d.ts.map

@@ -29,3 +29,3 @@ "use strict";

* See also:
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript.html)
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript/)
* - [Inference examples](https://github.com/microsoft/onnxruntime-inference-examples/tree/main/js)

@@ -39,5 +39,8 @@ *

__exportStar(require("./tensor.js"), exports);
__exportStar(require("./tensor-conversion.js"), exports);
__exportStar(require("./tensor-factory.js"), exports);
__exportStar(require("./trace.js"), exports);
__exportStar(require("./onnx-model.js"), exports);
__exportStar(require("./onnx-value.js"), exports);
__exportStar(require("./training-session.js"), exports);
//# sourceMappingURL=index.js.map

@@ -182,7 +182,5 @@ "use strict";

}
// get backend hints
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backend = await (0, backend_impl_js_1.resolveBackend)(backendHints);
const handler = await backend.createInferenceSessionHandler(filePathOrUint8Array, options);
// resolve backend, update session options with validated EPs, and create session handler
const [backend, optionsWithValidatedEPs] = await (0, backend_impl_js_1.resolveBackendAndExecutionProviders)(options);
const handler = await backend.createInferenceSessionHandler(filePathOrUint8Array, optionsWithValidatedEPs);
(0, trace_js_1.TRACE_FUNC_END)();

@@ -189,0 +187,0 @@ return new InferenceSession(handler);
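The practical effect of the change above: session creation with multiple EPs no longer fails outright when one EP cannot initialize. A hedged sketch ('model.onnx' is a placeholder path):

```ts
import { InferenceSession } from 'onnxruntime-common';

// If 'webgpu' cannot initialize in this environment, it is removed from the
// options (with a console warning) and the session falls back to 'wasm'.
const session = await InferenceSession.create('model.onnx', {
  executionProviders: ['webgpu', 'wasm'],
});
```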

@@ -161,13 +161,13 @@ import { OnnxModelOptions } from './onnx-model.js';

interface ExecutionProviderOptionMap {
coreml: CoreMLExecutionProviderOption;
cpu: CpuExecutionProviderOption;
coreml: CoreMlExecutionProviderOption;
cuda: CudaExecutionProviderOption;
dml: DmlExecutionProviderOption;
nnapi: NnapiExecutionProviderOption;
tensorrt: TensorRtExecutionProviderOption;
wasm: WebAssemblyExecutionProviderOption;
webgl: WebGLExecutionProviderOption;
xnnpack: XnnpackExecutionProviderOption;
webgpu: WebGpuExecutionProviderOption;
webnn: WebNNExecutionProviderOption;
nnapi: NnapiExecutionProviderOption;
xnnpack: XnnpackExecutionProviderOption;
}
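The map keys double as the string names accepted in `executionProviders`, so EPs can be requested by name or as typed option objects. A hedged sketch (`deviceId` per the CUDA option interface):

```ts
import { InferenceSession } from 'onnxruntime-common';

const byName: InferenceSession.SessionOptions = {
  executionProviders: ['wasm'],
};
const byOption: InferenceSession.SessionOptions = {
  executionProviders: [{ name: 'cuda', deviceId: 0 }],
};
```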

@@ -187,6 +187,2 @@ type ExecutionProviderName = keyof ExecutionProviderOptionMap;

}
interface CoreMlExecutionProviderOption extends ExecutionProviderOption {
readonly name: 'coreml';
coreMlFlags?: number;
}
interface DmlExecutionProviderOption extends ExecutionProviderOption {

@@ -215,3 +211,3 @@ readonly name: 'dml';

readonly name: 'webnn';
deviceType?: 'cpu' | 'gpu';
deviceType?: 'cpu' | 'gpu' | 'npu';
numThreads?: number;
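A hedged sketch of requesting the newly added 'npu' device type for the WebNN EP (field names per the interface above; actual device availability depends on the browser):

```ts
const webnnNpu = {
  name: 'webnn',
  deviceType: 'npu',
  powerPreference: 'default',
} as const;
```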

@@ -222,4 +218,35 @@ powerPreference?: 'default' | 'low-power' | 'high-performance';

readonly name: 'coreml';
/**
* The bit flags for CoreML execution provider.
*
* ```
* COREML_FLAG_USE_CPU_ONLY = 0x001
* COREML_FLAG_ENABLE_ON_SUBGRAPH = 0x002
* COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004
* COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008
* COREML_FLAG_CREATE_MLPROGRAM = 0x010
* ```
*
* See include/onnxruntime/core/providers/coreml/coreml_provider_factory.h for more details.
*
* This flag is available only in ONNXRuntime (Node.js binding).
*/
coreMlFlags?: number;
/**
* Specify whether to use CPU only in CoreML EP.
*
* This setting is available only in ONNXRuntime (react-native).
*/
useCPUOnly?: boolean;
/**
* Specify whether to enable CoreML EP on subgraph.
*
* This setting is available only in ONNXRuntime (react-native).
*/
enableOnSubgraph?: boolean;
/**
* Specify whether to only enable CoreML EP for Apple devices with ANE (Apple Neural Engine).
*
* This setting is available only in ONNXRuntime (react-native).
*/
onlyEnableDeviceWithANE?: boolean;

@@ -226,0 +253,0 @@ }
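The CoreML bit flags combine with bitwise OR. A hedged sketch (flag values copied from the comment block above; `coreMlFlags` applies to the Node.js binding only):

```ts
const COREML_FLAG_USE_CPU_ONLY = 0x001;
const COREML_FLAG_CREATE_MLPROGRAM = 0x010;

const coremlOption = {
  name: 'coreml',
  coreMlFlags: COREML_FLAG_USE_CPU_ONLY | COREML_FLAG_CREATE_MLPROGRAM, // 0x011
} as const;
```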

import { Tensor } from './tensor.js';
type NonTensorType = never;
export type NonTensorType = never;
/**

@@ -13,3 +13,2 @@ * Type OnnxValue Represents both tensors and non-tensors value for model's inputs/outputs.

export type OnnxValueDataLocation = Tensor.DataLocation;
export {};
//# sourceMappingURL=onnx-value.d.ts.map

@@ -204,3 +204,3 @@ import { Tensor, TypedTensor } from './tensor.js';

*
* @param bitMap - the ImageBitmap object to create tensor from
* @param bitmap - the ImageBitmap object to create tensor from
* @param options - An optional object representing options for creating tensor from URL.

@@ -207,0 +207,0 @@ *

@@ -141,3 +141,3 @@ import { TensorFactory } from './tensor-factory.js';

*/
export interface TensorConstructor {
export interface TensorConstructor extends TensorFactory {
/**

@@ -275,4 +275,4 @@ * Construct a new string tensor object from the given type, data and dims.

}
export declare const Tensor: TensorConstructor & TensorFactory;
export declare const Tensor: TensorConstructor;
export {};
//# sourceMappingURL=tensor.d.ts.map
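The runtime value is unchanged by this diff; the factory methods simply moved into the `TensorConstructor` type itself. A hedged construction sketch:

```ts
import { Tensor } from 'onnxruntime-common';

// Plain constructor use is unaffected by the type-level change:
const t = new Tensor('float32', new Float32Array([1, 2, 3, 4]), [2, 2]);
```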

@@ -0,4 +1,13 @@

/**
* @ignore
*/
export declare const TRACE: (deviceType: string, label: string) => void;
/**
* @ignore
*/
export declare const TRACE_FUNC_BEGIN: (extraMsg?: string) => void;
/**
* @ignore
*/
export declare const TRACE_FUNC_END: (extraMsg?: string) => void;
//# sourceMappingURL=trace.d.ts.map

@@ -7,2 +7,5 @@ "use strict";

const env_impl_js_1 = require("./env-impl.js");
/**
* @ignore
*/
const TRACE = (deviceType, label) => {

@@ -33,2 +36,5 @@ if (typeof env_impl_js_1.env.trace === 'undefined' ? !env_impl_js_1.env.wasm.trace : !env_impl_js_1.env.trace) {

};
/**
* @ignore
*/
const TRACE_FUNC_BEGIN = (extraMsg) => {

@@ -41,2 +47,5 @@ if (typeof env_impl_js_1.env.trace === 'undefined' ? !env_impl_js_1.env.wasm.trace : !env_impl_js_1.env.trace) {

exports.TRACE_FUNC_BEGIN = TRACE_FUNC_BEGIN;
/**
* @ignore
*/
const TRACE_FUNC_END = (extraMsg) => {

@@ -43,0 +52,0 @@ if (typeof env_impl_js_1.env.trace === 'undefined' ? !env_impl_js_1.env.wasm.trace : !env_impl_js_1.env.trace) {

@@ -42,8 +42,6 @@ "use strict";

const options = sessionOptions || {};
// get backend hints
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backend = await (0, backend_impl_js_1.resolveBackend)(backendHints);
// resolve backend, update session options with validated EPs, and create session handler
const [backend, optionsWithValidatedEPs] = await (0, backend_impl_js_1.resolveBackendAndExecutionProviders)(options);
if (backend.createTrainingSessionHandler) {
const handler = await backend.createTrainingSessionHandler(trainingOptions.checkpointState, trainingOptions.trainModel, evalModel, optimizerModel, options);
const handler = await backend.createTrainingSessionHandler(trainingOptions.checkpointState, trainingOptions.trainModel, evalModel, optimizerModel, optionsWithValidatedEPs);
return new TrainingSession(handler, !!trainingOptions.optimizerModel, !!trainingOptions.evalModel);

@@ -50,0 +48,0 @@ }

@@ -7,3 +7,3 @@ import { InferenceSession } from './inference-session.js';

*/
type URIorBuffer = string | Uint8Array;
type UriOrBuffer = string | Uint8Array;
}

@@ -75,9 +75,9 @@ /**

/**
* Copies parameter values from the given array to the training state. Currently, only supporting models with
* Copies parameter values from the given buffer to the training state. Currently, only supporting models with
* parameters of type Float32.
*
* @param buffer - Float32 buffer containing parameters converted to a Uint8Array.
* @param buffer - A Uint8Array representation of Float32 parameters.
* @param trainableOnly - True if trainable parameters only to be modified, false otherwise. Default value is true.
*/
loadParametersBuffer(array: Uint8Array, trainableOnly: boolean): Promise<void>;
loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise<void>;
/**

@@ -120,15 +120,15 @@ * Copies the model parameters to a contiguous buffer. Usually used in the context of Federated Learning.

*/
checkpointState: TrainingSession.URIorBuffer;
checkpointState: TrainingSession.UriOrBuffer;
/**
* URI or buffer for the .onnx training file.
*/
trainModel: TrainingSession.URIorBuffer;
trainModel: TrainingSession.UriOrBuffer;
/**
* Optional. URI or buffer for the .onnx optimizer model file.
*/
optimizerModel?: TrainingSession.URIorBuffer;
optimizerModel?: TrainingSession.UriOrBuffer;
/**
* Optional. URI or buffer for the .onnx eval model file.
*/
evalModel?: TrainingSession.URIorBuffer;
evalModel?: TrainingSession.UriOrBuffer;
}

@@ -135,0 +135,0 @@ /**

@@ -1,2 +0,2 @@

export declare const version = "1.18.0-dev.20240309-efad5bbc5a";
export declare const version = "1.18.0-dev.20240424-218b6b0a73";
//# sourceMappingURL=version.d.ts.map

@@ -8,3 +8,3 @@ "use strict";

// Do not modify file content manually.
exports.version = '1.18.0-dev.20240309-efad5bbc5a';
exports.version = '1.18.0-dev.20240424-218b6b0a73';
//# sourceMappingURL=version.js.map
import { Backend } from './backend.js';
import { InferenceSession } from './inference-session.js';
/**

@@ -14,10 +15,11 @@ * Register a backend.

/**
* Resolve backend by specified hints.
* Resolve execution providers from the specific session options.
*
* @param backendHints - a list of execution provider names to lookup. If omitted use registered backends as list.
* @returns a promise that resolves to the backend.
* @param options - the session options object.
* @returns a promise that resolves to a tuple of an initialized backend instance and a session options object with
* filtered EP list.
*
* @ignore
*/
export declare const resolveBackend: (backendHints: readonly string[]) => Promise<Backend>;
export declare const resolveBackendAndExecutionProviders: (options: InferenceSession.SessionOptions) => Promise<[backend: Backend, options: InferenceSession.SessionOptions]>;
//# sourceMappingURL=backend-impl.d.ts.map

@@ -48,43 +48,95 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

/**
* Resolve backend by specified hints.
* Try to resolve and initialize a backend.
*
* @param backendHints - a list of execution provider names to lookup. If omitted use registered backends as list.
* @returns a promise that resolves to the backend.
* @param backendName - the name of the backend.
* @returns the backend instance if resolved and initialized successfully, or an error message if failed.
*/
const tryResolveAndInitializeBackend = async (backendName) => {
const backendInfo = backends.get(backendName);
if (!backendInfo) {
return 'backend not found.';
}
if (backendInfo.initialized) {
return backendInfo.backend;
}
else if (backendInfo.aborted) {
return backendInfo.error;
}
else {
const isInitializing = !!backendInfo.initPromise;
try {
if (!isInitializing) {
backendInfo.initPromise = backendInfo.backend.init(backendName);
}
await backendInfo.initPromise;
backendInfo.initialized = true;
return backendInfo.backend;
}
catch (e) {
if (!isInitializing) {
backendInfo.error = `${e}`;
backendInfo.aborted = true;
}
return backendInfo.error;
}
finally {
delete backendInfo.initPromise;
}
}
};
/**
* Resolve execution providers from the specific session options.
*
* @param options - the session options object.
* @returns a promise that resolves to a tuple of an initialized backend instance and a session options object with
* filtered EP list.
*
* @ignore
*/
export const resolveBackend = async (backendHints) => {
export const resolveBackendAndExecutionProviders = async (options) => {
// extract backend hints from session options
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backendNames = backendHints.length === 0 ? backendsSortedByPriority : backendHints;
// try to resolve and initialize all requested backends
let backend;
const errors = [];
const availableBackendNames = new Set();
for (const backendName of backendNames) {
const backendInfo = backends.get(backendName);
if (backendInfo) {
if (backendInfo.initialized) {
return backendInfo.backend;
const resolveResult = await tryResolveAndInitializeBackend(backendName);
if (typeof resolveResult === 'string') {
errors.push({ name: backendName, err: resolveResult });
}
else {
if (!backend) {
backend = resolveResult;
}
else if (backendInfo.aborted) {
continue; // current backend is unavailable; try next
if (backend === resolveResult) {
availableBackendNames.add(backendName);
}
const isInitializing = !!backendInfo.initPromise;
try {
if (!isInitializing) {
backendInfo.initPromise = backendInfo.backend.init(backendName);
}
}
// if no backend is available, throw error.
if (!backend) {
throw new Error(`no available backend found. ERR: ${errors.map(e => `[${e.name}] ${e.err}`).join(', ')}`);
}
// for each explicitly requested backend, if it's not available, output warning message.
for (const { name, err } of errors) {
if (backendHints.includes(name)) {
// eslint-disable-next-line no-console
console.warn(`removing requested execution provider "${name}" from session options because it is not available: ${err}`);
}
}
const filteredEps = eps.filter(i => availableBackendNames.has(typeof i === 'string' ? i : i.name));
return [
backend, new Proxy(options, {
get: (target, prop) => {
if (prop === 'executionProviders') {
return filteredEps;
}
await backendInfo.initPromise;
backendInfo.initialized = true;
return backendInfo.backend;
return Reflect.get(target, prop);
}
catch (e) {
if (!isInitializing) {
errors.push({ name: backendName, err: e });
}
backendInfo.aborted = true;
}
finally {
delete backendInfo.initPromise;
}
}
}
throw new Error(`no available backend found. ERR: ${errors.map(e => `[${e.name}] ${e.err}`).join(', ')}`);
})
];
};
//# sourceMappingURL=backend-impl.js.map

@@ -51,3 +51,3 @@ import { InferenceSession } from './inference-session.js';

getParametersSize(trainableOnly: boolean): Promise<number>;
loadParametersBuffer(array: Uint8Array, trainableOnly: boolean): Promise<void>;
loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise<void>;
getContiguousParameters(trainableOnly: boolean): Promise<OnnxValue>;

@@ -66,5 +66,5 @@ }

createInferenceSessionHandler(uriOrBuffer: string | Uint8Array, options?: InferenceSession.SessionOptions): Promise<InferenceSessionHandler>;
createTrainingSessionHandler?(checkpointStateUriOrBuffer: TrainingSession.URIorBuffer, trainModelUriOrBuffer: TrainingSession.URIorBuffer, evalModelUriOrBuffer: TrainingSession.URIorBuffer, optimizerModelUriOrBuffer: TrainingSession.URIorBuffer, options: InferenceSession.SessionOptions): Promise<TrainingSessionHandler>;
createTrainingSessionHandler?(checkpointStateUriOrBuffer: TrainingSession.UriOrBuffer, trainModelUriOrBuffer: TrainingSession.UriOrBuffer, evalModelUriOrBuffer: TrainingSession.UriOrBuffer, optimizerModelUriOrBuffer: TrainingSession.UriOrBuffer, options: InferenceSession.SessionOptions): Promise<TrainingSessionHandler>;
}
export { registerBackend } from './backend-impl.js';
//# sourceMappingURL=backend.d.ts.map

@@ -130,8 +130,47 @@ export declare namespace Env {

/**
* Set or get the power preference.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as options for `navigator.gpu.requestAdapter()`.
*
* See {@link https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions} for more details.
*
* @defaultValue `undefined`
*/
powerPreference?: 'low-power' | 'high-performance';
/**
* Set or get the force fallback adapter flag.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as options for `navigator.gpu.requestAdapter()`.
*
* See {@link https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions} for more details.
*
* @defaultValue `undefined`
*/
forceFallbackAdapter?: boolean;
/**
* Set or get the adapter for WebGPU.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as the GPU adapter for the underlying WebGPU backend to create GPU device.
*
* If this property is not set, it will become readable after the first WebGPU inference session is created. The
* value will be the GPU adapter created by the underlying WebGPU backend.
*
* When used with TypeScript, the type of this property is `GPUAdapter` defined in "@webgpu/types".
* Use `const adapter = env.webgpu.adapter as GPUAdapter;` in TypeScript to access this property with correct type.
*
* see comments on {@link Tensor.GpuBufferType}
*/
adapter: unknown;
/**
* Get the device for WebGPU.
*
* This property is only available after the first WebGPU inference session is created.
*
* When used with TypeScript, the type of this property is `GPUDevice` defined in "@webgpu/types".
* Use `const device = env.webgpu.device as GPUDevice;` in TypeScript to access this property with correct type.
*
* see comments on {@link GpuBufferType} for more details about why not use types defined in "@webgpu/types".
* see comments on {@link Tensor.GpuBufferType} for more details about why not use types defined in "@webgpu/types".
*/

@@ -138,0 +177,0 @@ readonly device: unknown;

@@ -11,3 +11,3 @@ /**

* See also:
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript.html)
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript/)
* - [Inference examples](https://github.com/microsoft/onnxruntime-inference-examples/tree/main/js)

@@ -21,5 +21,8 @@ *

export * from './tensor.js';
export * from './tensor-conversion.js';
export * from './tensor-factory.js';
export * from './trace.js';
export * from './onnx-model.js';
export * from './onnx-value.js';
export * from './training-session.js';
//# sourceMappingURL=index.d.ts.map

@@ -13,3 +13,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

* See also:
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript.html)
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript/)
* - [Inference examples](https://github.com/microsoft/onnxruntime-inference-examples/tree/main/js)

@@ -23,5 +23,8 @@ *

export * from './tensor.js';
export * from './tensor-conversion.js';
export * from './tensor-factory.js';
export * from './trace.js';
export * from './onnx-model.js';
export * from './onnx-value.js';
export * from './training-session.js';
//# sourceMappingURL=index.js.map
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import { resolveBackend } from './backend-impl.js';
import { resolveBackendAndExecutionProviders } from './backend-impl.js';
import { Tensor } from './tensor.js';

@@ -179,7 +179,5 @@ import { TRACE_FUNC_BEGIN, TRACE_FUNC_END } from './trace.js';

}
// get backend hints
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backend = await resolveBackend(backendHints);
const handler = await backend.createInferenceSessionHandler(filePathOrUint8Array, options);
// resolve backend, update session options with validated EPs, and create session handler
const [backend, optionsWithValidatedEPs] = await resolveBackendAndExecutionProviders(options);
const handler = await backend.createInferenceSessionHandler(filePathOrUint8Array, optionsWithValidatedEPs);
TRACE_FUNC_END();

@@ -186,0 +184,0 @@ return new InferenceSession(handler);

@@ -161,13 +161,13 @@ import { OnnxModelOptions } from './onnx-model.js';

interface ExecutionProviderOptionMap {
coreml: CoreMLExecutionProviderOption;
cpu: CpuExecutionProviderOption;
coreml: CoreMlExecutionProviderOption;
cuda: CudaExecutionProviderOption;
dml: DmlExecutionProviderOption;
nnapi: NnapiExecutionProviderOption;
tensorrt: TensorRtExecutionProviderOption;
wasm: WebAssemblyExecutionProviderOption;
webgl: WebGLExecutionProviderOption;
xnnpack: XnnpackExecutionProviderOption;
webgpu: WebGpuExecutionProviderOption;
webnn: WebNNExecutionProviderOption;
nnapi: NnapiExecutionProviderOption;
xnnpack: XnnpackExecutionProviderOption;
}

@@ -187,6 +187,2 @@ type ExecutionProviderName = keyof ExecutionProviderOptionMap;

}
interface CoreMlExecutionProviderOption extends ExecutionProviderOption {
readonly name: 'coreml';
coreMlFlags?: number;
}
interface DmlExecutionProviderOption extends ExecutionProviderOption {

@@ -215,3 +211,3 @@ readonly name: 'dml';

readonly name: 'webnn';
deviceType?: 'cpu' | 'gpu';
deviceType?: 'cpu' | 'gpu' | 'npu';
numThreads?: number;

@@ -222,4 +218,35 @@ powerPreference?: 'default' | 'low-power' | 'high-performance';

readonly name: 'coreml';
/**
* The bit flags for CoreML execution provider.
*
* ```
* COREML_FLAG_USE_CPU_ONLY = 0x001
* COREML_FLAG_ENABLE_ON_SUBGRAPH = 0x002
* COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004
* COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008
* COREML_FLAG_CREATE_MLPROGRAM = 0x010
* ```
*
* See include/onnxruntime/core/providers/coreml/coreml_provider_factory.h for more details.
*
* This flag is available only in ONNXRuntime (Node.js binding).
*/
coreMlFlags?: number;
/**
* Specify whether to use CPU only in CoreML EP.
*
* This setting is available only in ONNXRuntime (react-native).
*/
useCPUOnly?: boolean;
/**
* Specify whether to enable CoreML EP on subgraph.
*
* This setting is available only in ONNXRuntime (react-native).
*/
enableOnSubgraph?: boolean;
/**
* Specify whether to only enable CoreML EP for Apple devices with ANE (Apple Neural Engine).
*
* This setting is available only in ONNXRuntime (react-native).
*/
onlyEnableDeviceWithANE?: boolean;

@@ -226,0 +253,0 @@ }

import { Tensor } from './tensor.js';
type NonTensorType = never;
export type NonTensorType = never;
/**

@@ -13,3 +13,2 @@ * Type OnnxValue Represents both tensors and non-tensors value for model's inputs/outputs.

export type OnnxValueDataLocation = Tensor.DataLocation;
export {};
//# sourceMappingURL=onnx-value.d.ts.map

@@ -204,3 +204,3 @@ import { Tensor, TypedTensor } from './tensor.js';

*
* @param bitMap - the ImageBitmap object to create tensor from
* @param bitmap - the ImageBitmap object to create tensor from
* @param options - An optional object representing options for creating tensor from URL.

@@ -207,0 +207,0 @@ *

@@ -141,3 +141,3 @@ import { TensorFactory } from './tensor-factory.js';

*/
export interface TensorConstructor {
export interface TensorConstructor extends TensorFactory {
/**

@@ -275,4 +275,4 @@ * Construct a new string tensor object from the given type, data and dims.

}
export declare const Tensor: TensorConstructor & TensorFactory;
export declare const Tensor: TensorConstructor;
export {};
//# sourceMappingURL=tensor.d.ts.map

@@ -0,4 +1,13 @@

/**
* @ignore
*/
export declare const TRACE: (deviceType: string, label: string) => void;
/**
* @ignore
*/
export declare const TRACE_FUNC_BEGIN: (extraMsg?: string) => void;
/**
* @ignore
*/
export declare const TRACE_FUNC_END: (extraMsg?: string) => void;
//# sourceMappingURL=trace.d.ts.map
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import { env } from './env-impl.js';
/**
* @ignore
*/
export const TRACE = (deviceType, label) => {

@@ -28,2 +31,5 @@ if (typeof env.trace === 'undefined' ? !env.wasm.trace : !env.trace) {

};
/**
* @ignore
*/
export const TRACE_FUNC_BEGIN = (extraMsg) => {

@@ -35,2 +41,5 @@ if (typeof env.trace === 'undefined' ? !env.wasm.trace : !env.trace) {

};
/**
* @ignore
*/
export const TRACE_FUNC_END = (extraMsg) => {

@@ -37,0 +46,0 @@ if (typeof env.trace === 'undefined' ? !env.wasm.trace : !env.trace) {

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import { resolveBackend } from './backend-impl.js';
import { resolveBackendAndExecutionProviders } from './backend-impl.js';
import { Tensor } from './tensor.js';

@@ -39,8 +39,6 @@ const noBackendErrMsg = 'Training backend could not be resolved. ' +

const options = sessionOptions || {};
// get backend hints
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backend = await resolveBackend(backendHints);
// resolve backend, update session options with validated EPs, and create session handler
const [backend, optionsWithValidatedEPs] = await resolveBackendAndExecutionProviders(options);
if (backend.createTrainingSessionHandler) {
const handler = await backend.createTrainingSessionHandler(trainingOptions.checkpointState, trainingOptions.trainModel, evalModel, optimizerModel, options);
const handler = await backend.createTrainingSessionHandler(trainingOptions.checkpointState, trainingOptions.trainModel, evalModel, optimizerModel, optionsWithValidatedEPs);
return new TrainingSession(handler, !!trainingOptions.optimizerModel, !!trainingOptions.evalModel);

@@ -47,0 +45,0 @@ }

@@ -7,3 +7,3 @@ import { InferenceSession } from './inference-session.js';

*/
type URIorBuffer = string | Uint8Array;
type UriOrBuffer = string | Uint8Array;
}

@@ -75,9 +75,9 @@ /**

/**
* Copies parameter values from the given array to the training state. Currently, only supporting models with
* Copies parameter values from the given buffer to the training state. Currently, only supporting models with
* parameters of type Float32.
*
* @param buffer - Float32 buffer containing parameters converted to a Uint8Array.
* @param buffer - A Uint8Array representation of Float32 parameters.
* @param trainableOnly - True if trainable parameters only to be modified, false otherwise. Default value is true.
*/
loadParametersBuffer(array: Uint8Array, trainableOnly: boolean): Promise<void>;
loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise<void>;
/**

@@ -120,15 +120,15 @@ * Copies the model parameters to a contiguous buffer. Usually used in the context of Federated Learning.

*/
checkpointState: TrainingSession.URIorBuffer;
checkpointState: TrainingSession.UriOrBuffer;
/**
* URI or buffer for the .onnx training file.
*/
trainModel: TrainingSession.URIorBuffer;
trainModel: TrainingSession.UriOrBuffer;
/**
* Optional. URI or buffer for the .onnx optimizer model file.
*/
optimizerModel?: TrainingSession.URIorBuffer;
optimizerModel?: TrainingSession.UriOrBuffer;
/**
* Optional. URI or buffer for the .onnx eval model file.
*/
evalModel?: TrainingSession.URIorBuffer;
evalModel?: TrainingSession.UriOrBuffer;
}

@@ -135,0 +135,0 @@ /**

@@ -1,2 +0,2 @@

export declare const version = "1.18.0-dev.20240309-efad5bbc5a";
export declare const version = "1.18.0-dev.20240424-218b6b0a73";
//# sourceMappingURL=version.d.ts.map

@@ -5,3 +5,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

// Do not modify file content manually.
export const version = '1.18.0-dev.20240309-efad5bbc5a';
export const version = '1.18.0-dev.20240424-218b6b0a73';
//# sourceMappingURL=version.js.map

@@ -5,2 +5,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

import {Backend} from './backend.js';
import {InferenceSession} from './inference-session.js';

@@ -14,2 +15,3 @@ interface BackendInfo {

aborted?: boolean;
error?: string;
}

@@ -65,41 +67,98 @@

/**
* Resolve backend by specified hints.
* Try to resolve and initialize a backend.
*
* @param backendHints - a list of execution provider names to lookup. If omitted use registered backends as list.
* @returns a promise that resolves to the backend.
* @param backendName - the name of the backend.
* @returns the backend instance if resolved and initialized successfully, or an error message if failed.
*/
const tryResolveAndInitializeBackend = async(backendName: string): Promise<Backend|string> => {
const backendInfo = backends.get(backendName);
if (!backendInfo) {
return 'backend not found.';
}
if (backendInfo.initialized) {
return backendInfo.backend;
} else if (backendInfo.aborted) {
return backendInfo.error!;
} else {
const isInitializing = !!backendInfo.initPromise;
try {
if (!isInitializing) {
backendInfo.initPromise = backendInfo.backend.init(backendName);
}
await backendInfo.initPromise;
backendInfo.initialized = true;
return backendInfo.backend;
} catch (e) {
if (!isInitializing) {
backendInfo.error = `${e}`;
backendInfo.aborted = true;
}
return backendInfo.error!;
} finally {
delete backendInfo.initPromise;
}
}
};
/**
* Resolve execution providers from the specific session options.
*
* @param options - the session options object.
* @returns a promise that resolves to a tuple of an initialized backend instance and a session options object with
* filtered EP list.
*
* @ignore
*/
export const resolveBackend = async(backendHints: readonly string[]): Promise<Backend> => {
const backendNames = backendHints.length === 0 ? backendsSortedByPriority : backendHints;
const errors = [];
for (const backendName of backendNames) {
const backendInfo = backends.get(backendName);
if (backendInfo) {
if (backendInfo.initialized) {
return backendInfo.backend;
} else if (backendInfo.aborted) {
continue; // current backend is unavailable; try next
export const resolveBackendAndExecutionProviders = async(options: InferenceSession.SessionOptions):
Promise<[backend: Backend, options: InferenceSession.SessionOptions]> => {
// extract backend hints from session options
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backendNames = backendHints.length === 0 ? backendsSortedByPriority : backendHints;
// try to resolve and initialize all requested backends
let backend: Backend|undefined;
const errors = [];
const availableBackendNames = new Set<string>();
for (const backendName of backendNames) {
const resolveResult = await tryResolveAndInitializeBackend(backendName);
if (typeof resolveResult === 'string') {
errors.push({name: backendName, err: resolveResult});
} else {
if (!backend) {
backend = resolveResult;
}
if (backend === resolveResult) {
availableBackendNames.add(backendName);
}
}
}
const isInitializing = !!backendInfo.initPromise;
try {
if (!isInitializing) {
backendInfo.initPromise = backendInfo.backend.init(backendName);
// if no backend is available, throw error.
if (!backend) {
throw new Error(`no available backend found. ERR: ${errors.map(e => `[${e.name}] ${e.err}`).join(', ')}`);
}
// for each explicitly requested backend, if it's not available, output warning message.
for (const {name, err} of errors) {
if (backendHints.includes(name)) {
// eslint-disable-next-line no-console
console.warn(`removing requested execution provider "${
name}" from session options because it is not available: ${err}`);
}
await backendInfo.initPromise;
backendInfo.initialized = true;
return backendInfo.backend;
} catch (e) {
if (!isInitializing) {
errors.push({name: backendName, err: e});
}
backendInfo.aborted = true;
} finally {
delete backendInfo.initPromise;
}
}
}
throw new Error(`no available backend found. ERR: ${errors.map(e => `[${e.name}] ${e.err}`).join(', ')}`);
};
const filteredEps = eps.filter(i => availableBackendNames.has(typeof i === 'string' ? i : i.name));
return [
backend, new Proxy(options, {
get: (target, prop) => {
if (prop === 'executionProviders') {
return filteredEps;
}
return Reflect.get(target, prop);
}
})
];
};
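Note how `tryResolveAndInitializeBackend` above caches a single in-flight `initPromise` so concurrent resolvers share one initialization attempt, and records a failure exactly once. A standalone sketch of that pattern with generic names (not the library's API):

```ts
interface InitState {
  initialized?: boolean;
  aborted?: boolean;
  error?: string;
  initPromise?: Promise<void>;
}

const initOnce = async (state: InitState, init: () => Promise<void>): Promise<true | string> => {
  if (state.initialized) {
    return true; // already done
  }
  if (state.aborted) {
    return state.error!; // a previous attempt failed; do not retry
  }
  const isInitializing = !!state.initPromise;
  try {
    if (!isInitializing) {
      state.initPromise = init(); // first caller starts the work
    }
    await state.initPromise; // later callers await the same promise
    state.initialized = true;
    return true;
  } catch (e) {
    if (!isInitializing) {
      state.error = `${e}`;
      state.aborted = true;
    }
    return state.error!;
  } finally {
    delete state.initPromise;
  }
};
```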

@@ -61,3 +61,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

getParametersSize(trainableOnly: boolean): Promise<number>;
loadParametersBuffer(array: Uint8Array, trainableOnly: boolean): Promise<void>;
loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise<void>;
getContiguousParameters(trainableOnly: boolean): Promise<OnnxValue>;

@@ -81,4 +81,4 @@ }

createTrainingSessionHandler?
(checkpointStateUriOrBuffer: TrainingSession.URIorBuffer, trainModelUriOrBuffer: TrainingSession.URIorBuffer,
evalModelUriOrBuffer: TrainingSession.URIorBuffer, optimizerModelUriOrBuffer: TrainingSession.URIorBuffer,
(checkpointStateUriOrBuffer: TrainingSession.UriOrBuffer, trainModelUriOrBuffer: TrainingSession.UriOrBuffer,
evalModelUriOrBuffer: TrainingSession.UriOrBuffer, optimizerModelUriOrBuffer: TrainingSession.UriOrBuffer,
options: InferenceSession.SessionOptions): Promise<TrainingSessionHandler>;

@@ -85,0 +85,0 @@ }

@@ -147,8 +147,47 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

/**
* Set or get the power preference.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as options for `navigator.gpu.requestAdapter()`.
*
* See {@link https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions} for more details.
*
* @defaultValue `undefined`
*/
powerPreference?: 'low-power'|'high-performance';
/**
* Set or get the force fallback adapter flag.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as options for `navigator.gpu.requestAdapter()`.
*
* See {@link https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions} for more details.
*
* @defaultValue `undefined`
*/
forceFallbackAdapter?: boolean;
/**
* Set or get the adapter for WebGPU.
*
* Setting this property only has effect before the first WebGPU inference session is created. The value will be
* used as the GPU adapter for the underlying WebGPU backend to create GPU device.
*
* If this property is not set, it will become readable after the first WebGPU inference session is created. The
* value will be the GPU adapter created by the underlying WebGPU backend.
*
* When used with TypeScript, the type of this property is `GPUAdapter` defined in "@webgpu/types".
* Use `const adapter = env.webgpu.adapter as GPUAdapter;` in TypeScript to access this property with correct type.
*
* see comments on {@link Tensor.GpuBufferType}
*/
adapter: unknown;
/**
* Get the device for WebGPU.
*
* This property is only available after the first WebGPU inference session is created.
*
* When used with TypeScript, the type of this property is `GPUDevice` defined in "@webgpu/types".
* Use `const device = env.webgpu.device as GPUDevice;` in TypeScript to access this property with correct type.
*
* see comments on {@link GpuBufferType} for more details about why not use types defined in "@webgpu/types".
* see comments on {@link Tensor.GpuBufferType} for more details about why not use types defined in "@webgpu/types".
*/

@@ -155,0 +194,0 @@ readonly device: unknown;

@@ -14,3 +14,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

* See also:
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript.html)
* - [Get Started](https://onnxruntime.ai/docs/get-started/with-javascript/)
* - [Inference examples](https://github.com/microsoft/onnxruntime-inference-examples/tree/main/js)

@@ -25,4 +25,7 @@ *

export * from './tensor.js';
export * from './tensor-conversion.js';
export * from './tensor-factory.js';
export * from './trace.js';
export * from './onnx-model.js';
export * from './onnx-value.js';
export * from './training-session.js';
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import {resolveBackend} from './backend-impl.js';
import {resolveBackendAndExecutionProviders} from './backend-impl.js';
import {InferenceSessionHandler} from './backend.js';

@@ -198,7 +198,5 @@ import {InferenceSession as InferenceSessionInterface} from './inference-session.js';

// get backend hints
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backend = await resolveBackend(backendHints);
const handler = await backend.createInferenceSessionHandler(filePathOrUint8Array, options);
// resolve backend, update session options with validated EPs, and create session handler
const [backend, optionsWithValidatedEPs] = await resolveBackendAndExecutionProviders(options);
const handler = await backend.createInferenceSessionHandler(filePathOrUint8Array, optionsWithValidatedEPs);
TRACE_FUNC_END();

@@ -205,0 +203,0 @@ return new InferenceSession(handler);

@@ -189,3 +189,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

// Currently, we have the following backends to support execution providers:
// Backend Node.js binding: supports 'cpu' and 'cuda'.
// Backend Node.js binding: supports 'cpu', 'dml' (win32), 'coreml' (macOS) and 'cuda' (linux).
// Backend WebAssembly: supports 'cpu', 'wasm', 'webgpu' and 'webnn'.

@@ -195,13 +195,13 @@ // Backend ONNX.js: supports 'webgl'.

interface ExecutionProviderOptionMap {
coreml: CoreMLExecutionProviderOption;
cpu: CpuExecutionProviderOption;
coreml: CoreMlExecutionProviderOption;
cuda: CudaExecutionProviderOption;
dml: DmlExecutionProviderOption;
nnapi: NnapiExecutionProviderOption;
tensorrt: TensorRtExecutionProviderOption;
wasm: WebAssemblyExecutionProviderOption;
webgl: WebGLExecutionProviderOption;
xnnpack: XnnpackExecutionProviderOption;
webgpu: WebGpuExecutionProviderOption;
webnn: WebNNExecutionProviderOption;
nnapi: NnapiExecutionProviderOption;
xnnpack: XnnpackExecutionProviderOption;
}

@@ -224,6 +224,2 @@

}
export interface CoreMlExecutionProviderOption extends ExecutionProviderOption {
readonly name: 'coreml';
coreMlFlags?: number;
}
export interface DmlExecutionProviderOption extends ExecutionProviderOption {

@@ -253,3 +249,3 @@ readonly name: 'dml';

readonly name: 'webnn';
deviceType?: 'cpu'|'gpu';
deviceType?: 'cpu'|'gpu'|'npu';
numThreads?: number;

@@ -260,4 +256,35 @@ powerPreference?: 'default'|'low-power'|'high-performance';

readonly name: 'coreml';
/**
* The bit flags for CoreML execution provider.
*
* ```
* COREML_FLAG_USE_CPU_ONLY = 0x001
* COREML_FLAG_ENABLE_ON_SUBGRAPH = 0x002
* COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004
* COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008
* COREML_FLAG_CREATE_MLPROGRAM = 0x010
* ```
*
* See include/onnxruntime/core/providers/coreml/coreml_provider_factory.h for more details.
*
* This flag is available only in ONNXRuntime (Node.js binding).
*/
coreMlFlags?: number;
/**
* Specify whether to use CPU only in CoreML EP.
*
* This setting is available only in ONNXRuntime (react-native).
*/
useCPUOnly?: boolean;
/**
* Specify whether to enable CoreML EP on subgraph.
*
* This setting is available only in ONNXRuntime (react-native).
*/
enableOnSubgraph?: boolean;
/**
* Specify whether to only enable CoreML EP for Apple devices with ANE (Apple Neural Engine).
*
* This setting is available only in ONNXRuntime (react-native).
*/
onlyEnableDeviceWithANE?: boolean;

@@ -264,0 +291,0 @@ }

@@ -6,3 +6,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

type NonTensorType = never;
export type NonTensorType = never;

@@ -9,0 +9,0 @@ /**

@@ -256,3 +256,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

*
* @param bitMap - the ImageBitmap object to create tensor from
* @param bitmap - the ImageBitmap object to create tensor from
* @param options - An optional object representing options for creating tensor from URL.

@@ -259,0 +259,0 @@ *

@@ -163,3 +163,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

*/
export interface TensorConstructor {
export interface TensorConstructor extends TensorFactory {
// #region CPU tensor - specify element type

@@ -330,2 +330,2 @@ /**

// eslint-disable-next-line @typescript-eslint/naming-convention
export const Tensor = TensorImpl as (TensorConstructor & TensorFactory);
export const Tensor = TensorImpl as TensorConstructor;

@@ -6,2 +6,5 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

/**
* @ignore
*/
export const TRACE = (deviceType: string, label: string) => {

@@ -33,2 +36,5 @@ if (typeof env.trace === 'undefined' ? !env.wasm.trace : !env.trace) {

/**
* @ignore
*/
export const TRACE_FUNC_BEGIN = (extraMsg?: string) => {

@@ -41,2 +47,5 @@ if (typeof env.trace === 'undefined' ? !env.wasm.trace : !env.trace) {

/**
* @ignore
*/
export const TRACE_FUNC_END = (extraMsg?: string) => {

@@ -43,0 +52,0 @@ if (typeof env.trace === 'undefined' ? !env.wasm.trace : !env.trace) {

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import {resolveBackend} from './backend-impl.js';
import {resolveBackendAndExecutionProviders} from './backend-impl.js';
import {SessionHandler, TrainingSessionHandler} from './backend.js';

@@ -58,9 +58,8 @@ import {InferenceSession as InferenceSession} from './inference-session.js';

// get backend hints
const eps = options.executionProviders || [];
const backendHints = eps.map(i => typeof i === 'string' ? i : i.name);
const backend = await resolveBackend(backendHints);
// resolve backend, update session options with validated EPs, and create session handler
const [backend, optionsWithValidatedEPs] = await resolveBackendAndExecutionProviders(options);
if (backend.createTrainingSessionHandler) {
const handler = await backend.createTrainingSessionHandler(
trainingOptions.checkpointState, trainingOptions.trainModel, evalModel, optimizerModel, options);
trainingOptions.checkpointState, trainingOptions.trainModel, evalModel, optimizerModel,
optionsWithValidatedEPs);
return new TrainingSession(handler, !!trainingOptions.optimizerModel, !!trainingOptions.evalModel);

@@ -67,0 +66,0 @@ } else {

@@ -14,3 +14,3 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

*/
type URIorBuffer = string|Uint8Array;
type UriOrBuffer = string|Uint8Array;
}

@@ -102,9 +102,9 @@

/**
* Copies parameter values from the given array to the training state. Currently, only supporting models with
* Copies parameter values from the given buffer to the training state. Currently, only supporting models with
* parameters of type Float32.
*
* @param buffer - Float32 buffer containing parameters converted to a Uint8Array.
* @param buffer - A Uint8Array representation of Float32 parameters.
* @param trainableOnly - True if trainable parameters only to be modified, false otherwise. Default value is true.
*/
loadParametersBuffer(array: Uint8Array, trainableOnly: boolean): Promise<void>;
loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise<void>;
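`loadParametersBuffer` expects Float32 parameters packed as a `Uint8Array`. A hedged sketch (`trainingSession` is assumed to be an existing `TrainingSession` instance):

```ts
const params = new Float32Array([0.1, 0.2, 0.3]);
const buffer = new Uint8Array(params.buffer, params.byteOffset, params.byteLength);

await trainingSession.loadParametersBuffer(buffer, /* trainableOnly */ true);
```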

@@ -162,15 +162,15 @@ /**

*/
checkpointState: TrainingSession.URIorBuffer;
checkpointState: TrainingSession.UriOrBuffer;
/**
* URI or buffer for the .onnx training file.
*/
trainModel: TrainingSession.URIorBuffer;
trainModel: TrainingSession.UriOrBuffer;
/**
* Optional. URI or buffer for the .onnx optimizer model file.
*/
optimizerModel?: TrainingSession.URIorBuffer;
optimizerModel?: TrainingSession.UriOrBuffer;
/**
* Optional. URI or buffer for the .onnx eval model file.
*/
evalModel?: TrainingSession.URIorBuffer;
evalModel?: TrainingSession.UriOrBuffer;
}

@@ -177,0 +177,0 @@

@@ -7,2 +7,2 @@ // Copyright (c) Microsoft Corporation. All rights reserved.

export const version = '1.18.0-dev.20240309-efad5bbc5a';
export const version = '1.18.0-dev.20240424-218b6b0a73';

@@ -5,3 +5,3 @@ {

"name": "onnxruntime-common",
"version": "1.18.0-dev.20240309-efad5bbc5a",
"version": "1.18.0-dev.20240424-218b6b0a73",
"repository": {

@@ -8,0 +8,0 @@ "url": "https://github.com/Microsoft/onnxruntime.git",
