docs: update

This commit is contained in:
Carson M.
2026-03-06 01:05:04 -06:00
parent 94417081c4
commit 56b32fddd8
25 changed files with 1165 additions and 993 deletions

View File

@@ -1,84 +1,6 @@
//! `ort-web` is an [`ort`] backend that enables the usage of ONNX Runtime in the web.
//!
//! # Usage
//! ## CORS
//! `ort-web` dynamically fetches the required scripts & WASM binary at runtime. By default, it will fetch the build
//! from the `cdn.pyke.io` domain, so make sure it is accessible via CORS if you have that configured.
//!
//! You can also use a self-hosted build with [`Dist`]; see the [`api`](fn@api) function for an example. The scripts &
//! binary can be acquired from the `dist` folder of the [`onnxruntime-web` npm package](https://npmjs.com/package/onnxruntime-web).
//!
//! ### Telemetry
//! `ort-web` collects telemetry data by default and sends it to `signal.pyke.io`. This telemetry data helps us
//! understand how `ort-web` is being used so we can improve it. Zero PII is collected; you can see what is sent in
//! `_telemetry.js`. If you wish to contribute telemetry data, please allowlist `signal.pyke.io`; otherwise, it can be
//! disabled via [`EnvironmentBuilder::with_telemetry`](ort::environment::EnvironmentBuilder::with_telemetry).
//!
//! ## Initialization
//! `ort` must have the `alternative-backend` feature enabled, as this enables the usage of [`ort::set_api`].
//!
//! You can choose which build of ONNX Runtime to fetch by choosing any combination of these 3 feature flags:
//! [`FEATURE_WEBGL`], [`FEATURE_WEBGPU`], [`FEATURE_WEBNN`]. These enable the usage of the [WebGL][ort::ep::WebGL],
//! [WebGPU][ort::ep::WebGPU], and [WebNN][ort::ep::WebNN] EPs respectively. You can `|` features together to enable
//! multiple at once:
//!
//! ```no_run
//! use ort_web::{FEATURE_WEBGL, FEATURE_WEBGPU};
//! ort::set_api(ort_web::api(FEATURE_WEBGL | FEATURE_WEBGPU).await?);
//! ```
//!
//! You'll still need to configure the EPs on a per-session basis later like you would normally, but this allows you to
//! e.g. only fetch the CPU build if the user doesn't have hardware acceleration.
//!
//! ## Session creation
//! Sessions can only be created from a URL, or indirectly from memory - that means no
//! `SessionBuilder::commit_from_memory_directly` for `.ort` format models, and no `SessionBuilder::commit_from_file`.
//!
//! The remaining commit functions - `SessionBuilder::commit_from_url` and `SessionBuilder::commit_from_memory` are
//! marked `async` and need to be `await`ed. `commit_from_url` is always available when targeting WASM and does not
//! require the `fetch-models` feature flag to be enabled for `ort`.
//!
//! ## Inference
//! Only `Session::run_async` is supported; `Session::run` will always throw an error.
//!
//! Inference outputs are not synchronized by default (see the next section). If you need access to the data of all
//! session outputs from Rust, the [`sync_outputs`] function can be used to sync them all at once.
//!
//! ## Synchronization
//! ONNX Runtime is loaded as a separate WASM module, and `ort-web` acts as an intermediary between the two. There is no
//! mechanism in WASM for two modules to share memory, so tensors often need to be 'synchronized' when one side needs to
//! see data from the other.
//!
//! [`Tensor::new`](ort::value::Tensor::new) should never be used for creating inputs, as they start out allocated on
//! the ONNX Runtime side, thus requiring a sync (of empty data) to Rust before it can be written to. Prefer instead
//! [`Tensor::from_array`](ort::value::Tensor::from_array)/
//! [`TensorRef::from_array_view`](ort::value::TensorRef::from_array_view), as tensors created this way never require
//! synchronization.
//!
//! As previously stated, session outputs are **not** synchronized. If you wish to use their data in Rust, you must
//! either sync all outputs at once with [`sync_outputs`], or sync each tensor at a time (if you only use a few
//! outputs):
//! ```ignore
//! use ort_web::{TensorExt, SyncDirection};
//!
//! let mut outputs = session.run_async(ort::inputs![...]).await?;
//!
//! let mut bounding_boxes = outputs.remove("bounding_boxes").unwrap();
//! bounding_boxes.sync(SyncDirection::Rust).await?;
//!
//! // now we can use the data
//! let data = bounding_boxes.try_extract_tensor::<f32>()?;
//! ```
//!
//! Once a session output is `sync`ed, that tensor becomes backed by a Rust buffer. Updates to the tensor's data from
//! the Rust side will not reflect in ONNX Runtime until the tensor is `sync`ed with `SyncDirection::Runtime`. Likewise,
//! updates to the tensor's data from ONNX Runtime won't reflect in Rust until Rust syncs that tensor with
//! `SyncDirection::Rust`. You don't have to worry about this behavior if you only ever *read* from session outputs,
//! though.
//!
//! ## Limitations
//! - [`OutputSelector`](ort::session::OutputSelector) is not currently implemented.
//! - [`IoBinding`](ort::io_binding) is not supported by ONNX Runtime on the web.
//! For more information, see <https://ort.pyke.io/backends/web>.
#![deny(clippy::panic, clippy::panicking_unwrap)]
#![warn(clippy::std_instead_of_alloc, clippy::std_instead_of_core)]

View File

@@ -52,7 +52,7 @@ function Footer() {
<p style={{ fontWeight: '600' }}>
made with <FooterEmoji /> by
<a target="_blank" href="https://pyke.io/">
<svg height='12' viewBox='0 0 21 10' style={{ display: 'inline', marginLeft: '5px', marginTop: '-4px' }}>
<svg height='12' viewBox='0 0 21 10' style={{ display: 'inline', marginLeft: '5px', marginTop: '-3px' }}>
<rect width='10' height='10' fill='#00BDFF' />
<rect x='11' width='10' height='10' fill='#00FF86' />
</svg>
@@ -76,7 +76,13 @@ export default async function RootLayout({ children }) {
defaultTheme: 'system'
}}
navbar={<Navbar
logo={<img src='/assets/banner.png' style={{ height: '34px' }} />}
logo={<>
<img src='/assets/banner.png' style={{ height: '34px' }} />
<svg height='5' viewBox='0 0 21 10' style={{ marginLeft: '3px', marginTop: '6px', alignSelf: 'start' }}>
<rect width='10' height='10' fill='#00BDFF' />
<rect x='11' width='10' height='10' fill='#00FF86' />
</svg>
</>}
chatLink='https://discord.gg/uQtsNu2xMa'
projectLink='https://github.com/pykeio/ort'
/>}

View File

@@ -1,15 +1,15 @@
'use client';
import { Card, Flex, Heading, Skeleton, Switch, Text } from '@radix-ui/themes';
import { Code } from 'nextra/components';
import { useState } from 'react';
import { PiCheckBold, PiInfoFill, PiWarningFill } from 'react-icons/pi';
import { useStore } from 'zustand';
import { EXECUTION_PROVIDER_ARRAY } from '../core/ep';
import { PLATFORM_STORE } from '../core/platform';
import { useIsClient } from '../core/utils';
import PlatformSelector from './PlatformSelector';
import { Code } from 'nextra/components';
import { PiCheckBold, PiInfoFill, PiWarningBold, PiWarningFill } from 'react-icons/pi';
export default function ExecutionProviders() {
const isClient = useIsClient();
@@ -80,6 +80,7 @@ export default function ExecutionProviders() {
? <Flex direction='row' gap='1' align='center' style={{ fontSize: '12px', color: 'var(--green-11)' }}><PiCheckBold /> Ready to use</Flex>
: <Flex direction='row' gap='1' align='center' style={{ fontSize: '12px', color: 'var(--gray-11)' }}><PiInfoFill /> Requires compiling ONNX Runtime from source</Flex>
: null}
{ep.note && <Flex direction='row' gap='1' align='center' style={{ fontSize: '12px', color: 'var(--gray-11)' }}><PiInfoFill /> <span>{ep.note}</span></Flex>}
<Code style={{ fontSize: '12px' }}>features = [ "{ep.feature}" ]</Code>
</Card>
})}

View File

@@ -1 +1 @@
export const CRATE_VERSION = '2.0.0-rc.11';
export const CRATE_VERSION = '2.0.0-rc.12';

View File

@@ -1,5 +1,6 @@
export default {
index: 'Alternative backends',
candle: <span style={{ fontFamily: '"Monaspace Neon"' }}>ort-candle</span>,
tract: <span style={{ fontFamily: '"Monaspace Neon"' }}>ort-tract</span>
tract: <span style={{ fontFamily: '"Monaspace Neon"' }}>ort-tract</span>,
web: <span style={{ fontFamily: '"Monaspace Neon"' }}>ort-web</span>
};

View File

@@ -1,7 +1,8 @@
import { Steps } from 'nextra/components';
import Ort from '../../components/Ort';
# `ort-candle`
`ort-candle` is an [alternative backend](/backends) for `ort` based on [🤗 Hugging Face `candle`](https://github.com/huggingface/candle).
`ort-candle` is an [alternative backend](/backends) for <Ort/> based on [🤗 Hugging Face `candle`](https://github.com/huggingface/candle).
## Supported APIs
- ✅ `ort::init`
@@ -34,26 +35,28 @@ import { Steps } from 'nextra/components';
### Install `ort-candle`
```toml filename="Cargo.toml"
[dependencies]
ort-candle = "0.1.0+0.8"
ort-candle = "0.3.0+0.9.2"
...
```
### Enable the `alternative-backend` feature
This instructs `ort` to not try to download/link to ONNX Runtime.
This instructs <Ort/> to not try to download/link to ONNX Runtime.
```toml filename="Cargo.toml"
[dependencies.ort]
version = "=2.0.0-rc.11"
version = "=2.0.0-rc.12"
default-features = false # Disables the `download-binaries` feature since we don't need it
features = [
"std",
"ndarray",
"alternative-backend"
]
```
### Initialize the backend
Use `ort::set_api` to use the crate's API implementation.
Use [`ort::set_api`](https://docs.rs/ort/latest/ort/fn.set_api.html) to use the crate's API implementation.
```rs
```rs filename="main.rs"
fn main() {
// This should run as early in your application as possible - before you ever use `ort`!
ort::set_api(ort_candle::api());

View File

@@ -5,12 +5,13 @@ title: Alternative backends
# Alternative backends
import { Callout, Steps } from 'nextra/components';
import Ort from '../../components/Ort';
Since [ONNX Runtime](https://onnxruntime.ai/) is written in C++, linking troubles often arise when attempting to use it in a Rust project - especially with WASM. `v2.0.0-rc.11` of `ort` introduced support for **alternative backends** -- that is, ONNX executors that do not use ONNX Runtime.
Since [ONNX Runtime](https://onnxruntime.ai/) is written in C++, linking troubles often arise when attempting to use it in a Rust project--especially with WASM. `v2.0.0-rc.12` of <Ort/> introduced support for **alternative backends**: ONNX runtimes that aren't *the* ONNX Runtime.
As the Rust ML scene has evolved, many exciting new inference engines supporting ONNX models have popped up, like 🤗 Hugging Face's [`candle`](https://github.com/huggingface/candle), [Burn](https://github.com/tracel-ai/burn), and [`tract`](https://github.com/sonos/tract). These libraries, being written in pure Rust (minus some GPU kernels) play much nicer when it comes to linking, and often support any platform Rust's standard library does. They're also, of course, memory safe and 🦀blazingly🔥fast🚀!
As the Rust ML scene has evolved, many exciting new inference engines supporting ONNX models have popped up, like 🤗 Hugging Face's [`candle`](https://github.com/huggingface/candle), [Burn](https://github.com/tracel-ai/burn), and [`tract`](https://github.com/sonos/tract). These libraries, being written in pure Rust, play much nicer when it comes to linking, and often support any platform Rust's standard library does (which is a lot more than ONNX Runtime!). They're also, of course, memory safe and 🦀blazingly🔥fast🚀
Internally, alternative backend implementations are simply glue code between these libraries and the ONNX Runtime C API. Because they implement the same API as ONNX Runtime, using them in `ort` is as simple as adding one line of code!
<Ort/> alternative backends are simply wrappers that implement the ONNX Runtime C API over another crate. Because they implement the same exact API as ONNX Runtime, using them in <Ort/> is as simple as adding one line of code!
## Using an alternative backend
@@ -27,24 +28,26 @@ We'll use [`ort-tract`](/backends/tract) for this example.
```toml filename="Cargo.toml"
[dependencies]
ort-tract = "0.1.0+0.21"
ort-tract = "0.3.0+0.22"
...
```
### Enable the `alternative-backend` feature
This instructs `ort` to not try to download/link to ONNX Runtime.
This instructs <Ort/> to not try to download/link to ONNX Runtime.
```toml filename="Cargo.toml"
[dependencies.ort]
version = "=2.0.0-rc.11"
version = "=2.0.0-rc.12"
default-features = false # Disables the `download-binaries` feature since we don't need it
features = [
"std",
"ndarray",
"alternative-backend"
]
```
### Initialize the backend
Use `ort::set_api` to use the crate's API implementation (replacing `ort_tract` with whichever backend crate you choose to use):
Use [`ort::set_api`](https://docs.rs/ort/latest/ort/fn.set_api.html) to use the crate's API implementation (replacing `ort_tract` with whichever backend crate you choose to use):
```rs
fn main() {
@@ -55,13 +58,13 @@ fn main() {
### Done!
<Callout type='info'>
Be sure to check each backend's docs page to see which APIs are and are not implemented.
Be sure to check each backend's docs page to see which APIs are and are not supported.
</Callout>
</Steps>
## Available backends
`ort` currently has the following backends:
<Ort/> currently has the following backends:
- [`ort-candle`](/backends/candle), based on [🤗 Hugging Face `candle`](https://github.com/huggingface/candle)
- 🔷 **Supports**: CPU, CUDA (though not available via `ort-candle` right now), WebAssembly
@@ -69,3 +72,6 @@ fn main() {
- [`ort-tract`](/backends/tract), based on [`tract`](https://github.com/sonos/tract)
- 🔷 **Supports**: CPU, WebAssembly
- ✅ [Great operator support](https://github.com/sonos/tract?tab=readme-ov-file#onnx)
- [`ort-web`](/backends/web) runs ONNX Runtime in the web
- 🔷 **Supports**: WebAssembly (with WebGL & WebGPU backends!)
- ✅ Great operator support - *it's the full ONNX Runtime!*

View File

@@ -1,7 +1,9 @@
import { Steps } from 'nextra/components';
import Ort from '../../components/Ort';
# `ort-tract`
`ort-tract` is an [alternative backend](/backends) for `ort` based on [`tract`](https://github.com/sonos/tract).
`ort-tract` is an [alternative backend](/backends) for <Ort/> based on [`tract`](https://github.com/sonos/tract).
## Supported APIs
- ✅ `ort::init`
@@ -35,26 +37,28 @@ import { Steps } from 'nextra/components';
### Install `ort-tract`
```toml filename="Cargo.toml"
[dependencies]
ort-tract = "0.2.0+0.21"
ort-tract = "0.3.0+0.22"
...
```
### Enable the `alternative-backend` feature
This instructs `ort` to not try to download/link to ONNX Runtime.
This instructs <Ort/> to not try to download/link to ONNX Runtime.
```toml filename="Cargo.toml"
[dependencies.ort]
version = "=2.0.0-rc.11"
version = "=2.0.0-rc.12"
default-features = false # Disables the `download-binaries` feature since we don't need it
features = [
"std",
"ndarray",
"alternative-backend"
]
```
### Initialize the backend
Use `ort::set_api` to use the crate's API implementation.
Use [`ort::set_api`](https://docs.rs/ort/latest/ort/fn.set_api.html) to use the crate's API implementation.
```rs
```rs filename="main.rs"
fn main() {
// This should run as early in your application as possible - before you ever use `ort`!
ort::set_api(ort_tract::api());

View File

@@ -0,0 +1,168 @@
import { Steps } from 'nextra/components';
import Ort from '../../components/Ort';
# `ort-web`
`ort-web` is an [alternative backend](/backends) for <Ort/> that allows you to use ONNX Runtime on the Web.
ONNX Runtime is written in C++. Compiling it to WASM requires the use of [Emscripten](https://emscripten.org/). Meanwhile, Rust in WASM typically uses [`wasm-bindgen`](https://wasm-bindgen.github.io/wasm-bindgen/). Emscripten and `wasm-bindgen` have very different ABIs, conventions, and build processes that make them impractical to link together normally.
Rather than trying to link ONNX Runtime into your Rust application, `ort-web` instead acts as a *bridge* between ONNX Runtime in Emscripten and your Rust code. They live in separate WebAssembly contexts, and `ort-web` allows data to flow between them. `ort-web` tries to feel as close to normal <Ort/> as possible, but because two WASM contexts cannot directly share memory, there are some limitations, like having to [manually synchronize data](#synchronization) between contexts.
## Supported APIs
- ✅ `ort::init`
- 🔷 `ort::environment::EnvironmentBuilder`
- `EnvironmentBuilder::with_telemetry` <sup>[*](#telemetry)</sup>
- `EnvironmentBuilder::commit`
- 🔷 `ort::memory::Allocator`
- `Allocator::default`
- `Allocator::memory_info`
- ✅ `ort::memory::MemoryInfo`
- 🔷 `ort::session::Session`
- `Session::builder`
- `Session::allocator`
- `Session::run_async`
- ⚠️ Synchronous run methods like `Session::run` and `Session::run_with_options` are not supported.
- 🔷 `ort::session::builder::SessionBuilder`
- `SessionBuilder::new`
- `SessionBuilder::commit_from_memory`
- `SessionBuilder::commit_from_url` (does not require `fetch-models` feature)
- `SessionBuilder::with_optimization_level`
- ✅ `ort::value::DynValue`, `ort::value::DynValueRef`, `ort::value::DynValueRefMut`
- ✅ `ort::value::Tensor`, `TensorRef`, `TensorRefMut`, etc.
- ✅ `ort::value::ValueType`
## Installation
<Steps>
### Install `ort-web`
```toml filename="Cargo.toml"
[dependencies]
ort-web = "0.2.1+1.24"
...
```
### Enable the `alternative-backend` feature
This instructs <Ort/> to not try its usual linking steps.
```toml filename="Cargo.toml"
[dependencies.ort]
version = "=2.0.0-rc.12"
default-features = false # Disables the `download-binaries` feature since we don't need it
features = [
"std",
"ndarray",
"api-24",
"alternative-backend"
]
```
### Initialize the backend
Use [`ort::set_api`](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/fn.set_api.html) to use the crate's API implementation.
```rs filename="lib.rs"
use ort_web::FEATURE_WEBGPU;
use wasm_bindgen::JsError;
async fn init() -> Result<(), JsError> {
// This should always be run before you use any other `ort` API.
ort::set_api(ort_web::api(FEATURE_WEBGPU).await?);
...
}
```
### Done!
</Steps>
## Toggling features
You can choose which build of ONNX Runtime to fetch by choosing any combination of `FEATURE_WEBGL`, `FEATURE_WEBGPU`, and `FEATURE_WEBNN`. These enable the usage of the WebGL, [WebGPU](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/ep/webgpu/struct.WebGPU.html), and [WebNN](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/ep/webnn/struct.WebNN.html) EPs respectively. You can `|` features together to enable multiple at once:
```rs
use ort_web::{FEATURE_WEBGL, FEATURE_WEBGPU};
ort::set_api(ort_web::api(FEATURE_WEBGL | FEATURE_WEBGPU).await?);
```
You'll still need to configure the EPs on a per-session basis later like you would normally, but this allows you to e.g. only fetch the CPU build (`FEATURE_NONE`) if the user doesn't have hardware acceleration.
## Session creation
Sessions can only be [created from a URL](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/session/builder/struct.SessionBuilder.html#method.commit_from_url), or [indirectly from memory](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/session/builder/struct.SessionBuilder.html#method.commit_from_memory)--that means no `SessionBuilder::commit_from_memory_directly` for `.ort` format models, and no `SessionBuilder::commit_from_file`.
Unlike vanilla <Ort/>, `commit_from_url` and `commit_from_memory` are marked `async` on the Web and thus need to be `await`ed. Also, `commit_from_url` is always available, regardless of whether the `fetch-models` feature is enabled.
```rs filename="lib.rs"
use ort::{ep, session::Session};
use ort_web::FEATURE_WEBGPU;
use wasm_bindgen::JsError;
async fn init() -> Result<(), JsError> {
ort::set_api(ort_web::api(FEATURE_WEBGPU).await?);
let mut session = Session::builder()?
.with_execution_providers([
// only available with FEATURE_WEBGPU
ep::WebGPU::default().build()
])?
.commit_from_url("./model.onnx")
.await?; // <- note we must .await on the web
    Ok(())
}
```
## Synchronization
With `ort-web`, ONNX Runtime is loaded as a separate WASM module, and `ort-web` acts as an intermediary between it and <Ort/>. There is no mechanism in WASM for two modules to share memory, so tensors often need to be *'synchronized'* when one side needs to see data from the other.
This means that [`Tensor::new`](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/value/type.Tensor.html#method.new) should never be used for creating inputs, as they start out allocated on the ONNX Runtime side, thus requiring a sync (of *empty data*) to Rust before it can be written to. Prefer instead [`Tensor::from_array`](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/value/type.Tensor.html#method.from_array)/[`TensorRef::from_array_view`](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/value/type.TensorRef.html#method.from_array_view), as tensors created this way never require synchronization.
Outputs of a session are **not** synchronized automatically. If you wish to use their data in Rust, you must either sync all outputs at once with [`ort_web::sync_outputs`](https://docs.rs/ort-web/latest/ort_web/fn.sync_outputs.html), or sync one tensor at a time (if you only use a few outputs):
```rs filename="lib.rs"
use ort_web::{TensorExt, SyncDirection};
let mut outputs = session.run_async(ort::inputs![...]).await?;
let mut bounding_boxes = outputs.remove("bounding_boxes").unwrap();
bounding_boxes.sync(SyncDirection::Rust).await?;
// now we can use the data
let data = bounding_boxes.try_extract_tensor::<f32>()?;
```
Once a session output is `sync`ed, that tensor becomes backed by a Rust buffer. Updates to the tensor's data from the Rust side will not reflect in ONNX Runtime until the tensor is `sync`ed with [`SyncDirection::Runtime`](https://docs.rs/ort-web/latest/ort_web/enum.SyncDirection.html#variant.Runtime). Likewise, updates to the tensor's data from ONNX Runtime won't reflect in Rust until Rust syncs that tensor with [`SyncDirection::Rust`](https://docs.rs/ort-web/latest/ort_web/enum.SyncDirection.html#variant.Rust). You don't have to worry about this behavior if you only ever *read* from session outputs, though.
## Serving assets
`ort-web` dynamically fetches the required scripts & WASM binary at runtime. By default, it will fetch the build from the `cdn.pyke.io` domain, so make sure it's accessible through your [content security policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CSP) if you have that configured.
You can also use a self-hosted build with [`Dist`](https://docs.rs/ort-web/latest/ort_web/struct.Dist.html):
```rs filename="lib.rs"
use ort::session::Session;
use ort_web::Dist;
async fn init_model() -> anyhow::Result<Session> {
let dist = Dist::new("https://cdn.jsdelivr.net/npm/onnxruntime-web@1.24.2/dist/")
// we want to load the WebGPU build
.with_script_name("ort.webgpu.min.js");
ort::set_api(ort_web::api(dist).await?);
}
```
The scripts & binary can be acquired from the `dist` folder of the [`onnxruntime-web` npm package](https://npmjs.com/package/onnxruntime-web).
## Telemetry
Unlike vanilla <Ort/>, `ort-web` **includes & enables telemetry by default**; this telemetry data is sent to pyke, not Microsoft.
When telemetry is enabled, committing a session for the first time on a page will send the domain name to `signal.pyke.io`. This is **the only data we collect**; we use it to better understand where & how `ort-web` is being used. You can see the exact details in the [`_telemetry.js` file](https://docs.rs/crate/ort-web/latest/source/_telemetry.js).
You can always disable this telemetry via [`EnvironmentBuilder::with_telemetry`](https://docs.rs/ort/latest/wasm32-unknown-unknown/ort/environment/struct.EnvironmentBuilder.html#method.with_telemetry):
```rs filename="lib.rs"
use wasm_bindgen::JsError;
async fn init() -> Result<(), JsError> {
ort::set_api(ort_web::api(ort_web::FEATURE_NONE).await?);
ort::init()
.with_telemetry(false)
.commit();
// ...
}
```

View File

@@ -2,11 +2,13 @@
title: 'Values'
---
# Values
import { Callout } from 'nextra/components';
For ONNX Runtime, a **value** represents any type that can be given to/returned from a session or operator. Values come in three main types:
import Ort from '../../components/Ort';
# Values
An ONNX **value** represents any type that can be given to/returned from a session or operator. Values come in three main types:
- **Tensors** (multi-dimensional arrays). This is the most common type of `Value`.
- **Maps** map a key type to a value type, similar to Rust's `HashMap<K, V>`.
- **Sequences** are homogeneously-typed dynamically-sized lists, similar to Rust's `Vec<T>`. The only values allowed in sequences are tensors, or maps of tensors.
@@ -14,7 +16,7 @@ For ONNX Runtime, a **value** represents any type that can be given to/returned
## Creating values
### Creating tensors
Tensors can be created with [`Tensor::from_array`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Tensor.html#method.from_array) from either:
Tensors can be created with [`Tensor::from_array`](https://docs.rs/ort/latest/ort/value/type.Tensor.html#method.from_array) from either:
- an [`ndarray::Array`](https://docs.rs/ndarray/0.17.1/ndarray/type.Array.html), or
- a tuple of `(shape, data)`, where:
- `shape` is one of `Vec<I>`, `[I; N]` or `&[I]`, where `I` is `i64` or `usize`, and
@@ -29,7 +31,7 @@ let tensor = Tensor::from_array(([1usize, 2, 3], vec![1.0_f32, 2.0, 3.0, 4.0, 5.
The created tensor will take ownership of the passed data. See [Creating views of external data](#creating-views-of-external-data) to create temporary tensors referencing borrowed data.
### Creating maps & sequences
`Map`s can be [created](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Map.html#method.new) from any iterator yielding tuples of `(K, V)`, where `K` and `V` are tensor element types.
`Map`s can be [created](https://docs.rs/ort/latest/ort/value/type.Map.html#method.new) from any iterator yielding tuples of `(K, V)`, where `K` and `V` are tensor element types.
```rs
let mut map = HashMap::<String, f32>::new();
@@ -40,7 +42,7 @@ map.insert("three".to_string(), 3.0);
let map = Map::<String, f32>::new(map)?;
```
`Map`s can also be [created from 2 tensors](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Map.html#method.new_kv), one containing keys and the other containing values:
`Map`s can also be [created from 2 tensors](https://docs.rs/ort/latest/ort/value/type.Map.html#method.new_kv) containing keys and values:
```rs
let keys = Tensor::<i64>::from_array(([4], vec![0, 1, 2, 3]))?;
let values = Tensor::<f32>::from_array(([4], vec![1., 2., 3., 4.]))?;
@@ -48,16 +50,16 @@ let values = Tensor::<f32>::from_array(([4], vec![1., 2., 3., 4.]))?;
let map = Map::new_kv(keys, values)?;
```
`Sequence`s can be [created](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Sequence.html#method.new) from any iterator yielding a `Value` subtype:
`Sequence`s can be [created](https://docs.rs/ort/latest/ort/value/type.Sequence.html#method.new) from any iterator yielding a `Value` subtype:
```rs
let tensor1 = Tensor::<f32>::new(&allocator, [1, 128, 128, 3])?;
let tensor2 = Tensor::<f32>::new(&allocator, [1, 224, 224, 3])?;
let sequence: Sequence<Tensor<f32>> = Sequence::new(vec![tensor1, tensor2])?;
let sequence = Sequence::new(vec![tensor1, tensor2])?;
```
## Using values
Values can be used as an input in a session's [`run`](https://docs.rs/ort/2.0.0-rc.11/ort/session/struct.Session.html#method.run) function - either by value, by reference, or [by view](#views).
Values can be passed as an input to a session's [`run`](https://docs.rs/ort/latest/ort/session/struct.Session.html#method.run) function by value, by reference, or [by view](#views).
```rs
let latents = Tensor::<f32>::new(&allocator, [1, 128, 128, 3])?;
let text_embedding = Tensor::<f32>::new(&allocator, [1, 48, 256])?;
@@ -73,7 +75,7 @@ let outputs = session.run(ort::inputs![
### Extracting data
To access the underlying data of a value directly, the data must first be **extracted**.
`Tensor`s can either extract to an `ndarray::ArrayView` [via `extract_array`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Tensor.html#method.extract_array) when the [`ndarray` feature is enabled](/setup/cargo-features), or extract to a tuple [via `extract_tensor`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Tensor.html#method.extract_tensor) of `(&Shape, &[T])` (where the second element is the slice of data contained within the tensor).
`Tensor`s can either extract to an `ndarray::ArrayView` [via `extract_array`](https://docs.rs/ort/latest/ort/value/type.Tensor.html#method.extract_array) when the [`ndarray` feature is enabled](/setup/cargo-features), or extract to a tuple [via `extract_tensor`](https://docs.rs/ort/latest/ort/value/type.Tensor.html#method.extract_tensor) of `(&Shape, &[T])` (where the second element is the slice of data contained within the tensor).
```rs
let array = ndarray::Array4::<f32>::ones((1, 16, 16, 3));
let tensor = TensorRef::from_array_view(&array)?;
@@ -93,7 +95,7 @@ let mut original_array = vec![1_i64, 2, 3, 4, 5];
assert_eq!(original_array, [1, 2, 42, 4, 5]);
```
`Map` and `Sequence` have [`Map::extract_map`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Map.html#method.extract_map) and [`Sequence::extract_sequence`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Sequence.html#method.extract_sequence), which emit a `HashMap<K, V>` and a `Vec` of value [views](#views) respectively. Unlike `extract_tensor`, these types cannot mutably extract their data, and always allocate on each `extract` call, making them more computationally expensive.
`Map` and `Sequence` have [`Map::extract_map`](https://docs.rs/ort/latest/ort/value/type.Map.html#method.extract_map) and [`Sequence::extract_sequence`](https://docs.rs/ort/latest/ort/value/type.Sequence.html#method.extract_sequence), which emit a `HashMap<K, V>` and a `Vec` of value [views](#views) respectively. Unlike `extract_tensor`, these types cannot mutably extract their data, and always allocate on each `extract` call, making them more computationally expensive.
Session outputs return `DynValue`s, which are values whose [type is not known at compile time](#dynamic-values). In order to extract data from a `DynValue`, you must either [downcast it to a strong type](#downcasting) or use a corresponding `try_extract_*` method, which fails if the value's type is not compatible:
```rs
@@ -119,10 +121,10 @@ let my_tensor: ort::value::Tensor<f32> = Tensor::new(...)?;
let tensor_view: ort::value::TensorRef<'_, f32> = my_tensor.view();
```
Views act identically to a borrow of their type - `TensorRef` supports `extract_tensor`, `TensorRefMut` supports `extract_tensor` and `extract_tensor_mut`. The same is true for sequences & maps.
Views act identically to a borrow of their type: `TensorRef` supports `extract_tensor`, `TensorRefMut` supports `extract_tensor` and `extract_tensor_mut`. The same is true for sequences & maps.
### Creating views of external data
You can create `TensorRef`s and `TensorRefMut`s from views of external data, like an `ndarray` array, or a raw slice of data. These types act almost identically to a `Tensor` - you can extract them and pass them as session inputs - but as they do not take ownership of the data, they are bound to the input's lifetime.
You can create `TensorRef`s and `TensorRefMut`s from views of external data, like an `ndarray` array, or a raw slice of data. These types act almost identically to a `Tensor`--you can extract them and pass them as session inputs--but as they do not take ownership of the data, they are bound to the lifetime of the original data.
```rs
let original_data = Array4::<f32>::from_shape_vec(...);
@@ -133,9 +135,9 @@ let tensor_view_mut = TensorRefMut::from_array_view_mut(([1, 3, 64, 64], &mut *o
```
## Dynamic values
Sessions in `ort` return a map of `DynValue`s. These are values whose exact type is not known at compile time. You can determine a value's [type](https://docs.rs/ort/2.0.0-rc.11/ort/value/enum.ValueType.html) via its `.dtype()` method.
Sessions in <Ort/> return a map of `DynValue`s. These are values whose exact type is not known at compile time. You can determine a value's [type](https://docs.rs/ort/latest/ort/value/enum.ValueType.html) via its `.dtype()` method.
You can also use fallible methods to extract data from this value - for example, [`DynValue::try_extract_tensor`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.DynValue.html#method.try_extract_tensor), which fails if the value is not a tensor. Often times though, you'll want to reuse the same value which you are certain is a tensor - in which case, you can **downcast** the value.
You can also use fallible methods to extract data from this value, like [`DynValue::try_extract_tensor`](https://docs.rs/ort/latest/ort/value/type.DynValue.html#method.try_extract_tensor), which fails if the value is not a tensor. Often times though, you'll want to reuse the same value which you are certain is a tensor, in which case you can **downcast** the value.
### Downcasting
**Downcasting** means to convert a dyn type like `DynValue` to stronger type like `DynTensor`. Downcasting can be performed using the `.downcast()` function on `DynValue`:
@@ -148,9 +150,9 @@ let dyn_tensor: ort::value::DynTensor = value.downcast()?;
If `value` is not actually a tensor, the `downcast()` call will fail.
#### Stronger types
`DynTensor` means that the type **is** a tensor, but the *element type is unknown*. There are also `DynSequence`s and `DynMap`s, which have the same meaning - the *kind* of value is known, but the element/key/value types are not.
`DynTensor` means that the type **is** a tensor, but the *element type is unknown*. There are also `DynSequence`s and `DynMap`s, which have the same meaning: the *kind* of value is known, but the element/key/value types are not.
The strongly typed variants of these types - `Tensor<T>`, `Sequence<T>`, and `Map<K, V>`, can be directly downcasted to, too:
The strongly typed variants of these types--`Tensor<T>`, `Sequence<T>`, and `Map<K, V>`--can be directly downcasted to, too:
```rs
let dyn_value: ort::value::DynValue = outputs.remove("output0").unwrap();
@@ -197,7 +199,7 @@ let tensor_view: ort::value::TensorRef<'_, f32> = dyn_value.view().downcast()?;
```
### Conversion recap
- `DynValue` represents a value that can be any type - tensor, sequence, or map. The type can be retrieved with `.dtype()`.
- `DynValue` represents a value that can be any type--tensor, sequence, or map. The type can be retrieved with `.dtype()`.
- `DynTensor`, `DynMap`, and `DynSequence` are values with known container types, but unknown element types.
- `Tensor<T>`, `Map<K, V>`, and `Sequence<T>` are values with known container and element types.
- `Tensor<T>` and co. can be converted from/to their dyn types using `.downcast()`/`.upcast()`, respectively.

View File

@@ -22,7 +22,7 @@ import { CRATE_VERSION } from '../constants';
<img width="100%" src="/assets/sample-onnx-graph.png" alt="An example visual representation of an ONNX graph, showing how an input tensor flows through layers of convolution nodes." />
Converting a neural network to a graph representation like ONNX opens the door to more optimizations and broader acceleration hardware support. ONNX Runtime can significantly improve the inference speed/latency of most models and enable acceleration with NVIDIA CUDA & TensorRT, Intel OpenVINO, Qualcomm QNN, Huawei CANN, and [much more](/perf/execution-providers).
Converting a neural network to a graph representation like ONNX opens the door to more optimizations and broader accelerator support. ONNX Runtime can significantly improve the inference speed/latency of most models and enable hardware acceleration with NVIDIA CUDA & TensorRT, Intel OpenVINO, Qualcomm QNN, Huawei CANN, and [much more](/perf/execution-providers).
<Ort/> is the Rust gateway to ONNX Runtime, allowing you to infer your ONNX models via an easy-to-use and ergonomic API. Many commercial, open-source, & research projects use <Ort/> in some pretty serious production scenarios to boost inference performance:
- [**Bloop**](https://bloop.ai/)'s semantic code search feature is powered by <Ort/>.
@@ -39,7 +39,7 @@ Converting a neural network to a graph representation like ONNX opens the door t
If you have a [supported platform](/setup/platforms) (and you probably do), installing <Ort/> couldn't be any simpler! Just add it to your Cargo dependencies:
```toml
[dependencies]
ort = "=2.0.0-rc.11"
ort = "=2.0.0-rc.12"
```
### Convert your model
@@ -63,10 +63,10 @@ let mut model = Session::builder()?
```
### Perform inference
Preprocess your inputs, then `run()` the session to perform inference.
Prepare your inputs, then `run()` the session to perform inference.
```rust
let outputs = model.run(ort::inputs!["image" => image]?)?;
let outputs = model.run(ort::inputs!["image" => image])?;
let predictions = outputs["output0"].try_extract_array::<f32>()?;
...
```

View File

@@ -3,13 +3,15 @@ title: Version mapping
description: Information about `ort`'s versioning and relation to ONNX Runtime versioning.
---
import Ort from '../../components/Ort';
# Version mapping
## Versions of ONNX Runtime used by `ort`
## Versions of ONNX Runtime used by <Ort/>
| **ort** | **ONNX Runtime** |
| -------- | ----------------:|
| v2.0.0+ | v1.24.1 |
| v2.0.0+ | v1.24.2 |
| v1.16.0-v1.16.2 | v1.16.0 |
| v1.15.0-v1.15.5 | v1.15.1 |
| v1.14.2-v1.14.8 | v1.14.1 |
@@ -17,22 +19,22 @@ description: Information about `ort`'s versioning and relation to ONNX Runtime v
| v1.13.1-v1.13.3 | v1.13.1 |
| v1.13.0 | v1.12.1 |
## Supported ONNX opsets by `ort` version
## Supported ONNX opsets by <Ort/> version
Note that this only applies to the default ONNX Runtime backend.
| **ort** | **ONNX opset version** | **ONNX ML opset version** |
| -------- |:----------------------:|:-------------------------:|
| v2.0.0+ | 22 | 4 |
| v2.0.0+ | 24 | 4 |
| v1.16.0-v1.16.2 | 19 | 3 |
| v1.15.0-v1.15.5 | 19 | 3 |
| v1.14.0-v1.14.8 | 18 | 3 |
| v1.13.0-v1.13.3 | 17 | 3 |
## A note on SemVer
`ort` versions pre-2.0 were not SemVer compatible. From v2.0 onwards, breaking API changes are accompanied by a **major version update**.
<Ort/> versions pre-2.0 were not SemVer compatible. From v2.0 onwards, breaking API changes are accompanied by a **major version update**.
Updates to the version of ONNX Runtime used by `ort` may occur on **minor** version updates, i.e. 2.0 ships with ONNX Runtime 1.24, but 2.1 may ship with 1.25. ONNX Runtime is generally forward compatible, but in case you require a specific version of ONNX Runtime, you should pin the minor version in your `Cargo.toml` using a [tilde requirement](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#tilde-requirements):
```toml
Updates to the version of ONNX Runtime used by <Ort/> may occur on **minor** version updates, e.g. 2.0 ships with ONNX Runtime 1.24, but 2.1 may ship with 1.25. ONNX Runtime is generally forward compatible, but in case you require a specific version of ONNX Runtime, you should pin <Ort/>'s minor version in your `Cargo.toml` using a [tilde requirement](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#tilde-requirements):
```toml filename="Cargo.toml"
[dependencies]
ort = { version = "~2.0", ... }
```

View File

@@ -20,7 +20,7 @@ Not all platforms support all execution providers, of course. This handy widget
<Callout type='info'>
To enable the use of an execution provider inside <Ort/>, you'll need to enable its respective Cargo feature, e.g. the `cuda` feature to use CUDA, or the `coreml` feature to use CoreML.
```toml Cargo.toml
```toml filename="Cargo.toml"
[dependencies]
ort = { version = "2.0", features = [ "cuda" ] }
```
@@ -28,9 +28,9 @@ Not all platforms support all execution providers, of course. This handy widget
See the widget above for the full list of EPs and their corresponding Cargo features.
</Callout>
In order to configure sessions to use certain execution providers, you must **register** them when creating an environment or session. You can do this via the `SessionBuilder::with_execution_providers` method. For example, to register the CUDA execution provider for a session:
In order to configure sessions to use certain execution providers, you must **register** them when creating an environment or session. You can do this via the [`SessionBuilder::with_execution_providers`](https://docs.rs/ort/latest/ort/session/builder/struct.SessionBuilder.html#method.with_execution_providers) method. For example, to register the CUDA execution provider for a session:
```rust
```rust filename="main.rs"
use ort::{ep::CUDA, session::Session};
fn main() -> anyhow::Result<()> {
@@ -42,9 +42,9 @@ fn main() -> anyhow::Result<()> {
}
```
You can, of course, specify multiple execution providers. `ort` will register all EPs specified, in order. If an EP does not support a certain operator in a graph, it will fall back to the next successfully registered EP, or to the CPU if all else fails.
You can, of course, specify multiple execution providers. <Ort/> will register all EPs specified, in order. If an EP does not support a certain operator in a graph, it will fall back to the next successfully registered EP, or to the CPU if all else fails.
```rust
```rust filename="main.rs"
use ort::{ep, session::Session};
fn main() -> anyhow::Result<()> {
@@ -65,9 +65,9 @@ fn main() -> anyhow::Result<()> {
```
## Configuring EPs
EPs have configuration options to control behavior or increase performance. Each execution provider struct returns a builder with configuration methods. See the [API reference](https://docs.rs/ort/2.0.0-rc.11/ort/ep/index.html#reexports) for the EP structs for more information on which options are supported and what they do.
EPs have configuration options to control behavior or increase performance. Each execution provider struct returns a builder with configuration methods. See the [API reference](https://docs.rs/ort/latest/ort/ep/index.html#reexports) for the EP structs for more information on which options are supported and what they do.
```rust
```rust filename="main.rs"
use ort::{ep, session::Session};
fn main() -> anyhow::Result<()> {
@@ -86,10 +86,10 @@ fn main() -> anyhow::Result<()> {
```
## Fallback behavior
`ort` will silently fail and fall back to executing on the CPU if all execution providers fail to register. In many cases, though, you'll want to show the user an error message when an EP fails to register, or outright abort the process.
<Ort/> will silently fail and fall back to executing on the CPU if all execution providers fail to register. In many cases, though, you'll want to show the user an error message when an EP fails to register, or outright abort the process.
You can configure an EP to return an error on failure by adding `.error_on_failure()` after you `.build()` it. In this example, if CUDA doesn't register successfully, the program will exit with an error at `with_execution_providers`:
```rust
You can configure an EP to return an error on failure by adding [`.error_on_failure()`](https://docs.rs/ort/latest/ort/ep/struct.ExecutionProviderDispatch.html#method.error_on_failure) after you `.build()` it. In this example, if CUDA doesn't register successfully, the program will exit with an error at `with_execution_providers`:
```rust filename="main.rs"
use ort::{ep, session::Session};
fn main() -> anyhow::Result<()> {
@@ -103,9 +103,9 @@ fn main() -> anyhow::Result<()> {
}
```
If you require more complex error handling, you can also manually register execution providers via the `ExecutionProvider::register` method:
If you require more complex error handling, you can also manually register execution providers via the [`ExecutionProvider::register`](https://docs.rs/ort/latest/ort/ep/trait.ExecutionProvider.html#tymethod.register) method:
```rust
```rust filename="main.rs"
use ort::{
ep::{self, ExecutionProvider},
session::Session
@@ -126,9 +126,9 @@ fn main() -> anyhow::Result<()> {
}
```
You can also check whether ONNX Runtime is even compiled with support for the execution provider with the `is_available` method.
You can also check whether ONNX Runtime is even compiled with support for the execution provider with the [`is_available`](https://docs.rs/ort/latest/ort/ep/trait.ExecutionProvider.html#method.is_available) method.
```rust
```rust filename="main.rs"
use ort::{
ep::{self, ExecutionProvider},
session::Session
@@ -152,10 +152,10 @@ fn main() -> anyhow::Result<()> {
}
```
## Global defaults
You can configure `ort` to attempt to register a list of execution providers for all sessions created in an environment.
## Global EPs
You can configure EPs to be registered for all sessions created throughout the program by configuring the environment:
```rust
```rust filename="main.rs"
use ort::{ep, session::Session};
fn main() -> anyhow::Result<()> {
@@ -165,17 +165,17 @@ fn main() -> anyhow::Result<()> {
let session = Session::builder()?.commit_from_file("model.onnx")?;
// The session will attempt to register the CUDA EP
// since we configured the environment default.
// since we configured environment EPs.
Ok(())
}
```
<Callout type='warning'>
`ort::init` must come before you create any sessions, otherwise the configuration will not take effect!
Environment configs must be committed before you create any sessions, otherwise the configuration will not take effect!
</Callout>
Sessions configured with their own execution providers will *extend* the execution provider defaults, rather than overriding them.
EPs configured on a per-session basis (with [`SessionBuilder::with_execution_providers`](https://docs.rs/ort/latest/ort/session/builder/struct.SessionBuilder.html#method.with_execution_providers)) will *take precedence* over environment EPs, but they won't replace them.
## Troubleshooting
If it seems like the execution provider is not registering properly, or you are not getting acceptable performance, see the [Troubleshooting: Performance](/troubleshooting/performance) page for more information on how to debug any EP issues.
@@ -185,14 +185,28 @@ If it seems like the execution provider is not registering properly, or you are
### Dynamically-linked EP requirements
Certain EPs like CUDA and TensorRT use a separate interface that require them to be compiled as dynamic libraries which are loaded at runtime when the EP is registered. The DirectML and WebGPU EP do not use this interface, but do require helper dylibs.
Due to the quirks of dynamic library loading, you may encounter issues with builds including these EPs due to ONNX Runtime failing to find the dylibs at runtime. `ort`'s `copy-dylibs` Cargo feature (which is enabled by default) tries to alleviate this issue by symlinking these dylibs into your `target` folder so they can be found by your application when in development. On Windows platforms that don't have [Developer Mode](https://learn.microsoft.com/en-us/windows/uwp/get-started/enable-your-device-for-development) enabled, a copy is instead performed (excluding examples and tests). On other platforms, additional setup is required to get the application to load dylibs from its parent folder.
Due to the quirks of dynamic library loading, you may encounter issues with builds including these EPs due to ONNX Runtime failing to find the dylibs at runtime. <Ort/>'s `copy-dylibs` [Cargo feature](/setup/cargo-features) (which is enabled by default) tries to alleviate this issue by symlinking these dylibs into your `target` folder so they can be found by your application when in development. On Windows platforms that don't have [Developer Mode](https://learn.microsoft.com/en-us/windows/uwp/get-started/enable-your-device-for-development) enabled, a copy is instead performed. On other platforms, additional setup is required to get the application to load dylibs from its parent folder.
See [Runtime dylib loading](/setup/linking#runtime-dylib-loading) for more information.
### Prebuilt binary combos
<Ort/> provides prebuilt binaries for the following combinations of EP features:
- `directml`/`xnnpack`/`coreml` are available in any build if the platform supports it.
- `cuda`/`tensorrt`
- `webgpu`
- `nvrtx`
This means that we have builds for `features = ["cuda", "tensorrt", "directml"]`, but *not* `features = ["cuda", "webgpu"]`. Specifying both `cuda` and `webgpu`, for example, **will fall back to downloading a CPU-only build**.
If you want a single build of ONNX Runtime that has both `cuda` *and* `webgpu`, you'll have to compile it from source.
### CUDA
`ort` provides binaries for CUDA 12 with cuDNN 9.x only. Make sure the correct version of CUDA & cuDNN are installed and available on the `PATH`.
<Ort/> provides binaries for CUDA ≥ 12.8 or ≥ 13.2, and targets cuDNN ≥ 9.19. Make sure CUDA and cuDNN are installed and available on the `PATH`.
<Ort/> will try to automatically detect which CUDA version you're using, but sometimes it gets it wrong and reverts to CUDA 12 (especially if you have both 12 and 13 installed). You can override the CUDA version by setting the `ORT_CUDA_VERSION` environment variable to `12` or `13`.
### WebGPU
The WebGPU EP is **experimental** and may produce incorrect results/crashes; these issues should be reported upstream as there's unfortunately nothing we can do about them.
WebGPU binaries are provided for Windows & Linux. On Windows, the build supports running on DirectX 12 or DirectX 11. On Linux, it supports Vulkan & OpenGL/GLES.
WebGPU binaries are provided for Windows, macOS, and Linux. On Windows, DirectX 12 and 11 are supported. On Linux, Vulkan and OpenGL/GLES are supported.

View File

@@ -9,12 +9,12 @@ import { Callout } from 'nextra/components';
Oftentimes, when running a model with a non-CPU [execution provider](/perf/execution-providers), you'll find that the act of copying data between the device and CPU takes up a considerable amount of inference time.
In some cases, this I/O overhead is unavoidable -- a causal language model, for example, must copy its sequence of input tokens to the GPU and copy the output probabilities back to the CPU to perform sampling on each run. In this case, there isn't much room to optimize I/O. In other cases, though, you may have an input or output that does *not* need to be copied off of the device it is allocated on - i.e., if an input does not change between runs (such as a style embedding), or if an output is subsequently used directly as an input to another/the same model on the same device.
In some cases, this I/O overhead is unavoidable--a causal language model, for example, must copy its sequence of input tokens to the GPU and copy the output probabilities back to the CPU to perform sampling on each run. In this case, there isn't much room to optimize I/O. In other cases, though, you may have an input or output that does *not* need to be copied off of the device it is allocated on--i.e., if an input does not change between runs (such as a style embedding), or if an output is subsequently used directly as an input to another/the same model on the same device.
For these cases, ONNX Runtime provides **I/O binding**, an interface that allows you to manually specify which inputs/outputs reside on which device, and control when they are synchronized.
## Creating
I/O binding is used via the [`IoBinding`](https://docs.rs/ort/2.0.0-rc.11/ort/io_binding/struct.IoBinding.html) struct. `IoBinding` is created using the [`Session::create_binding`](https://docs.rs/ort/2.0.0-rc.11/ort/session/struct.Session.html#method.create_binding) method:
I/O binding is used via the [`IoBinding`](https://docs.rs/ort/latest/ort/io_binding/struct.IoBinding.html) struct. `IoBinding` is created using the [`Session::create_binding`](https://docs.rs/ort/latest/ort/session/struct.Session.html#method.create_binding) method:
```rs
let mut binding = session.create_binding()?;
@@ -59,7 +59,7 @@ binding.bind_output_to_device("action", &allocator.memory_info())?;
This means that subsequent runs will *override* the data in `action`. If you need to access a bound output's data *across* runs (i.e. in a multithreading setting), the data needs to be copied to another buffer to avoid undefined behavior.
</Callout>
Outputs can be bound to any device -- they can even stay on the EP device if you bind it to a tensor created with the session's allocator (`Tensor::new(session.allocator(), ...)`). You can then access the pointer to device memory using [`Tensor::data_ptr`](https://docs.rs/ort/2.0.0-rc.11/ort/value/type.Tensor.html#method.data_ptr).
Outputs can be bound to any device -- they can even stay on the EP device if you bind it to a tensor created with the session's allocator (`Tensor::new(session.allocator(), ...)`). You can then access the pointer to device memory using [`Tensor::data_ptr`](https://docs.rs/ort/latest/ort/value/type.Tensor.html#method.data_ptr).
If you do bind an output to the session's device, it is not guaranteed to be synchronized after `run`, just like `bind_input`. You can force outputs to synchronize immediately using `IoBinding::synchronize_outputs`.

View File

@@ -1,5 +1,6 @@
export default {
'platforms': 'Platform support',
'cargo-features': 'Cargo features',
'multiversion': 'Multiversioning',
'linking': 'Linking'
};

View File

@@ -2,20 +2,32 @@
title: Cargo features
---
import Ort from '../../components/Ort';
# Cargo features
> *✅ = default, ⚒️ = not default*
- ✅ **`ndarray`**: Enables tensors to be created from/extracted to [`ndarray`](https://crates.io/crates/ndarray) multi-dimensional arrays. We highly recommend this feature if you need to do a lot of complex pre/post-processing requiring multi-dimensional array access, but for something like an LLM, omitting this feature won't require too much extra work but will save a fair amount of compile time.
- ✅ **`download-binaries`**: Downloads prebuilt binaries from pyke's CDN service for supported platforms. Disabling this means you'll need to compile ONNX Runtime from source yourself, and [link `ort` to it](/setup/linking).
- ✅ **`download-binaries`**: Downloads prebuilt binaries from pyke's CDN service for supported platforms. Disabling this means you'll need to compile ONNX Runtime from source yourself, and [link <Ort/> to it](/setup/linking).
- ✅ **`copy-dylibs`**: In case dynamic libraries are used (like with the CUDA execution provider), creates a symlink to them in the relevant places in the `target` folder to make [compile-time dynamic linking](/setup/linking#compile-time-dynamic-linking) work.
- ✅ **`tracing`**; Log messages through [`tracing`](https://crates.io/crates/tracing).
- ⚒️ **`half`**: Enables support for creating & extracting float16/bfloat16 tensors via the [`half`](https://crates.io/crates/half) crate. ONNX models that are converted to 16-bit precision will typically convert to/from 32-bit floats at the input/output, so you will likely never actually need to interact with a 16-bit tensor on the Rust side.
- ✅ **`tracing`**: Log messages through the [`tracing`](https://crates.io/crates/tracing) crate for simple & configurable logging. When disabled, ONNX Runtime will instead log directly to `stderr`; see [Logging](/troubleshooting/logging) to configure.
- ⚒️ **`half`**: Enables support for creating & extracting float16/bfloat16 tensors via the [`half`](https://crates.io/crates/half) crate. ONNX models that are converted to 16-bit precision will typically convert to/from 32-bit floats at the input/output, so you will likely never actually need to enable this feature.
- ⚒️ **`num-complex`**: Enables support for creating & extracting complex32/complex64 tensors via the [`num-complex`](https://crates.io/crates/num-complex) crate.
- ⚒️ **`preload-dylibs`**: Enables [dynamic library preloading](https://docs.rs/ort/2.0.0-rc.11/ort/util/fn.preload_dylib.html); useful if you want to ship CUDA alongside your application instead of requiring the user to install it themselves.
- ⚒️ **`preload-dylibs`**: Enables [dynamic library preloading](https://docs.rs/ort/latest/ort/util/fn.preload_dylib.html); useful if you want to ship CUDA alongside your application instead of requiring the user to install it themselves.
- ⚒️ **`load-dynamic`**: Enables [runtime dynamic linking](/setup/linking#runtime-loading-with-load-dynamic), which alleviates many of the troubles with compile-time dynamic linking and offers greater flexibility.
- ⚒️ **`alternative-backend`**: Disables linking to ONNX Runtime, allowing you to instead configure an [alternative backend](/backends).
- ⚒️ **`fetch-models`**: Enables the [`SessionBuilder::commit_from_url`](https://docs.rs/ort/2.0.0-rc.11/ort/session/builder/struct.SessionBuilder.html#method.commit_from_url) method, allowing you to quickly download & run a model from a URL. This should only be used for quick testing.
- ⚒️ **`pkg-config`**: Enables linking to `libonnxruntime` via `pkg-config`.
- ⚒️ **`fetch-models`**: Enables the [`SessionBuilder::commit_from_url`](https://docs.rs/ort/latest/ort/session/builder/struct.SessionBuilder.html#method.commit_from_url) method, allowing you to quickly download & run a model from a URL. This should only be used for quick testing.
## TLS features
One of these must be enabled when using `download-binaries`, as `download-binaries` always uses HTTPS.
- ✅ **`tls-native`**: Uses the platform's native TLS provider for broadest compatibility & fastest compile times.
- ⚒️ **`tls-rustls`**: Uses [`rustls`](https://crates.io/crates/rustls) with [`ring`](https://crates.io/crates/ring) as its crypto provider.
- ⚒️ **`tls-native-vendored`**: Uses a vendored copy of OpenSSL on Linux.
## Execution providers
Each [execution provider](/perf/execution-providers) is also gated behind a Cargo feature. Each EP's Cargo feature must be enabled for it to be usable; see the linked page for details & the full list of EP features.
## Version features
The minimum ONNX Runtime version required by <Ort/> is controlled via `api-*` features. See [Multiversioning](/setup/multiversion) for more info.

View File

@@ -1,23 +1,24 @@
---
title: Linking
description: Here's how `ort` links to ONNX Runtime, and how to configure its behavior.
description: How `ort` links to ONNX Runtime, and how to configure its behavior for custom builds.
---
# Linking
import { Callout, Tabs, Steps } from 'nextra/components';
import Ort from '../../components/Ort';
`ort` provides its own builds of ONNX Runtime to make your experience as painless as possible, but in some cases, you'll want to use a custom build of ONNX Runtime with `ort`. Luckily, we make this very easy by handling all of the linking configuration automagically. Just point `ort` to the output of ONNX Runtime's build pipeline and it'll Just Work™.
<Ort/> provides its own builds of ONNX Runtime to make your experience as painless as possible, but in some cases, you'll want to use a custom build of ONNX Runtime with <Ort/>. Luckily, we make this very easy by handling all of the linking configuration automagically. Just point <Ort/> to the output of ONNX Runtime's build pipeline and it'll Just Work™.
## Static linking
Most ONNX Runtime compile configurations will support static linking - just run `build.sh` without the `--build_shared_lib` argument. You should prefer static linking if your execution providers support it, as it avoids many issues and follows de facto Rust practices. If you compile both static libraries and dynamic libraries, `ort` will prefer linking to the static libraries.
Most ONNX Runtime compile configurations will support static linking--just run `build.sh` without the `--build_shared_lib` argument. You should prefer static linking if your execution providers support it, as it avoids many issues and follows de facto Rust practices. If you compile both static libraries and dynamic libraries, <Ort/> will prefer linking to the static libraries.
To direct `ort` to your statically built binaries, use the `ORT_LIB_PATH` environment variable when running `cargo build`. Point it to the location where the static libraries (`.a`/`.lib` files) are compiled to. This will typically be `onnxruntime/build/<os>/<profile>`. For example:
To direct <Ort/> to your statically built binaries, use the `ORT_LIB_PATH` environment variable when running `cargo build`. Point it to the location where the static libraries (`.a`/`.lib` files) are compiled to. This will typically be `onnxruntime/build/<os>/<profile>`. For example:
```shell
$ ORT_LIB_PATH=~/onnxruntime/build/Linux/Release cargo build
```
For iOS (or for other platforms if you are compiling multiple profiles at once), you'll need to manually specify the profile with the `ORT_LIB_PROFILE` environment variable. If not specified, `ort` will prefer `Release` over `RelWithDebInfo` over `MinSizeRel` over `Debug`.
For iOS (or for other platforms if you are compiling multiple profiles at once), you'll need to manually specify the profile with the `ORT_LIB_PROFILE` environment variable. If not specified, <Ort/> will prefer `Release` over `RelWithDebInfo` over `MinSizeRel` over `Debug`.
## Dynamic linking
When it comes to dynamic linking, there are two options: `load-dynamic`, or standard compile-time dynamic linking. We recommend `load-dynamic` as it gives more control and is often far less troublesome to work with.
@@ -37,21 +38,29 @@ ort = { version = "2", features = [ "load-dynamic" ] }
### Point ort to the dylib
<Tabs items={['Programmatically', 'Via shell']}>
<Tabs.Tab title="Programmatically">
```rust main.rs
fn main() -> anyhow::Result<()> {
// Find our custom ONNX Runtime dylib path somehow
// (i.e. resolving it from the root of our program's install folder)
let dylib_path = crate::internal::find_onnxruntime_dylib()?;
```rust filename="main.rs"
fn find_onnxruntime_dylib() -> anyhow::Result<PathBuf> {
// Find our custom ONNX Runtime dylib path somehow (i.e. resolving it from the root of our program's install folder)
// The path should point to the `libonnxruntime` binary, which looks like:
// - on Unix: /etc/.../libonnxruntime.so
// - on Windows: C:\Program Files\...\onnxruntime.dll
// - on Linux: /etc/.../libonnxruntime.so
// - on macOS: /.../libonnxruntime.dylib
unimplemented!()
}
// Initialize ort with the path to the dylib. This **must** be called before any usage of `ort`!
fn main() -> anyhow::Result<()> {
let dylib_path = find_onnxruntime_dylib()?;
// Initialize ort with the path to the dylib. This **must** be called before any other usage of `ort`!
// `init_from` returns a `Result<EnvironmentBuilder>` which you can use to further configure the environment
// before `.commit()`ing; see the Environment docs for more information on what you can configure.
// `init_from` will return an `Err` if it fails to load the dylib.
ort::init_from(dylib_path)?.commit();
// Now we can use `ort`!
let mut session = Session::builder()?
.commit_from_file("model.onnx")?;
Ok(())
}
```
@@ -62,15 +71,15 @@ ort = { version = "2", features = [ "load-dynamic" ] }
```shell
$ ORT_DYLIB_PATH=../onnxruntime-build/linux-x64/libonnxruntime.so ./mirai
```
<Callout type='info'>`ORT_DYLIB_PATH` is relative to the *executable*. Cargo examples and tests are compiled to a different directory than binary crates: `target/<profile>/examples` and `target/<profile>/deps` respectively. Keep this in mind if you're going to use relative paths.</Callout>
</Tabs.Tab>
</Tabs>
</Steps>
<Callout type='info'>`ORT_DYLIB_PATH` is relative to the executable. Cargo examples and tests are compiled to a different directory than binary crates: `target/<profile>/examples` and `target/<profile>/deps` respectively. Keep this in mind if you're going to use relative paths.</Callout>
### Compile-time dynamic linking
For compile-time dynamic linking, you'll need to configure your environment in the exact same way as if you were [statically linking](#static-linking).
For compile-time dynamic linking, you'll configure your environment in the exact same way as if you were [statically linking](#static-linking), with one extra step: **set the `ORT_PREFER_DYNAMIC_LINK` environment variable to `1`**.
#### Runtime dylib loading
Dylibs linked at compile-time need to be placed in a specific location for them to be found by the executable. For Windows, this is either somewhere on the `PATH`, or in the same folder as the executable. On macOS and Linux, they have to be placed somewhere in the `LD_LIBRARY_PATH`, or you can use rpath to configure the executable to search for dylibs in its parent folder. We've had the least issues with rpath, but YMMV.
@@ -78,7 +87,7 @@ Dylibs linked at compile-time need to be placed in a specific location for them
To configure rpath, you'll need to:
<Steps>
#### Enable rpath in Cargo.toml
```toml filename="Cargo.toml" copy
```toml filename="Cargo.toml"
[profile.dev]
rpath = true
@@ -91,7 +100,7 @@ rpath = true
#### Add the executable origin to rpath
<Tabs items={['Linux', 'macOS']}>
<Tabs.Tab title="Linux">
```toml filename="~/.cargo/config.toml" copy
```toml filename="~/.cargo/config.toml"
[target.x86_64-unknown-linux-gnu]
rustflags = [ "-Clink-args=-Wl,-rpath,\\$ORIGIN" ]
@@ -99,7 +108,7 @@ rpath = true
```
</Tabs.Tab>
<Tabs.Tab title="macOS">
```toml filename="~/.cargo/config.toml" copy
```toml filename="~/.cargo/config.toml"
[target.x86_64-apple-darwin]
rustflags = [ "-Clink-args=-Wl,-rpath,@loader_path" ]

View File

@@ -0,0 +1,36 @@
import Ort from '../../components/Ort';
# Multiversioning
<Ort/> can support any version of ONNX Runtime between 1.17 and 1.24. It does this by lowering the **API version** it requests based on what features are enabled.
The latest APIs (gated under feature `api-24`) are enabled by default, so you have access to all ONNX Runtime 1.24 features out of the box. If you want to target an earlier version of ONNX Runtime, you must disable `default-features` and set your minimum API version accordingly:
```toml filename="Cargo.toml"
[dependencies.ort]
version = "=2.0.0-rc.12"
default-features = false
features = [
"std",
"ndarray",
"download-binaries",
# We use the `Adapter` API, which is only available since ONNX Runtime 1.20,
# so set our minimum API version to 20.
"api-20"
]
```
In the above example, only features available since ONNX Runtime 1.20 will be accessible in code. Because `download-binaries` is enabled, <Ort/> will still download the latest ONNX Runtime (v1.24), but it will happily use an older version if you [set up manual linking](/setup/linking).
The [API docs](https://docs.rs/ort) show the minimum API version required for each method/struct; if no notice is shown, then it's always available.
![A screenshot of the docs.rs page for SessionBuilder::with_auto_device. Below the method definition, an infobox says "Available on crate feature api-22 only".](/assets/_api-version-feature.webp)
The following API version features are available. Each feature also enables all APIs before it.
- **`api-17`**: ONNX Runtime v1.17 (baseline)
- **`api-18`**: ONNX Runtime v1.18
- **`api-19`**: ONNX Runtime v1.19
- **`api-20`**: ONNX Runtime v1.20
- **`api-21`**: ONNX Runtime v1.21
- **`api-22`**: ONNX Runtime v1.22
- **`api-23`**: ONNX Runtime v1.23
- **`api-24`**: ONNX Runtime v1.24 (latest)

View File

@@ -4,29 +4,5 @@ title: 'Troubleshooting: Issues compiling/linking'
# Troubleshooting: Issues compiling/linking
## The trait bound `ort::value::Value: From<...>` is not satisfied
An error like this might come up when attempting to upgrade from an earlier (1.x) version of `ort` to a more recent version:
```
error[E0277]: the trait bound `ort::value::Value: From<ArrayBase<OwnedRepr<f32>, Dim<[usize; 3]>>>` is not satisfied
--> src/main.rs:72:16
|
72 | let inputs = ort::inputs![
| ______________________^
73 | | input1,
74 | | ]?;
| |_________^ the trait `From<ArrayBase<OwnedRepr<f32>, Dim<[usize; 3]>>>` is not implemented for `ort::value::Value`, which is required by `ort::value::Value: TryFrom<ArrayBase<OwnedRepr<f32>, Dim<[usize; 3]>>>`
|
= help: the following other types implement trait `From<T>`:
`ort::value::Value` implements `From<ort::value::Value<DynTensorValueType>>`
`ort::value::Value` implements `From<ort::value::Value<TensorValueType<T>>>`
= note: required for `ArrayBase<OwnedRepr<f32>, Dim<[usize; 3]>>` to implement `Into<ort::value::Value>`
= note: required for `ort::value::Value` to implement `TryFrom<ArrayBase<OwnedRepr<i64>, Dim<[usize; 2]>>>`
= note: this error originates in the macro `ort::inputs` (in Nightly builds, run with -Z macro-backtrace for more info)
```
Recent versions of `ort` require `ndarray` `0.17`, whereas older versions (and thus possibly your code) required `0.16`. Since these versions are semver incompatible, Cargo treats the `ndarray` used by your crate and the `ndarray` used by `ort` as separate crates entirely; hence the contradictory error message.
To fix this, upgrade your `ndarray` dependency to `0.17`; the new release features no breaking changes, although `.into_shape()` is deprecated; see [`ndarray`'s release notes](https://github.com/rust-ndarray/ndarray/releases/tag/0.17.0) for more information.
## Unresolved external symbol `__std_*`
If you encounter these errors when linking on Windows, make sure your Visual Studio 2022 installation is up to date; at least version **17.11** is required when using default pyke binaries.
If you encounter these errors when linking on Windows, make sure your Visual Studio 2022 installation is up to date; at least version **17.14** is required when using default pyke binaries.

View File

@@ -5,30 +5,33 @@ title: 'Troubleshooting: Logging'
# Troubleshooting: Logging
import { Tabs, Steps } from 'nextra/components';
import Ort from '../../components/Ort';
`ort` hooks into ONNX Runtime to route its logging messages through the [`tracing`](https://crates.io/crates/tracing) crate. These logging messages can often provide more helpful information about specific failure modes than `ort`'s error messages alone.
With the `tracing` [feature flag](../setup/cargo-features) (which is enabled by default), <Ort/> hooks into ONNX Runtime to route its logging messages through the [`tracing`](https://crates.io/crates/tracing) crate. These logging messages can often provide more helpful information about specific failure modes than <Ort/>'s error messages alone.
To enable logging for `ort`, you need to set up a `tracing` **subscriber** in your application, such as [`tracing-subscriber`](https://crates.io/crates/tracing-subscriber). `tracing-subscriber`'s `fmt` subscriber logs readable (and quite pretty!) messages to the console. To set it up:
To see logs from <Ort/>'s `tracing` integration, you'll need to set up a **subscriber** in your application, such as [`tracing-subscriber`](https://crates.io/crates/tracing-subscriber). `tracing-subscriber`'s `fmt` subscriber logs readable (and quite pretty!) messages to the console. To set it up:
<Steps>
### Add `tracing-subscriber` to your dependencies
```toml Cargo.toml
```toml filename="Cargo.toml"
[dependencies]
tracing-subscriber = { version = "0.3", features = [ "env-filter", "fmt" ] }
```
### Initialize the subscriber in the main function
```rust main.rs
```rust filename="main.rs"
fn main() {
tracing_subscriber::fmt::init();
// ... rest of code
}
```
### Show debug messages from ort
The environment variable `RUST_LOG` configures filters for crates that use `tracing`; see [`tracing_subscriber::EnvFilter`](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more information.
The environment variable `RUST_LOG` configures filters for crates that use `tracing`; see [`tracing_subscriber::EnvFilter`](https://docs.rs/tracing-subscriber/0.3.22/tracing_subscriber/filter/struct.EnvFilter.html) for more information.
Set `RUST_LOG` to `ort=debug` to see all debug messages from `ort`. (You can also set it to `trace` for more verbosity, or `info`, `warn`, or `error` for less.)
Set `RUST_LOG` to `ort=debug` to see all debug messages from <Ort/>. (You can also set it to `trace` for more verbosity, or `info`, `warn`, or `error` for less.)
<Tabs items={['Windows (PowerShell)', 'Windows (Command Prompt)', 'Linux', 'macOS']}>
<Tabs.Tab title="Windows (PowerShell)">
```powershell
@@ -55,3 +58,8 @@ Set `RUST_LOG` to `ort=debug` to see all debug messages from `ort`. (You can als
</Tabs>
</Steps>
## Logging without `tracing`
When the `tracing` feature is disabled, <Ort/> will instead forward log messages to `stderr`, so you'll still see important messages even when `default-features = false`.
You can configure the verbosity level by setting the `ORT_LOG` environment variable to one of `fatal`, `error`, `warning`, `info`, or `verbose`. The default log level is `warning`.

View File

@@ -2,10 +2,12 @@
title: 'Troubleshooting: Performance'
---
import Ort from '../../components/Ort';
# Troubleshooting: Performance
## Execution providers don't seem to register
`ort` is designed to fail gracefully when an execution provider is not available or fails to register. To debug errors raised by EPs, [set up logging for `ort`](/troubleshooting/logging).
## Execution providers don't seem to work
<Ort/> is designed to fail gracefully when an execution provider is not available or fails to register. To debug errors raised by EPs, [set up logging for <Ort/>](/troubleshooting/logging).
You can also detect EP registration failures programmatically. See [Execution providers: Fallback behavior](/perf/execution-providers#fallback-behavior) for more info.

View File

@@ -1,3 +1,4 @@
import { Link } from 'nextra-theme-docs';
import { BiChip } from 'react-icons/bi';
import { BsAmd } from 'react-icons/bs';
import { PiGlobeBold } from 'react-icons/pi';
@@ -10,6 +11,7 @@ export interface ExecutionProvider {
vendor: string | null;
name: string;
feature: string;
note?: React.ReactNode;
platforms: TripleFilter[];
binaries?: TripleFilter[];
}
@@ -23,6 +25,7 @@ export const EXECUTION_PROVIDER_ARRAY: ExecutionProvider[] = [
vendor: 'NVIDIA',
name: 'CUDA',
feature: 'cuda',
note: <>Supports both CUDA 12 & CUDA 13. See <Link href='/perf/execution-providers#cuda'>here</Link> for more info.</>,
platforms: [ { os: 'windows', arch: 'x64' }, { os: 'linux', arch: 'x64' }, { os: 'linux', arch: 'arm64' } ],
binaries: [ { os: 'windows', arch: 'x64' }, { os: 'linux', arch: 'x64' } ]
},
@@ -149,8 +152,8 @@ export const EXECUTION_PROVIDER_ARRAY: ExecutionProvider[] = [
vendor: null,
name: 'WebGPU',
feature: 'webgpu',
platforms: [ { os: 'web' }, { os: 'windows' }, { os: 'linux' } ],
binaries: [ { os: 'web' }, { os: 'windows', arch: 'x64' }, { os: 'linux', arch: 'x64' } ]
platforms: [ { os: 'web' }, { os: 'windows' }, { os: 'linux' }, { os: 'macos' } ],
binaries: [ { os: 'web' }, { os: 'windows', arch: 'x64' }, { os: 'linux', arch: 'x64' }, { os: 'macos', arch: 'arm64' } ]
},
{
icon: <PiGlobeBold style={{ color: '#0066b0' }} />,

View File

@@ -16,13 +16,13 @@
"nextra-theme-docs": "^4.6.1",
"react": "^19.2.4",
"react-dom": "^19.2.4",
"react-icons": "^5.5.0",
"react-icons": "^5.6.0",
"zustand": "^5.0.11"
},
"devDependencies": {
"@types/canvas-confetti": "^1.9.0",
"@types/node": "^22.19.7",
"@types/react": "^19.2.10",
"@types/node": "^22.19.13",
"@types/react": "^19.2.14",
"@types/react-dom": "^19.2.3",
"pagefind": "^1.4.0",
"typescript": "^5.9.3"

1538
docs/pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB