├── .watchmanconfig
├── apps
│   └── expo-example
│       ├── src
│       │   ├── screens
│       │   │   └── apple
│       │   │       ├── SpeechScreen
│       │   │       │   ├── index.tsx
│       │   │       │   └── index.ios.tsx
│       │   │       ├── AppleLLMScreen
│       │   │       │   ├── index.tsx
│       │   │       │   └── index.ios.tsx
│       │   │       ├── PlaygroundScreen
│       │   │       │   ├── index.tsx
│       │   │       │   └── index.ios.tsx
│       │   │       └── TranscribeScreen
│       │   │           ├── index.tsx
│       │   │           └── index.ios.tsx
│       │   ├── global.css
│       │   ├── utils
│       │   │   └── audioUtils.ts
│       │   ├── tools.ts
│       │   └── App.tsx
│       ├── assets
│       │   ├── ck.png
│       │   ├── icon.png
│       │   ├── favicon.png
│       │   ├── splash-icon.png
│       │   └── adaptive-icon.png
│       ├── polyfills.ts
│       ├── babel.config.js
│       ├── tailwind.config.js
│       ├── index.js
│       ├── tsconfig.json
│       ├── .gitignore
│       ├── metro.config.js
│       ├── README.md
│       ├── nativewind-env.d.ts
│       ├── package.json
│       └── app.json
├── packages
│   ├── mlc
│   │   ├── app.plugin.js
│   │   ├── babel.config.js
│   │   ├── tsconfig.json
│   │   ├── src
│   │   │   ├── index.ts
│   │   │   ├── expo-plugin.ts
│   │   │   └── NativeMLCEngine.ts
│   │   ├── tsconfig.build.json
│   │   ├── android
│   │   │   ├── gradle.properties
│   │   │   ├── src
│   │   │   │   └── main
│   │   │   │       ├── AndroidManifest.xml
│   │   │   │       └── java
│   │   │   │           └── com
│   │   │   │               └── callstack
│   │   │   │                   └── ai
│   │   │   │                       ├── NativeMLCEnginePackage.kt
│   │   │   │                       └── ModelDownloader.kt
│   │   │   └── build.gradle
│   │   ├── ios
│   │   │   └── engine
│   │   │       ├── BackgroundWorker.h
│   │   │       ├── LLMEngine.h
│   │   │       ├── JSONFFIEngine.h
│   │   │       ├── EngineState.h
│   │   │       ├── BackgroundWorker.mm
│   │   │       ├── LLMEngine.mm
│   │   │       ├── EngineState.mm
│   │   │       └── JSONFFIEngine.mm
│   │   ├── README.md
│   │   ├── mlc-package-config-android.json
│   │   ├── mlc-package-config-ios.json
│   │   ├── react-native-ai-mlc.podspec
│   │   └── package.json
│   ├── llama
│   │   ├── tsconfig.json
│   │   ├── tsconfig.build.json
│   │   ├── src
│   │   │   ├── index.ts
│   │   │   └── storage.ts
│   │   ├── package.json
│   │   └── README.md
│   └── apple-llm
│       ├── tsconfig.json
│       ├── tsconfig.build.json
│       ├── src
│       │   ├── NativeAppleUtils.ts
│       │   ├── index.ts
│       │   ├── NativeAppleEmbeddings.ts
│       │   ├── NativeAppleTranscription.ts
│       │   ├── NativeAppleSpeech.ts
│       │   ├── NativeAppleLLM.ts
│       │   ├── utils.ts
│       │   └── stream.ts
│       ├── AppleLLM.podspec
│       ├── ios
│       │   ├── utils
│       │   │   └── AppleUtils.mm
│       │   ├── AppleLLMError.swift
│       │   ├── embeddings
│       │   │   └── AppleEmbeddings.mm
│       │   ├── speech
│       │   │   ├── AppleSpeechImpl.swift
│       │   │   └── AppleSpeech.mm
│       │   └── transcription
│       │       ├── AppleTranscription.mm
│       │       └── AppleTranscriptionImpl.swift
│       ├── README.md
│       └── package.json
├── bunfig.toml
├── .gitattributes
├── website
│   ├── src
│   │   ├── public
│   │   │   ├── logo-dark.png
│   │   │   ├── logo-light.png
│   │   │   ├── og-image.jpg
│   │   │   ├── rspress-icon.png
│   │   │   ├── smartphone.svg
│   │   │   ├── shield.svg
│   │   │   ├── zap.svg
│   │   │   ├── device.svg
│   │   │   ├── icon.svg
│   │   │   ├── code.svg
│   │   │   └── apple.svg
│   │   ├── _nav.json
│   │   ├── docs
│   │   │   ├── mlc
│   │   │   │   ├── _meta.json
│   │   │   │   ├── model-management.md
│   │   │   │   ├── getting-started.md
│   │   │   │   └── generating.md
│   │   │   ├── llama
│   │   │   │   ├── _meta.json
│   │   │   │   ├── getting-started.md
│   │   │   │   ├── generating.md
│   │   │   │   └── model-management.md
│   │   │   ├── apple
│   │   │   │   ├── _meta.json
│   │   │   │   ├── getting-started.md
│   │   │   │   ├── running-on-simulator.md
│   │   │   │   ├── speech.md
│   │   │   │   ├── transcription.md
│   │   │   │   ├── embeddings.md
│   │   │   │   └── generating.md
│   │   │   ├── _meta.json
│   │   │   └── index.md
│   │   └── index.md
│   ├── theme
│   │   ├── fonts
│   │   │   ├── alliance-no-2-medium.ttf
│   │   │   └── alliance-no-2-regular.ttf
│   │   └── styles.css
│   ├── .gitignore
│   ├── vercel.json
│   ├── README.md
│   ├── package.json
│   ├── tsconfig.json
│   └── rspress.config.ts
├── .prettierrc
├── .github
│   ├── actions
│   │   └── setup
│   │       └── action.yml
│   └── workflows
│       └── ci.yml
├── .editorconfig
├── tsconfig.base.json
├── eslint.config.mjs
├── LICENSE
├── .gitignore
├── package.json
├── CONTRIBUTING.md
├── README.md
└── CODE_OF_CONDUCT.md
/.watchmanconfig:
--------------------------------------------------------------------------------
1 | {}
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/SpeechScreen/index.tsx:
--------------------------------------------------------------------------------
1 | export default null
2 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/AppleLLMScreen/index.tsx:
--------------------------------------------------------------------------------
1 | export default null
2 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/PlaygroundScreen/index.tsx:
--------------------------------------------------------------------------------
1 | export default null
2 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/TranscribeScreen/index.tsx:
--------------------------------------------------------------------------------
1 | export default null
2 |
--------------------------------------------------------------------------------
/packages/mlc/app.plugin.js:
--------------------------------------------------------------------------------
1 | module.exports = require('./lib/commonjs/expo-plugin')
2 |
--------------------------------------------------------------------------------
/bunfig.toml:
--------------------------------------------------------------------------------
1 | [install]
2 |
3 | linker = "hoisted"
4 | peer = false
5 | frozen-lockfile = true
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.pbxproj -text
2 |
3 | # specific for windows script files
4 | *.bat text eol=crlf
5 |
--------------------------------------------------------------------------------
/apps/expo-example/src/global.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
--------------------------------------------------------------------------------
/packages/mlc/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: ['module:@react-native/babel-preset'],
3 | }
4 |
--------------------------------------------------------------------------------
/packages/mlc/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "include": ["src/**/*"]
4 | }
5 |
--------------------------------------------------------------------------------
/packages/llama/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "include": ["src/**/*"]
4 | }
5 |
--------------------------------------------------------------------------------
/apps/expo-example/assets/ck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/apps/expo-example/assets/ck.png
--------------------------------------------------------------------------------
/apps/expo-example/assets/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/apps/expo-example/assets/icon.png
--------------------------------------------------------------------------------
/packages/apple-llm/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "include": ["src/**/*"]
4 | }
5 |
--------------------------------------------------------------------------------
/packages/mlc/src/index.ts:
--------------------------------------------------------------------------------
1 | export { mlc } from './ai-sdk'
2 | export { default as MLCEngine } from './NativeMLCEngine'
3 |
--------------------------------------------------------------------------------
/website/src/public/logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/website/src/public/logo-dark.png
--------------------------------------------------------------------------------
/website/src/public/logo-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/website/src/public/logo-light.png
--------------------------------------------------------------------------------
/website/src/public/og-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/website/src/public/og-image.jpg
--------------------------------------------------------------------------------
/website/src/public/rspress-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/website/src/public/rspress-icon.png
--------------------------------------------------------------------------------
/apps/expo-example/assets/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/apps/expo-example/assets/favicon.png
--------------------------------------------------------------------------------
/website/src/_nav.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "text": "Docs",
4 | "link": "/docs/",
5 | "activeMatch": "/docs/"
6 | }
7 | ]
8 |
--------------------------------------------------------------------------------
/apps/expo-example/assets/splash-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/apps/expo-example/assets/splash-icon.png
--------------------------------------------------------------------------------
/apps/expo-example/assets/adaptive-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/apps/expo-example/assets/adaptive-icon.png
--------------------------------------------------------------------------------
/packages/mlc/tsconfig.build.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig",
3 | "include": ["src/**/*"],
4 | "exclude": ["**/__tests__/**"]
5 | }
6 |
--------------------------------------------------------------------------------
/packages/llama/tsconfig.build.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig",
3 | "include": ["src/**/*"],
4 | "exclude": ["**/__tests__/**"]
5 | }
6 |
--------------------------------------------------------------------------------
/website/theme/fonts/alliance-no-2-medium.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/website/theme/fonts/alliance-no-2-medium.ttf
--------------------------------------------------------------------------------
/website/theme/fonts/alliance-no-2-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/callstackincubator/ai/HEAD/website/theme/fonts/alliance-no-2-regular.ttf
--------------------------------------------------------------------------------
/packages/apple-llm/tsconfig.build.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig",
3 | "include": ["src/**/*"],
4 | "exclude": ["**/__tests__/**"]
5 | }
6 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "quoteProps": "consistent",
3 | "singleQuote": true,
4 | "tabWidth": 2,
5 | "trailingComma": "es5",
6 | "useTabs": false,
7 | "semi": false
8 | }
9 |
--------------------------------------------------------------------------------
/packages/mlc/android/gradle.properties:
--------------------------------------------------------------------------------
1 | MLC_kotlinVersion=2.1.20
2 | MLC_minSdkVersion=21
3 | MLC_targetSdkVersion=31
4 | MLC_compileSdkVersion=31
5 | MLC_ndkversion=21.4.7075529
6 |
--------------------------------------------------------------------------------
/website/.gitignore:
--------------------------------------------------------------------------------
1 | # Local
2 | .DS_Store
3 | *.local
4 | *.log*
5 |
6 | # Dist
7 | node_modules
8 | dist/
9 | doc_build/
10 |
11 | # IDE
12 | .vscode/*
13 | !.vscode/extensions.json
14 | .idea
15 |
--------------------------------------------------------------------------------
/website/src/public/smartphone.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/apps/expo-example/polyfills.ts:
--------------------------------------------------------------------------------
1 | import '@azure/core-asynciterator-polyfill'
2 |
3 | import structuredClone from '@ungap/structured-clone'
4 |
5 | if (!('structuredClone' in globalThis)) {
6 | globalThis.structuredClone = structuredClone
7 | }
8 |
--------------------------------------------------------------------------------
/apps/expo-example/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = function (api) {
2 | api.cache(true)
3 | return {
4 | presets: [
5 | ['babel-preset-expo', { jsxImportSource: 'nativewind' }],
6 | 'nativewind/babel',
7 | ],
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/website/src/docs/mlc/_meta.json:
--------------------------------------------------------------------------------
1 | [
2 | { "type": "file", "name": "getting-started", "label": "Getting Started" },
3 | { "type": "file", "name": "generating", "label": "Generating" },
4 | { "type": "file", "name": "model-management", "label": "Model Management" }
5 | ]
6 |
--------------------------------------------------------------------------------
/website/src/docs/llama/_meta.json:
--------------------------------------------------------------------------------
1 | [
2 | { "type": "file", "name": "getting-started", "label": "Getting Started" },
3 | { "type": "file", "name": "generating", "label": "Generating" },
4 | { "type": "file", "name": "model-management", "label": "Model Management" }
5 | ]
6 |
--------------------------------------------------------------------------------
/packages/llama/src/index.ts:
--------------------------------------------------------------------------------
1 | export type { DownloadProgress, LlamaModelOptions, ModelInfo } from './ai-sdk'
2 | export { llama, LlamaEngine, LlamaLanguageModel } from './ai-sdk'
3 | export type {
4 | CompletionParams,
5 | ContextParams,
6 | LlamaContext,
7 | TokenData,
8 | } from 'llama.rn'
9 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/NativeAppleUtils.ts:
--------------------------------------------------------------------------------
1 | import type { TurboModule } from 'react-native'
2 | import { TurboModuleRegistry } from 'react-native'
3 |
4 | export interface Spec extends TurboModule {
5 | getCurrentLocale(): string
6 | }
7 |
8 | export default TurboModuleRegistry.getEnforcing<Spec>('NativeAppleUtils')
9 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/BackgroundWorker.h:
--------------------------------------------------------------------------------
1 | //
2 | // BackgroundWorker.h
3 | // Pods
4 | //
5 |
6 | #import <Foundation/Foundation.h>
7 |
8 | NS_ASSUME_NONNULL_BEGIN
9 |
10 | @interface BackgroundWorker : NSThread
11 | - (instancetype)initWithTask:(void (^)(void))task;
12 | @end
13 |
14 | NS_ASSUME_NONNULL_END
15 |
--------------------------------------------------------------------------------
/website/src/public/shield.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/website/src/public/zap.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/.github/actions/setup/action.yml:
--------------------------------------------------------------------------------
1 | name: Setup
2 | description: Setup Bun and install dependencies
3 |
4 | runs:
5 | using: composite
6 | steps:
7 | - name: Setup Bun
8 | uses: oven-sh/setup-bun@v2
9 | with:
10 | bun-version: latest
11 |
12 | - name: Install dependencies
13 | run: bun install
14 | shell: bash
15 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig helps developers define and maintain consistent
2 | # coding styles between different editors and IDEs
3 | # editorconfig.org
4 |
5 | root = true
6 |
7 | [*]
8 |
9 | indent_style = space
10 | indent_size = 2
11 |
12 | end_of_line = lf
13 | charset = utf-8
14 | trim_trailing_whitespace = true
15 | insert_final_newline = true
16 |
--------------------------------------------------------------------------------
/apps/expo-example/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | // NOTE: Update this to include the paths to all files that contain Nativewind classes.
4 | content: ['./src/**/*.{js,jsx,ts,tsx}'],
5 | presets: [require('nativewind/preset')],
6 | theme: {
7 | extend: {},
8 | },
9 | plugins: [],
10 | }
11 |
--------------------------------------------------------------------------------
/website/vercel.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://openapi.vercel.sh/vercel.json",
3 | "buildCommand": "bun run build",
4 | "cleanUrls": true,
5 | "framework": null,
6 | "installCommand": "bun install --frozen-lockfile",
7 | "rewrites": [{ "source": "/(.*)", "destination": "/404.html" }],
8 | "outputDirectory": "build",
9 | "trailingSlash": false
10 | }
11 |
--------------------------------------------------------------------------------
/website/src/public/device.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/apps/expo-example/index.js:
--------------------------------------------------------------------------------
1 | import './polyfills'
2 |
3 | import { registerRootComponent } from 'expo'
4 |
5 | import App from './src/App'
6 |
7 | // registerRootComponent calls AppRegistry.registerComponent('main', () => App);
8 | // It also ensures that whether you load the app in Expo Go or in a native build,
9 | // the environment is set up appropriately
10 | registerRootComponent(App)
11 |
--------------------------------------------------------------------------------
/apps/expo-example/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "expo/tsconfig.base",
3 | "compilerOptions": {
4 | "strictNullChecks": true,
5 | "paths": {
6 | "@react-native-ai/apple": ["../../packages/apple-llm/src"],
7 | "@react-native-ai/mlc": ["../../packages/mlc/src"]
8 | },
9 | "types": ["nativewind/types"]
10 | },
11 | "include": ["src/**/*", "nativewind-env.d.ts"]
12 | }
13 |
--------------------------------------------------------------------------------
/website/README.md:
--------------------------------------------------------------------------------
1 | # Rspress Website
2 |
3 | ## Setup
4 |
5 | Install the dependencies:
6 |
7 | ```bash
8 | npm install
9 | ```
10 |
11 | ## Get Started
12 |
13 | Start the dev server:
14 |
15 | ```bash
16 | npm run dev
17 | ```
18 |
19 | Build the website for production:
20 |
21 | ```bash
22 | npm run build
23 | ```
24 |
25 | Preview the production build locally:
26 |
27 | ```bash
28 | npm run preview
29 | ```
30 |
--------------------------------------------------------------------------------
/packages/mlc/android/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 |
3 |
4 |
7 |
8 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/tsconfig.base.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ESNext",
4 | "lib": ["DOM", "ESNext"],
5 | "module": "preserve",
6 | "moduleResolution": "bundler",
7 | "allowJs": true,
8 | "esModuleInterop": true,
9 | "jsx": "react-native",
10 | "customConditions": ["react-native"],
11 | "resolveJsonModule": true,
12 | "declaration": true,
13 | "strict": true,
14 | "skipLibCheck": true
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/website/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "docs",
3 | "version": "0.0.1",
4 | "private": true,
5 | "scripts": {
6 | "dev": "rspress dev",
7 | "build": "rspress build",
8 | "preview": "rspress preview"
9 | },
10 | "dependencies": {
11 | "@callstack/rspress-preset": "0.4.5",
12 | "@rspress/core": "2.0.0-beta.32"
13 | },
14 | "devDependencies": {
15 | "@types/node": "^22",
16 | "@types/react": "^19"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/website/src/public/icon.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/website/src/docs/apple/_meta.json:
--------------------------------------------------------------------------------
1 | [
2 | { "type": "file", "name": "getting-started", "label": "Getting Started" },
3 | { "type": "file", "name": "generating", "label": "Generating" },
4 | { "type": "file", "name": "embeddings", "label": "Embeddings" },
5 | { "type": "file", "name": "transcription", "label": "Transcription" },
6 | { "type": "file", "name": "speech", "label": "Speech" },
7 | { "type": "file", "name": "running-on-simulator", "label": "Running on Simulator" }
8 | ]
9 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/index.ts:
--------------------------------------------------------------------------------
1 | export { apple, createAppleProvider } from './ai-sdk'
2 | export { default as AppleEmbeddings } from './NativeAppleEmbeddings'
3 | export { default as AppleFoundationModels } from './NativeAppleLLM'
4 | export { default as AppleSpeech, VoiceInfo } from './NativeAppleSpeech'
5 | export { default as AppleTranscription } from './NativeAppleTranscription'
6 | export { default as AppleUtils } from './NativeAppleUtils'
7 | export { addWAVHeader, AudioFormatType } from './utils'
8 |
--------------------------------------------------------------------------------
/website/theme/styles.css:
--------------------------------------------------------------------------------
1 | @font-face {
2 | font-family: "Alliance No. 2";
3 | font-style: normal;
4 | font-weight: 400;
5 | font-display: swap;
6 | src: url("./fonts/alliance-no-2-regular.ttf") format("truetype");
7 | font-display: block;
8 | }
9 |
10 | @font-face {
11 | font-family: "Alliance No. 2";
12 | font-style: normal;
13 | font-weight: 500;
14 | font-display: swap;
15 | src: url("./fonts/alliance-no-2-medium.ttf") format("truetype");
16 | font-display: block;
17 | }
--------------------------------------------------------------------------------
/website/src/docs/_meta.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "type": "file",
4 | "name": "index",
5 | "label": "Introduction"
6 | },
7 | {
8 | "type": "dir",
9 | "name": "apple",
10 | "label": "Apple",
11 | "collapsible": true,
12 | "collapsed": false
13 | },
14 | {
15 | "type": "dir",
16 | "name": "llama",
17 | "label": "Llama",
18 | "collapsible": true,
19 | "collapsed": false
20 | },
21 | {
22 | "type": "dir",
23 | "name": "mlc",
24 | "label": "MLC",
25 | "collapsible": true,
26 | "collapsed": false
27 | }
28 | ]
29 |
--------------------------------------------------------------------------------
/packages/mlc/src/expo-plugin.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ConfigPlugin,
3 | createRunOncePlugin,
4 | withEntitlementsPlist,
5 | } from 'expo/config-plugins'
6 |
7 | const pkg = require('../../package.json')
8 |
9 | const withMLCMemoryOptimization: ConfigPlugin = (config) => {
10 | return withEntitlementsPlist(config, (config) => {
11 | config.modResults['com.apple.developer.kernel.increased-memory-limit'] =
12 | true
13 | return config
14 | })
15 | }
16 |
17 | export default createRunOncePlugin(
18 | withMLCMemoryOptimization,
19 | pkg.name,
20 | pkg.version
21 | )
22 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/LLMEngine.h:
--------------------------------------------------------------------------------
1 | //
2 | // LLMEngine.h
3 | // Pods
4 | //
5 |
6 | #import <Foundation/Foundation.h>
7 |
8 | NS_ASSUME_NONNULL_BEGIN
9 |
10 | @interface LLMEngine : NSObject
11 |
12 | - (instancetype)init;
13 |
14 | - (void)reloadWithModelPath:(NSString *)modelPath modelLib:(NSString *)modelLib;
15 | - (void)reset;
16 | - (void)unload;
17 |
18 | - (NSString*)chatCompletionWithMessages:(NSArray *)messages options:(NSDictionary *)options completion:(void (^)(NSDictionary* response))completion;
19 | - (void)cancelRequest:(NSString *)requestId;
20 |
21 | @end
22 |
23 | NS_ASSUME_NONNULL_END
24 |
--------------------------------------------------------------------------------
/apps/expo-example/.gitignore:
--------------------------------------------------------------------------------
1 | # Learn more https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files
2 |
3 | # dependencies
4 | node_modules/
5 |
6 | # Expo
7 | .expo/
8 | dist/
9 | web-build/
10 | expo-env.d.ts
11 |
12 | # Native
13 | .kotlin/
14 | *.orig.*
15 | *.jks
16 | *.p8
17 | *.p12
18 | *.key
19 | *.mobileprovision
20 |
21 | # Metro
22 | .metro-health-check*
23 |
24 | # debug
25 | npm-debug.*
26 | yarn-debug.*
27 | yarn-error.*
28 |
29 | # macOS
30 | .DS_Store
31 | *.pem
32 |
33 | # local env files
34 | .env*.local
35 |
36 | # typescript
37 | *.tsbuildinfo
38 |
39 | ios/
40 | android/
41 |
--------------------------------------------------------------------------------
/apps/expo-example/metro.config.js:
--------------------------------------------------------------------------------
1 | // Learn more https://docs.expo.io/guides/customizing-metro
2 | const { getDefaultConfig } = require('expo/metro-config')
3 | const { withNativeWind } = require('nativewind/metro')
4 | const {
5 | wrapWithAudioAPIMetroConfig,
6 | } = require('react-native-audio-api/metro-config')
7 |
8 | const config = getDefaultConfig(__dirname)
9 |
10 | // 3. Force Metro to resolve (sub)dependencies only from the `nodeModulesPaths`
11 | config.resolver.disableHierarchicalLookup = true
12 |
13 | module.exports = wrapWithAudioAPIMetroConfig(
14 | withNativeWind(config, { input: './src/global.css' })
15 | )
16 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/NativeAppleEmbeddings.ts:
--------------------------------------------------------------------------------
1 | import type { TurboModule } from 'react-native'
2 | import { TurboModuleRegistry } from 'react-native'
3 |
4 | export interface EmbeddingInfo {
5 | hasAvailableAssets: boolean
6 | dimension: number
7 | languages: string[]
8 | maximumSequenceLength: number
9 | modelIdentifier: string
10 | revision: number
11 | scripts: string[]
12 | }
13 |
14 | export interface Spec extends TurboModule {
15 |   getInfo(language: string): Promise<EmbeddingInfo>
16 |   prepare(language: string): Promise<void>
17 |   generateEmbeddings(values: string[], language: string): Promise<number[][]>
18 | }
19 |
20 | export default TurboModuleRegistry.getEnforcing<Spec>('NativeAppleEmbeddings')
21 |
--------------------------------------------------------------------------------
/packages/apple-llm/AppleLLM.podspec:
--------------------------------------------------------------------------------
1 | require "json"
2 |
3 | package = JSON.parse(File.read(File.join(__dir__, "package.json")))
4 |
5 | Pod::Spec.new do |s|
6 | s.name = "AppleLLM"
7 |
8 | s.version = package["version"]
9 | s.summary = package["description"]
10 | s.homepage = package["homepage"]
11 | s.license = package["license"]
12 | s.authors = package["author"]
13 |
14 | s.platforms = { :ios => min_ios_version_supported }
15 | s.source = { :git => "https://github.com/callstackincubator/ai.git", :tag => "#{s.version}" }
16 |
17 | s.source_files = "ios/**/*.{h,m,mm,swift}"
18 | s.exclude_files = "ios/Tests/**/*"
19 |
20 | install_modules_dependencies(s)
21 | end
22 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/JSONFFIEngine.h:
--------------------------------------------------------------------------------
1 | //
2 | // LLMEngine.h
3 | // Pods
4 | //
5 |
6 | #import
7 | #import
8 |
9 | /**
10 | * This is an internal Raw JSON FFI Engine that redirects request to internal JSON FFI Engine in C++
11 | */
12 | @interface JSONFFIEngine : NSObject
13 |
14 | - (void)initBackgroundEngine:(void (^)(NSString *))streamCallback;
15 |
16 | - (void)reload:(NSString *)engineConfig;
17 |
18 | - (void)unload;
19 |
20 | - (void)reset;
21 |
22 | - (void)chatCompletion:(NSString *)requestJSON requestID:(NSString *)requestID;
23 |
24 | - (void)abort:(NSString *)requestID;
25 |
26 | - (void)runBackgroundLoop;
27 |
28 | - (void)runBackgroundStreamBackLoop;
29 |
30 | - (void)exitBackgroundLoop;
31 |
32 | @end
33 |
--------------------------------------------------------------------------------
/website/src/public/code.svg:
--------------------------------------------------------------------------------
1 |
17 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/EngineState.h:
--------------------------------------------------------------------------------
1 | //
2 | // MLCEngine.h
3 | // Pods
4 | //
5 | // Created by Szymon Rybczak on 19/07/2024.
6 | //
7 |
8 | #import "JSONFFIEngine.h"
9 | #import <Foundation/Foundation.h>
10 |
11 | NS_ASSUME_NONNULL_BEGIN
12 |
13 | @interface EngineState : NSObject
14 | @property(nonatomic, strong) NSMutableDictionary *requestStateMap;
15 |
16 | - (NSString*)chatCompletionWithJSONFFIEngine:(JSONFFIEngine *)jsonFFIEngine
17 | request:(NSDictionary *)request
18 | completion:(void (^)(NSDictionary* response))completion;
19 | - (void)streamCallbackWithResult:(NSString *)result;
20 | - (void)cancelRequest:(NSString *)requestId
21 | withJSONFFIEngine:(JSONFFIEngine *)jsonFFIEngine;
22 | @end
23 |
24 | NS_ASSUME_NONNULL_END
25 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/BackgroundWorker.mm:
--------------------------------------------------------------------------------
1 | //
2 | // BackgroundWorker.mm
3 | // Pods
4 | //
5 |
6 | #import "BackgroundWorker.h"
7 |
8 | /**
9 | * BackgroundWorker manages background thread execution for the MLC engine.
10 | * This class provides a simple interface to run long-running tasks on separate threads,
11 | * ensuring the main thread remains responsive while the LLM engine processes requests.
12 | * It's used to run the engine's background loop and stream processing loop concurrently.
13 | */
14 | @implementation BackgroundWorker {
15 | void (^_task)(void);
16 | }
17 |
18 | - (instancetype)initWithTask:(void (^)(void))task {
19 | self = [super init];
20 | if (self) {
21 | _task = [task copy];
22 | }
23 | return self;
24 | }
25 |
26 | - (void)main {
27 | if (_task) {
28 | _task();
29 | }
30 | }
31 |
32 | @end
33 |
--------------------------------------------------------------------------------
/website/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2020",
4 | "lib": ["DOM", "ES2020"],
5 | "module": "ESNext",
6 | "jsx": "react-jsx",
7 | "noEmit": true,
8 | "strict": true,
9 | "skipLibCheck": true,
10 | "isolatedModules": true,
11 | "resolveJsonModule": true,
12 | "moduleResolution": "bundler",
13 | "useDefineForClassFields": true,
14 | "allowImportingTsExtensions": true,
15 | "paths": {
16 | "@callstack/rspress-theme": ["../theme/src"],
17 | "@callstack/rspress-theme/*": ["../theme/src/*"],
18 | "@callstack/rspress-theme/plugin": ["../theme/src/plugin"],
19 | "@callstack/rspress-theme/plugin/*": ["../theme/src/plugin/*"]
20 | }
21 | },
22 | "include": ["docs", "theme", "rspress.config.ts"],
23 | "mdx": {
24 | "checkMdx": true
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/packages/apple-llm/ios/utils/AppleUtils.mm:
--------------------------------------------------------------------------------
1 | //
2 | // AppleUtils.mm
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 03/08/2025.
6 | //
7 |
8 | #if __has_include("AppleLLM/AppleLLM-Swift.h")
9 | #import "AppleLLM/AppleLLM-Swift.h"
10 | #else
11 | #import "AppleLLM-Swift.h"
12 | #endif
13 |
14 | #import
15 | #import
16 |
17 | @interface AppleUtils : NativeAppleUtilsSpecBase
18 | @end
19 |
20 | using namespace facebook;
21 |
22 | @implementation AppleUtils
23 |
24 | + (NSString *)moduleName {
25 | return @"NativeAppleUtils";
26 | }
27 |
28 | - (std::shared_ptr<react::TurboModule>)getTurboModule:(const react::ObjCTurboModule::InitParams &)params {
29 |   return std::make_shared<react::NativeAppleUtilsSpecJSI>(params);
30 | }
31 |
32 | - (NSString *)getCurrentLocale {
33 | return [NSLocale currentLocale].localeIdentifier;
34 | }
35 |
36 | @end
37 |
--------------------------------------------------------------------------------
/eslint.config.mjs:
--------------------------------------------------------------------------------
1 | import { defineConfig, globalIgnores } from 'eslint/config'
2 | import expoConfig from 'eslint-config-expo/flat.js'
3 | import eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended'
4 | import simpleImportSort from 'eslint-plugin-simple-import-sort'
5 | import globals from 'globals'
6 |
7 | export default defineConfig([
8 | globalIgnores(['website/**/*', '**/node_modules', '**/lib', '**/build']),
9 | expoConfig,
10 | eslintPluginPrettierRecommended,
11 | {
12 | plugins: {
13 | 'simple-import-sort': simpleImportSort,
14 | },
15 | rules: {
16 | 'simple-import-sort/imports': 'error',
17 | 'simple-import-sort/exports': 'error',
18 | },
19 | },
20 | {
21 | files: ['**/babel.config.js', '**/react-native.config.js'],
22 | languageOptions: {
23 | globals: {
24 | ...globals.node,
25 | },
26 | },
27 | },
28 | // Configure Prettier
29 | {
30 | rules: {
31 | 'prettier/prettier': 'error',
32 | },
33 | },
34 | ])
35 |
--------------------------------------------------------------------------------
/packages/apple-llm/README.md:
--------------------------------------------------------------------------------
1 | # Apple Provider for Vercel AI SDK
2 |
3 | A Vercel AI SDK provider for Apple Foundation Models, enabling access to Apple Intelligence in React Native applications.
4 |
5 | **Requirements:**
6 | - iOS 26+
7 | - Apple Intelligence enabled device
8 | - Vercel AI SDK v5
9 | - React Native New Architecture
10 |
11 | ```ts
12 | import { apple } from '@react-native-ai/apple'
13 | import { generateText } from 'ai'
14 |
15 | const answer = await generateText({
16 | model: apple(),
17 | prompt: 'What is the meaning of life?'
18 | })
19 | ```
20 |
21 | ## Features
22 |
23 | - ✅ Text generation with Apple Foundation Models
24 | - ✅ Structured outputs
25 | - ✅ Tool calling
26 | - ✅ Streaming
27 |
28 | ## Documentation
29 |
30 | For complete installation instructions and API documentation, visit our [documentation site](https://react-native-ai.com/docs/apple).
31 |
32 | ## License
33 |
34 | MIT
35 |
36 | ---
37 |
38 | Made with ❤️ and [create-react-native-library](https://github.com/callstack/react-native-builder-bob)
39 |
--------------------------------------------------------------------------------
/website/rspress.config.ts:
--------------------------------------------------------------------------------
1 | import path from 'node:path'
2 | import url from 'node:url'
3 | import { withCallstackPreset } from '@callstack/rspress-preset'
4 |
5 | const __filename = url.fileURLToPath(import.meta.url)
6 | const __dirname = path.dirname(__filename)
7 |
8 | export default withCallstackPreset(
9 | {
10 | context: __dirname,
11 | docs: {
12 | title: 'React Native AI',
13 | description:
14 | 'react-native-ai brings on-device LLMs capabilities to mobile React Native apps',
15 | rootUrl: 'https://react-native-ai.dev',
16 | icon: '/icon.svg',
17 | ogImage: '/og-image.jpg',
18 | logoLight: '/logo-light.png',
19 | logoDark: '/logo-dark.png',
20 | editUrl: 'https://github.com/callstackincubator/ai/edit/main/website/src',
21 | rootDir: 'src',
22 | socials: {
23 | discord: 'https://discord.com/invite/dmDkGFNj9k',
24 | github: 'https://github.com/callstackincubator/ai',
25 | x: 'https://x.com/callstackio',
26 | },
27 | },
28 | },
29 | { outDir: 'build' }
30 | )
31 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/NativeAppleTranscription.ts:
--------------------------------------------------------------------------------
1 | import type { TurboModule } from 'react-native'
2 | import { TurboModuleRegistry } from 'react-native'
3 |
4 | export interface TranscriptionSegment {
5 | text: string
6 | startSecond: number
7 | endSecond: number
8 | }
9 |
10 | export interface TranscriptionResult {
11 | segments: TranscriptionSegment[]
12 | duration: number
13 | }
14 |
15 | export interface Spec extends TurboModule {
16 | isAvailable(language: string): boolean
17 |   prepare(language: string): Promise<void>
18 | }
19 |
20 | declare global {
21 | function __apple__llm__transcribe__(
22 | data: ArrayBufferLike,
23 | language: string
24 |   ): Promise<TranscriptionResult>
25 | }
26 |
27 | const NativeAppleTranscription = TurboModuleRegistry.getEnforcing<Spec>(
28 | 'NativeAppleTranscription'
29 | )
30 |
31 | export default {
32 | transcribe: (data: ArrayBufferLike, language: string) =>
33 | globalThis.__apple__llm__transcribe__(data, language),
34 | prepare: NativeAppleTranscription.prepare,
35 | isAvailable: NativeAppleTranscription.isAvailable,
36 | }
37 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | branches:
8 | - main
9 |
10 | jobs:
11 | lint:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Checkout
15 | uses: actions/checkout@v3
16 |
17 | - name: Setup
18 | uses: ./.github/actions/setup
19 |
20 | - name: Lint files
21 | run: bun run lint
22 |
23 | - name: Typecheck files
24 | run: bun run typecheck
25 |
26 | build-library:
27 | runs-on: ubuntu-latest
28 | steps:
29 | - name: Checkout
30 | uses: actions/checkout@v3
31 |
32 | - name: Setup
33 | uses: ./.github/actions/setup
34 |
35 | - name: Build package
36 | run: cd packages/mlc && bun run prepare
37 |
38 | build-apple-llm:
39 | runs-on: macos-latest
40 | steps:
41 | - name: Checkout
42 | uses: actions/checkout@v3
43 |
44 | - name: Setup
45 | uses: ./.github/actions/setup
46 |
47 | - name: Build package
48 | run: cd packages/apple-llm && bun run prepare
49 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 szymonrybczak
4 | Permission is hereby granted, free of charge, to any person obtaining a copy
5 | of this software and associated documentation files (the "Software"), to deal
6 | in the Software without restriction, including without limitation the rights
7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | copies of the Software, and to permit persons to whom the Software is
9 | furnished to do so, subject to the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be included in all
12 | copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 | SOFTWARE.
21 |
--------------------------------------------------------------------------------
/packages/mlc/README.md:
--------------------------------------------------------------------------------
1 | # MLC Provider for Vercel AI SDK
2 |
3 | A Vercel AI SDK provider for MLC (Machine Learning Compilation) models, enabling on-device large language model inference in React Native applications.
4 |
5 | **Requirements:**
6 |
7 | - iOS 14+
8 | - React Native New Architecture
9 | - Vercel AI SDK v5
10 |
11 | ```ts
12 | import { mlc } from '@react-native-ai/mlc'
13 | import { generateText } from 'ai'
14 |
15 | const answer = await generateText({
16 | model: mlc('Llama-3.2-3B-Instruct'),
17 | prompt: 'What is the meaning of life?'
18 | })
19 | ```
20 |
21 | ## Features
22 |
23 | - ✅ On-device text generation with MLC models
24 | - ✅ Multiple model support (Llama, Phi, Qwen, etc.)
25 | - ✅ Model downloading and management
26 | - ✅ Streaming responses
27 | - ✅ Hardware-accelerated inference
28 |
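Streaming responses go through the AI SDK's `streamText` helper. A minimal sketch, assuming the model has already been downloaded to the device (see the model management docs):

```ts
import { mlc } from '@react-native-ai/mlc'
import { streamText } from 'ai'

// Assumes 'Llama-3.2-3B-Instruct' is already available on the device.
const { textStream } = streamText({
  model: mlc('Llama-3.2-3B-Instruct'),
  prompt: 'What is the meaning of life?',
})

for await (const chunk of textStream) {
  console.log(chunk)
}
```
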
29 | ## Documentation
30 |
31 | For complete installation instructions and API documentation, visit our [documentation site](https://react-native-ai.com/docs/mlc).
32 |
33 | ## License
34 |
35 | MIT
36 |
37 | ---
38 |
39 | Made with ❤️ and [create-react-native-library](https://github.com/callstack/react-native-builder-bob)
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # OSX
2 | #
3 | .DS_Store
4 |
5 | # XDE
6 | .expo/
7 |
8 | # VSCode
9 | .vscode/
10 | jsconfig.json
11 |
12 | # Xcode
13 | #
14 | build/
15 | *.pbxuser
16 | !default.pbxuser
17 | *.mode1v3
18 | !default.mode1v3
19 | *.mode2v3
20 | !default.mode2v3
21 | *.perspectivev3
22 | !default.perspectivev3
23 | xcuserdata
24 | *.xccheckout
25 | *.moved-aside
26 | DerivedData
27 | *.hmap
28 | *.ipa
29 | *.xcuserstate
30 | project.xcworkspace
31 | .xcode.env.local
32 |
33 | # AI
34 | .claude
35 |
36 | # Android/IJ
37 | #
38 | .classpath
39 | .cxx
40 | .gradle
41 | .idea
42 | .project
43 | .settings
44 | local.properties
45 | android.iml
46 |
47 | # Cocoapods
48 | #
49 | apps/expo-example/ios/Pods/
50 |
51 | # Ruby
52 | apps/expo-example/vendor/
53 |
54 | # node.js
55 | #
56 | node_modules/
57 | npm-debug.log
58 | yarn-debug.log
59 | yarn-error.log
60 |
61 | # BUCK
62 | buck-out/
63 | \.buckd/
64 | android/app/libs
65 | android/keystores/debug.keystore
66 |
67 | # Yarn
68 | .yarn/*
69 | !.yarn/patches
70 | !.yarn/plugins
71 | !.yarn/releases
72 | !.yarn/sdks
73 | !.yarn/versions
74 |
75 | # Expo
76 | .expo/
77 |
78 | # Turborepo
79 | .turbo/
80 |
81 | # ignore lib
82 | lib/
83 |
84 | # other
85 | dist/
86 |
87 | # ignore prebuilt (MLC)
88 | prebuilt/
89 |
--------------------------------------------------------------------------------
/packages/mlc/android/src/main/java/com/callstack/ai/NativeMLCEnginePackage.kt:
--------------------------------------------------------------------------------
1 | package com.callstack.ai
2 |
3 | import com.facebook.react.BaseReactPackage
4 |
5 | import com.facebook.react.bridge.NativeModule
6 | import com.facebook.react.bridge.ReactApplicationContext
7 | import com.facebook.react.module.model.ReactModuleInfo
8 | import com.facebook.react.module.model.ReactModuleInfoProvider
9 | import java.util.HashMap
10 |
11 | class NativeMLCEnginePackage : BaseReactPackage() {
12 | override fun getModule(name: String, reactContext: ReactApplicationContext): NativeModule? =
13 | if (name == NativeMLCEngineModule.NAME) {
14 | NativeMLCEngineModule(reactContext)
15 | } else {
16 | null
17 | }
18 |
19 | override fun getReactModuleInfoProvider(): ReactModuleInfoProvider =
20 | ReactModuleInfoProvider {
21 |       val moduleInfos: MutableMap<String, ReactModuleInfo> = HashMap()
22 | moduleInfos[NativeMLCEngineModule.NAME] = ReactModuleInfo(
23 | NativeMLCEngineModule.NAME,
24 | NativeMLCEngineModule.NAME,
25 | canOverrideExistingModule = false,
26 | needsEagerInit = false,
27 | hasConstants = true,
28 | isCxxModule = false,
29 | isTurboModule = true
30 | )
31 |
32 | moduleInfos
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/packages/mlc/mlc-package-config-android.json:
--------------------------------------------------------------------------------
1 | {
2 | "device": "android",
3 | "model_list": [
4 | {
5 | "model": "HF://mlc-ai/Llama-3.2-1B-Instruct-q4f16_0-MLC",
6 | "model_id": "Llama-3.2-1B-Instruct",
7 | "estimated_vram_bytes": 1200000000,
8 | "bundle_weight": false,
9 | "overrides": {
10 | "context_window_size": 4096,
11 | "prefill_chunk_size": 512
12 | }
13 | },
14 | {
15 | "model": "HF://mlc-ai/Llama-3.2-3B-Instruct-q4f16_0-MLC",
16 | "model_id": "Llama-3.2-3B-Instruct",
17 | "estimated_vram_bytes": 2000000000,
18 | "bundle_weight": false,
19 | "overrides": {
20 | "context_window_size": 4096,
21 | "prefill_chunk_size": 256
22 | }
23 | },
24 | {
25 | "model": "HF://mlc-ai/Phi-3.5-mini-instruct-q4f16_0-MLC",
26 | "model_id": "Phi-3.5-mini-instruct",
27 | "estimated_vram_bytes": 2300000000,
28 | "bundle_weight": false,
29 | "overrides": {
30 | "context_window_size": 4096,
31 | "prefill_chunk_size": 256
32 | }
33 | },
34 | {
35 | "model": "HF://mlc-ai/Qwen2-1.5B-Instruct-q4f16_0-MLC",
36 | "model_id": "Qwen2-1.5B-Instruct",
37 | "estimated_vram_bytes": 600000000,
38 | "bundle_weight": false,
39 | "overrides": {
40 | "context_window_size": 2048,
41 | "prefill_chunk_size": 1024
42 | }
43 | }
44 | ]
45 | }
46 |
--------------------------------------------------------------------------------
/packages/mlc/mlc-package-config-ios.json:
--------------------------------------------------------------------------------
1 | {
2 | "device": "iphone",
3 | "model_list": [
4 | {
5 | "model": "HF://mlc-ai/Llama-3.2-1B-Instruct-q4f16_1-MLC",
6 | "model_id": "Llama-3.2-1B-Instruct",
7 | "estimated_vram_bytes": 1200000000,
8 | "bundle_weight": false,
9 | "overrides": {
10 | "context_window_size": 4096,
11 | "prefill_chunk_size": 512
12 | }
13 | },
14 | {
15 | "model": "HF://mlc-ai/Llama-3.2-3B-Instruct-q4f16_1-MLC",
16 | "model_id": "Llama-3.2-3B-Instruct",
17 | "estimated_vram_bytes": 2000000000,
18 | "bundle_weight": false,
19 | "overrides": {
20 | "context_window_size": 4096,
21 | "prefill_chunk_size": 256
22 | }
23 | },
24 | {
25 | "model": "HF://mlc-ai/Phi-3.5-mini-instruct-q4f16_1-MLC",
26 | "model_id": "Phi-3.5-mini-instruct",
27 | "estimated_vram_bytes": 2300000000,
28 | "bundle_weight": false,
29 | "overrides": {
30 | "context_window_size": 4096,
31 | "prefill_chunk_size": 256
32 | }
33 | },
34 | {
35 | "model": "HF://mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC",
36 | "model_id": "Qwen2.5-0.5B-Instruct",
37 | "estimated_vram_bytes": 600000000,
38 | "bundle_weight": false,
39 | "overrides": {
40 | "context_window_size": 2048,
41 | "prefill_chunk_size": 1024
42 | }
43 | }
44 | ]
45 | }
46 |
--------------------------------------------------------------------------------
/website/src/public/apple.svg:
--------------------------------------------------------------------------------
1 |
32 |
--------------------------------------------------------------------------------
/apps/expo-example/README.md:
--------------------------------------------------------------------------------
1 | # React Native AI - Example App
2 |
3 | This example app demonstrates both Apple Intelligence and MLC on-device AI capabilities.
4 |
5 | ## Prerequisites
6 |
7 | > [!IMPORTANT]
8 | > Before running this app, you need to build the MLC runtime binaries.
9 |
10 | ### Build MLC Runtime
11 |
12 | Navigate to the MLC package and run the build command for your target platform:
13 |
14 | **For iOS:**
15 | ```bash
16 | cd ../../packages/mlc
17 | bun run build:runtime:ios
18 | ```
19 |
20 | **For Android:**
21 | ```bash
22 | cd ../../packages/mlc
23 | bun run build:runtime:android
24 | ```
25 |
26 | > [!NOTE]
27 | > The build process requires additional setup. Run `./scripts/build-runtime.sh --help` in the MLC package directory to see detailed prerequisites for your platform.
28 |
29 | ## Running the App
30 |
31 | After building the MLC runtime, navigate back to this directory and run:
32 |
33 | **iOS:**
34 | ```bash
35 | bun run ios
36 | ```
37 |
38 | **Android:**
39 | ```bash
40 | bun run android
41 | ```
42 |
43 | ## Features
44 |
45 | - Apple Intelligence (iOS 17+): Text generation, embeddings, transcription, speech synthesis
46 | - MLC Models: Run Llama, Phi, Mistral, and Qwen models on-device
47 | - Tool calling and structured output support
48 | - Streaming text generation
49 |
50 | ## Troubleshooting
51 |
52 | > [!WARNING]
53 | > If you encounter runtime errors related to MLC:
54 | > 1. Ensure you've built the runtime binaries (see above)
55 | > 2. Run `npx expo prebuild --clean` if you've made configuration changes
56 | > 3. Check that your device has sufficient memory for the model you're using (1-8GB)
57 |
--------------------------------------------------------------------------------
/apps/expo-example/src/utils/audioUtils.ts:
--------------------------------------------------------------------------------
1 | import { addWAVHeader, AudioFormatType } from '@react-native-ai/apple'
2 | import type { AudioBuffer } from 'react-native-audio-api'
3 |
4 | /**
5 | * Merges multiple AudioBuffer objects into a single continuous Float32Array
6 | *
7 | * The Web Audio API recording typically provides audio data as a series of AudioBuffer chunks.
8 | * Each buffer contains PCM data that can be accessed via getChannelData().
9 | *
10 | * In the future, we should support multiple channels too.
11 | */
12 | export const mergeBuffersToFloat32Array = (
13 | buffers: AudioBuffer[]
14 | ): Float32Array => {
15 | if (buffers.length === 0) {
16 | return new Float32Array(0)
17 | }
18 |
19 | const totalLength = buffers.reduce((sum, buffer) => sum + buffer.length, 0)
20 | const mergedPCM = new Float32Array(totalLength)
21 |
22 | let offset = 0
23 | for (const buffer of buffers) {
24 | const channelData = buffer.getChannelData(0)
25 | mergedPCM.set(channelData, offset)
26 | offset += buffer.length
27 | }
28 |
29 | return mergedPCM
30 | }
31 |
32 | /**
33 | * Converts Float32Array PCM data to WAV file format
34 | *
35 | * Uses the addWAVHeader utility from @react-native-ai/apple library with appropriate format settings.
36 | * This ensures consistent WAV file generation across the application.
37 | */
38 | export const float32ArrayToWAV = (
39 | pcmData: Float32Array,
40 | sampleRate: number
41 | ): ArrayBuffer => {
42 | return addWAVHeader(pcmData.buffer, {
43 | sampleRate,
44 | channels: 1,
45 | bitsPerSample: 32,
46 | formatType: AudioFormatType.FLOAT,
47 | })
48 | }
49 |
--------------------------------------------------------------------------------
/website/src/docs/index.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | A collection of on-device AI primitives for React Native with first-class Vercel AI SDK support. Run AI models directly on users' devices for privacy-preserving, low-latency inference without server costs.
4 |
5 | ## Why On-Device AI?
6 |
7 | - **Privacy-first:** All processing happens locally—no data leaves the device
8 | - **Instant responses:** No network latency, immediate AI capabilities
9 | - **Offline-ready:** Works anywhere, even without internet
10 | - **Zero server costs:** No API fees or infrastructure to maintain
11 |
12 | ## Available Providers
13 |
14 | ### Apple Intelligence
15 |
16 | Native integration with Apple's on-device AI capabilities through `@react-native-ai/apple`:
17 |
18 | - **Text Generation** - Apple Foundation Models for chat and completion
19 | - **Embeddings** - NLContextualEmbedding for semantic search and similarity
20 | - **Transcription** - SpeechAnalyzer for fast, accurate speech-to-text
21 | - **Speech Synthesis** - AVSpeechSynthesizer for natural text-to-speech
22 |
23 | Production-ready with instant availability on supported iOS devices.
24 |
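A minimal text-generation example with the Apple provider (see the Apple section for full setup):

```ts
import { apple } from '@react-native-ai/apple'
import { generateText } from 'ai'

// Runs entirely on-device through Apple Foundation Models.
const { text } = await generateText({
  model: apple(),
  prompt: 'Summarize on-device AI in one sentence',
})
```
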
25 | ### MLC Engine (Work in Progress)
26 |
27 | Run any open-source LLM locally using MLC's optimized runtime through `@react-native-ai/mlc`:
28 |
29 | - Support for popular models like Llama, Mistral, and Phi
30 | - Cross-platform compatibility (iOS and Android)
31 |
32 | > [!NOTE]
33 | > MLC support is experimental and not recommended for production use yet.
34 |
35 | ### Google (Coming Soon)
36 |
37 | Support for Google's on-device models is planned for future releases.
38 |
39 | Get started by choosing the approach that fits your needs!
40 |
--------------------------------------------------------------------------------
/website/src/docs/apple/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | The Apple provider enables you to use Apple's on-device AI capabilities with the Vercel AI SDK in React Native applications. This includes language models, text embeddings, and other Apple-provided AI features that run entirely on-device for privacy and performance.
4 |
5 | ## Installation
6 |
7 | Install the Apple provider:
8 |
9 | ```bash
10 | npm install @react-native-ai/apple
11 | ```
12 |
13 | While you can use the Apple provider standalone, we recommend using it with the Vercel AI SDK for a much better developer experience. The AI SDK provides unified APIs, streaming support, and advanced features. To use with the AI SDK, you'll need v5 and [required polyfills](https://v5.ai-sdk.dev/docs/getting-started/expo#polyfills):
14 |
15 | ```bash
16 | npm install ai
17 | ```
18 |
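For reference, the example app in this repository registers the polyfills in a `polyfills.ts` file that is imported before the app entry point:

```typescript
// polyfills.ts (imported at the top of index.js, before the root component is registered)
import '@azure/core-asynciterator-polyfill'

import structuredClone from '@ungap/structured-clone'

if (!('structuredClone' in globalThis)) {
  globalThis.structuredClone = structuredClone
}
```
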
19 | ## Requirements
20 |
21 | - **React Native New Architecture** - Required for native module functionality
22 |
23 | > [!NOTE]
24 | > Different Apple AI features have varying iOS version requirements. Check the specific API documentation for compatibility details.
25 |
26 | ## Running on Simulator
27 |
28 | To use Apple Intelligence with the iOS Simulator, you need to enable it on your macOS system first. See the [Running on Simulator](./running-on-simulator) guide for detailed setup instructions.
29 |
30 | ## Basic Usage
31 |
32 | Import the Apple provider and use it with the AI SDK:
33 |
34 | ```typescript
35 | import { apple } from '@react-native-ai/apple';
36 | import { generateText } from 'ai';
37 |
38 | const result = await generateText({
39 | model: apple(),
40 | prompt: 'Explain quantum computing in simple terms'
41 | });
42 | ```
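
Structured output follows the same pattern through the AI SDK's `generateObject`. A minimal sketch, assuming `zod` is installed (the schema here is purely illustrative):

```typescript
import { apple } from '@react-native-ai/apple';
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: apple(),
  schema: z.object({
    title: z.string(),
    tags: z.array(z.string()),
  }),
  prompt: 'Suggest a title and tags for a post about on-device AI',
});
```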
43 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/NativeAppleSpeech.ts:
--------------------------------------------------------------------------------
1 | import type { TurboModule } from 'react-native'
2 | import { TurboModuleRegistry } from 'react-native'
3 | import type { UnsafeObject } from 'react-native/Libraries/Types/CodegenTypes'
4 |
5 | import { addWAVHeader } from './utils'
6 |
7 | export interface SpeechOptions {
8 | language?: string
9 | voice?: string
10 | }
11 |
12 | export interface VoiceInfo {
13 | identifier: string
14 | name: string
15 | language: string
16 | quality: 'default' | 'enhanced' | 'premium'
17 | isPersonalVoice: boolean
18 | isNoveltyVoice: boolean
19 | }
20 |
21 | export interface Spec extends TurboModule {
22 |   getVoices(): Promise<VoiceInfo[]>
23 | }
24 |
25 | const NativeAppleSpeech =
26 |   TurboModuleRegistry.getEnforcing<Spec>('NativeAppleSpeech')
27 |
28 | interface AudioResult {
29 | data: ArrayBufferLike
30 | sampleRate: number
31 | channels: number
32 | bitsPerSample: number
33 | formatType: number // 0 = integer, 1 = float
34 | }
35 |
36 | declare global {
37 | function __apple__llm__generate_audio__(
38 | text: string,
39 | options: UnsafeObject
40 | ): Promise<AudioResult>
41 | }
42 |
43 | export default {
44 | getVoices: NativeAppleSpeech.getVoices,
45 | generate: async (
46 | text: string,
47 | options: SpeechOptions = {}
48 | ): Promise<ArrayBuffer> => {
49 | const result = await globalThis.__apple__llm__generate_audio__(
50 | text,
51 | options
52 | )
53 | return addWAVHeader(result.data, {
54 | sampleRate: result.sampleRate,
55 | channels: result.channels,
56 | bitsPerSample: result.bitsPerSample,
57 | formatType: result.formatType,
58 | })
59 | },
60 | }
61 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/NativeAppleLLM.ts:
--------------------------------------------------------------------------------
1 | import type { TurboModule } from 'react-native'
2 | import { TurboModuleRegistry } from 'react-native'
3 | import type {
4 | EventEmitter,
5 | UnsafeObject,
6 | } from 'react-native/Libraries/Types/CodegenTypes'
7 |
8 | export interface AppleMessage {
9 | role: 'assistant' | 'system' | 'tool' | 'user'
10 | content: string
11 | }
12 |
13 | export interface AppleGenerationOptions {
14 | temperature?: number
15 | maxTokens?: number
16 | topP?: number
17 | topK?: number
18 | schema?: UnsafeObject
19 | tools?: UnsafeObject
20 | }
21 |
22 | export type StreamUpdateEvent = {
23 | streamId: string
24 | content: string
25 | }
26 |
27 | export type StreamCompleteEvent = {
28 | streamId: string
29 | }
30 |
31 | export type StreamErrorEvent = {
32 | streamId: string
33 | error: string
34 | }
35 |
36 | export interface Spec extends TurboModule {
37 | isAvailable(): boolean
38 | generateText(
39 | messages: AppleMessage[],
40 | options: AppleGenerationOptions
41 | ): Promise<
42 | (
43 | | { type: 'text'; text: string }
44 | | {
45 | type: 'tool-call'
46 | toolName: string
47 | input: string
48 | }
49 | | {
50 | type: 'tool-result'
51 | toolName: string
52 | output: string
53 | }
54 | )[]
55 | >
56 | generateStream(
57 | messages: AppleMessage[],
58 | options: AppleGenerationOptions
59 | ): string
60 | cancelStream(streamId: string): void
61 |
62 | onStreamUpdate: EventEmitter<StreamUpdateEvent>
63 | onStreamComplete: EventEmitter<StreamCompleteEvent>
64 | onStreamError: EventEmitter<StreamErrorEvent>
65 | }
66 |
67 | export default TurboModuleRegistry.getEnforcing<Spec>('NativeAppleLLM')
68 |
--------------------------------------------------------------------------------
/packages/apple-llm/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@react-native-ai/apple",
3 | "version": "0.11.0",
4 | "description": "Apple LLM provider for Vercel AI SDK",
5 | "main": "lib/commonjs/index",
6 | "module": "lib/module/index",
7 | "types": "lib/typescript/index.d.ts",
8 | "react-native": "src/index",
9 | "source": "src/index",
10 | "license": "MIT",
11 | "homepage": "https://github.com/callstackincubator/ai#readme",
12 | "author": "Mike Grabowski ",
13 | "scripts": {
14 | "clean": "del-cli lib",
15 | "typecheck": "tsc --noEmit",
16 | "prepare": "bob build"
17 | },
18 | "dependencies": {
19 | "@ai-sdk/provider": "^2.0.0",
20 | "@ai-sdk/provider-utils": "^3.0.10",
21 | "zod": "^4.2.1"
22 | },
23 | "peerDependencies": {
24 | "react-native": ">=0.76.0"
25 | },
26 | "devDependencies": {
27 | "react-native": "0.81.4"
28 | },
29 | "react-native-builder-bob": {
30 | "source": "src",
31 | "output": "lib",
32 | "targets": [
33 | "commonjs",
34 | "module",
35 | [
36 | "typescript",
37 | {
38 | "project": "tsconfig.build.json"
39 | }
40 | ]
41 | ]
42 | },
43 | "codegenConfig": {
44 | "name": "NativeAppleLLM",
45 | "type": "modules",
46 | "jsSrcsDir": "src",
47 | "ios": {
48 | "modulesProvider": {
49 | "NativeAppleLLM": "AppleLLM",
50 | "NativeAppleEmbeddings": "AppleEmbeddings",
51 | "NativeAppleTranscription": "AppleTranscription",
52 | "NativeAppleSpeech": "AppleSpeech",
53 | "NativeAppleUtils": "AppleUtils"
54 | }
55 | }
56 | },
57 | "keywords": [
58 | "react-native",
59 | "apple",
60 | "llm",
61 | "ai",
62 | "sdk",
63 | "vercel"
64 | ]
65 | }
66 |
--------------------------------------------------------------------------------
/website/src/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | pageType: home
3 |
4 | hero:
5 | name: On-device LLMs in React Native
6 | text:
7 | tagline: react-native-ai brings on-device LLM capabilities to mobile React Native apps.
8 | actions:
9 | - theme: brand
10 | text: Quick Start
11 | link: /docs/
12 | - theme: alt
13 | text: GitHub
14 | link: https://github.com/callstackincubator/ai
15 | image:
16 | src:
17 | light: /logo-light.png
18 | dark: /logo-dark.png
19 | alt: Logo
20 | features:
21 | - title: On-device LLM execution
22 | details: Run large language models directly on mobile devices without requiring cloud infrastructure or internet connectivity.
23 | icon:
24 | - title: Vercel AI SDK compatibility
25 | details: Seamless integration with Vercel AI SDK, allowing you to use familiar functions like streamText and generateText with local models.
26 | icon:
27 | - title: Apple Foundation Models
28 | details: Native support for Apple's Foundation Models on iOS 26+ devices with Apple Intelligence, providing seamless integration with Apple's on-device AI capabilities.
29 | icon:
30 | - title: MLC LLM Engine powered
31 | details: Built on top of MLC LLM Engine, providing optimized performance and efficient model execution on mobile devices.
32 | icon:
33 | - title: Cross-platform support
34 | details: Full support for both iOS and Android platforms with platform-specific optimizations and configurations.
35 | icon:
36 | - title: Privacy-first approach
37 | details: All AI processing happens locally on the device, ensuring user data privacy and eliminating the need for cloud-based AI services.
38 | icon:
39 | ---
40 |
--------------------------------------------------------------------------------
/packages/llama/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@react-native-ai/llama",
3 | "version": "0.10.0",
4 | "description": "llama.rn provider for Vercel AI SDK",
5 | "main": "lib/commonjs/index",
6 | "module": "lib/module/index",
7 | "types": "lib/typescript/index.d.ts",
8 | "react-native": "src/index",
9 | "source": "src/index",
10 | "files": [
11 | "src",
12 | "lib",
13 | "!**/__tests__",
14 | "!**/__fixtures__",
15 | "!**/__mocks__",
16 | "!**/.*"
17 | ],
18 | "license": "MIT",
19 | "homepage": "https://github.com/callstackincubator/ai#readme",
20 | "repository": {
21 | "type": "git",
22 | "url": "git+https://github.com/callstackincubator/ai.git"
23 | },
24 | "bugs": {
25 | "url": "https://github.com/callstackincubator/ai/issues"
26 | },
27 | "publishConfig": {
28 | "registry": "https://registry.npmjs.org/"
29 | },
30 | "author": "Szymon Rybczak (https://github.com/szymonrybczak)",
31 | "scripts": {
32 | "clean": "del-cli lib",
33 | "typecheck": "tsc --noEmit",
34 | "prepare": "bob build"
35 | },
36 | "dependencies": {
37 | "@ai-sdk/provider": "^2.0.0",
38 | "@ai-sdk/provider-utils": "^3.0.10",
39 | "react-native-blob-util": "^0.24.5",
40 | "zod": "^4.0.0"
41 | },
42 | "peerDependencies": {
43 | "react-native": ">=0.76.0",
44 | "llama.rn": "^0.10.0-rc.0"
45 | },
46 | "devDependencies": {
47 | "react-native": "0.81.4",
48 | "llama.rn": "^0.10.0-rc.0"
49 | },
50 | "react-native-builder-bob": {
51 | "source": "src",
52 | "output": "lib",
53 | "targets": [
54 | "commonjs",
55 | "module",
56 | [
57 | "typescript",
58 | {
59 | "project": "tsconfig.build.json"
60 | }
61 | ]
62 | ]
63 | },
64 | "keywords": [
65 | "react-native",
66 | "llama",
67 | "llm",
68 | "ai",
69 | "sdk",
70 | "vercel",
71 | "gguf",
72 | "on-device"
73 | ]
74 | }
75 |
--------------------------------------------------------------------------------
/apps/expo-example/nativewind-env.d.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ScrollViewProps,
3 | ScrollViewPropsAndroid,
4 | ScrollViewPropsIOS,
5 | Touchable,
6 | VirtualizedListProps,
7 | } from 'react-native'
8 |
9 | declare module '@react-native/virtualized-lists' {
10 | export interface VirtualizedListWithoutRenderItemProps<
11 | // eslint-disable-next-line @typescript-eslint/no-unused-vars
12 | ItemT,
13 | > extends ScrollViewProps {
14 | ListFooterComponentClassName?: string
15 | ListHeaderComponentClassName?: string
16 | }
17 | }
18 |
19 | declare module 'react-native' {
20 | interface ScrollViewProps
21 | extends ViewProps, ScrollViewPropsIOS, ScrollViewPropsAndroid, Touchable {
22 | contentContainerClassName?: string
23 | indicatorClassName?: string
24 | }
25 | interface FlatListProps<ItemT> extends VirtualizedListProps<ItemT> {
26 | columnWrapperClassName?: string
27 | }
28 | interface ImageBackgroundProps extends ImagePropsBase {
29 | imageClassName?: string
30 | }
31 | interface ImagePropsBase {
32 | className?: string
33 | cssInterop?: boolean
34 | }
35 | interface ViewProps {
36 | className?: string
37 | cssInterop?: boolean
38 | }
39 | interface TextInputProps {
40 | placeholderClassName?: string
41 | }
42 | interface TextProps {
43 | className?: string
44 | cssInterop?: boolean
45 | }
46 | interface SwitchProps {
47 | className?: string
48 | cssInterop?: boolean
49 | }
50 | interface InputAccessoryViewProps {
51 | className?: string
52 | cssInterop?: boolean
53 | }
54 | interface TouchableWithoutFeedbackProps {
55 | className?: string
56 | cssInterop?: boolean
57 | }
58 | interface StatusBarProps {
59 | className?: string
60 | cssInterop?: boolean
61 | }
62 | interface KeyboardAvoidingViewProps extends ViewProps {
63 | contentContainerClassName?: string
64 | }
65 | interface ModalBaseProps {
66 | presentationClassName?: string
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@react-native-ai/monorepo",
3 | "private": true,
4 | "scripts": {
5 | "typecheck": "bun run --filter='*' typecheck",
6 | "lint": "eslint \"**/*.{js,ts,tsx}\"",
7 | "release": "release-it",
8 | "build": "bun run --filter='@react-native-ai/*' prepare"
9 | },
10 | "repository": {
11 | "type": "git",
12 | "url": "git+https://github.com/callstackincubator/ai.git"
13 | },
14 | "license": "MIT",
15 | "devDependencies": {
16 | "@babel/plugin-transform-strict-mode": "^7.27.1",
17 | "@commitlint/config-conventional": "^20.2.0",
18 | "@react-native/babel-preset": "^0.81.4",
19 | "@react-native/typescript-config": "^0.81.4",
20 | "@release-it-plugins/workspaces": "^4.2.0",
21 | "@release-it/conventional-changelog": "^5.0.0",
22 | "@typescript-eslint/eslint-plugin": "^7.3.1",
23 | "@typescript-eslint/parser": "^7.3.1",
24 | "commitlint": "^20.2.0",
25 | "del-cli": "^7.0.0",
26 | "eslint": "^9.31.0",
27 | "eslint-config-expo": "^9.2.0",
28 | "eslint-config-prettier": "^10.1.5",
29 | "eslint-plugin-import": "^2.32.0",
30 | "eslint-plugin-prettier": "^5.5.1",
31 | "eslint-plugin-simple-import-sort": "^12.1.1",
32 | "prettier": "^3.7.4",
33 | "react-native-builder-bob": "^0.40.17",
34 | "release-it": "^19.1.0",
35 | "typescript": "^5.8.3"
36 | },
37 | "workspaces": [
38 | "packages/*",
39 | "apps/expo-example"
40 | ],
41 | "packageManager": "bun@1.2.19",
42 | "release-it": {
43 | "git": {
44 | "commitMessage": "chore: release ${version}",
45 | "tagName": "v${version}"
46 | },
47 | "npm": false,
48 | "github": {
49 | "release": true
50 | },
51 | "plugins": {
52 | "@release-it/conventional-changelog": {
53 | "preset": "angular"
54 | },
55 | "@release-it-plugins/workspaces": {
56 | "workspaces": [
57 | "packages/*"
58 | ]
59 | }
60 | }
61 | },
62 | "version": "0.11.0"
63 | }
64 |
--------------------------------------------------------------------------------
/packages/apple-llm/ios/AppleLLMError.swift:
--------------------------------------------------------------------------------
1 | //
2 | // AppleLLMError.swift
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 13/07/2025.
6 | //
7 |
8 | import Foundation
9 |
10 | enum AppleLLMError: Error, LocalizedError {
11 | case modelUnavailable
12 | case unsupportedOS
13 | case generationError(String)
14 | case streamNotFound(String)
15 | case invalidMessage(String)
16 | case conflictingSamplingMethods
17 | case invalidSchema(String)
18 | case toolCallError(Error)
19 | case unknownToolCallError
20 |
21 | var errorDescription: String? {
22 | switch self {
23 | case .modelUnavailable:
24 | return "Apple Intelligence model is not available"
25 | case .unsupportedOS:
26 | return "Apple Intelligence not available on this iOS version"
27 | case .generationError(let message):
28 | return "Generation error: \(message)"
29 | case .streamNotFound(let id):
30 | return "Stream with ID \(id) not found"
31 | case .invalidMessage(let role):
32 | return "Invalid message role '\(role)'. Supported roles are: system, user, assistant"
33 | case .conflictingSamplingMethods:
34 | return "Cannot specify both topP and topK parameters simultaneously. Please use only one sampling method."
35 | case .invalidSchema(let message):
36 | return "Invalid schema: \(message)"
37 | case .toolCallError(let error):
38 | return "Error calling tool: \(error.localizedDescription)"
39 | case .unknownToolCallError:
40 | return "Unknown tool call error"
41 | }
42 |
43 | }
44 |
45 | var code: Int {
46 | switch self {
47 | case .modelUnavailable: return 1
48 | case .unsupportedOS: return 2
49 | case .generationError: return 3
50 | case .streamNotFound: return 4
51 | case .invalidMessage: return 5
52 | case .conflictingSamplingMethods: return 6
53 | case .invalidSchema: return 7
54 | case .unknownToolCallError: return 8
55 | case .toolCallError: return 9
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/packages/apple-llm/ios/embeddings/AppleEmbeddings.mm:
--------------------------------------------------------------------------------
1 | //
2 | // AppleEmbeddings.mm
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 27/07/2025.
6 | //
7 |
8 | #if __has_include("AppleLLM/AppleLLM-Swift.h")
9 | #import "AppleLLM/AppleLLM-Swift.h"
10 | #else
11 | #import "AppleLLM-Swift.h"
12 | #endif
13 |
14 | #import
15 | #import
16 | #import
17 |
18 | #import
19 |
20 | #import
21 |
22 | @interface AppleEmbeddings : NativeAppleEmbeddingsSpecBase <NativeAppleEmbeddingsSpec>
23 | @property (strong, nonatomic) AppleEmbeddingsImpl *embeddings;
24 | @end
25 |
26 | using namespace facebook;
27 | using namespace JS::NativeAppleLLM;
28 |
29 | @implementation AppleEmbeddings
30 |
31 | - (instancetype)init {
32 | self = [super init];
33 | if (self) {
34 | _embeddings = [AppleEmbeddingsImpl new];
35 | }
36 | return self;
37 | }
38 |
39 | + (NSString *)moduleName {
40 | return @"NativeAppleEmbeddings";
41 | }
42 |
43 | - (std::shared_ptr<react::TurboModule>)getTurboModule:(const react::ObjCTurboModule::InitParams &)params {
44 | return std::make_shared<react::NativeAppleEmbeddingsSpecJSI>(params);
45 | }
46 |
47 | - (void)getInfo:(nonnull NSString *)language resolve:(nonnull RCTPromiseResolveBlock)resolve reject:(nonnull RCTPromiseRejectBlock)reject {
48 | [_embeddings getInfo:language resolve:resolve reject:reject];
49 | }
50 |
51 | - (void)prepare:(nonnull NSString *)language resolve:(nonnull RCTPromiseResolveBlock)resolve reject:(nonnull RCTPromiseRejectBlock)reject {
52 | [_embeddings prepare:language resolve:resolve reject:reject];
53 | }
54 |
55 | - (void)generateEmbeddings:(nonnull NSArray *)sentences language:(nonnull NSString *)language resolve:(nonnull RCTPromiseResolveBlock)resolve reject:(nonnull RCTPromiseRejectBlock)reject {
56 | [_embeddings generateEmbeddings:sentences language:language resolve:resolve reject:reject];
57 | }
58 |
59 | @end
60 |
--------------------------------------------------------------------------------
/apps/expo-example/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@react-native-ai/example",
3 | "version": "1.0.0",
4 | "main": "index.js",
5 | "scripts": {
6 | "start": "expo start",
7 | "android": "expo run:android",
8 | "ios": "expo run:ios",
9 | "web": "expo start --web",
10 | "typecheck": "tsc --noEmit"
11 | },
12 | "dependencies": {
13 | "@azure/core-asynciterator-polyfill": "^1.0.2",
14 | "@bottom-tabs/react-navigation": "^0.10.2",
15 | "@react-native-ai/apple": "workspace:*",
16 | "@react-native-ai/llama": "workspace:*",
17 | "@react-native-ai/mlc": "workspace:*",
18 | "@react-native-picker/picker": "^2.11.4",
19 | "@react-native-vector-icons/get-image": "^12.3.0",
20 | "@react-native-vector-icons/material-icons": "^12.4.0",
21 | "@react-navigation/bottom-tabs": "^7.9.0",
22 | "@react-navigation/native": "^7.1.16",
23 | "@react-navigation/native-stack": "^7.3.25",
24 | "@ungap/structured-clone": "^1.3.0",
25 | "ai": "^5.0.56",
26 | "expo": "^54.0.8",
27 | "expo-battery": "~10.0.7",
28 | "expo-build-properties": "~1.0.8",
29 | "expo-calendar": "~15.0.7",
30 | "expo-clipboard": "~8.0.7",
31 | "expo-document-picker": "~14.0.7",
32 | "expo-status-bar": "~3.0.8",
33 | "llama.rn": "^0.10.0-rc.0",
34 | "nativewind": "^4.1.23",
35 | "react": "19.1.0",
36 | "react-native": "0.81.4",
37 | "react-native-audio-api": "^0.7.1",
38 | "react-native-blob-util": "^0.24.5",
39 | "react-native-bottom-tabs": "^0.11.0",
40 | "react-native-keyboard-controller": "1.18.5",
41 | "react-native-reanimated": "~4.1.0",
42 | "react-native-safe-area-context": "~5.6.0",
43 | "react-native-screens": "~4.16.0",
44 | "react-native-worklets": "^0.6.1",
45 | "web-streams-polyfill": "^4.1.0",
46 | "zod": "^4.0.0"
47 | },
48 | "devDependencies": {
49 | "@babel/core": "^7.20.0",
50 | "@types/react": "~19.1.0",
51 | "prettier-plugin-tailwindcss": "^0.5.11",
52 | "tailwindcss": "^3.4.17"
53 | },
54 | "private": true
55 | }
56 |
--------------------------------------------------------------------------------
/apps/expo-example/app.json:
--------------------------------------------------------------------------------
1 | {
2 | "expo": {
3 | "name": "@react-native-ai example",
4 | "slug": "rn-ai-example",
5 | "version": "1.0.0",
6 | "orientation": "portrait",
7 | "icon": "./assets/icon.png",
8 | "userInterfaceStyle": "light",
9 | "newArchEnabled": true,
10 | "splash": {
11 | "image": "./assets/splash-icon.png",
12 | "resizeMode": "contain",
13 | "backgroundColor": "#ffffff"
14 | },
15 | "ios": {
16 | "supportsTablet": true,
17 | "bundleIdentifier": "com.callstack.ai.example"
18 | },
19 | "android": {
20 | "adaptiveIcon": {
21 | "foregroundImage": "./assets/adaptive-icon.png",
22 | "backgroundColor": "#ffffff"
23 | },
24 | "edgeToEdgeEnabled": true,
25 | "package": "com.callstack.ai.example"
26 | },
27 | "web": {
28 | "favicon": "./assets/favicon.png"
29 | },
30 | "plugins": [
31 | [
32 | "expo-calendar",
33 | {
34 | "calendarPermission": "The app needs to access your calendar."
35 | }
36 | ],
37 | "react-native-bottom-tabs",
38 | [
39 | "expo-build-properties",
40 | {
41 | "ios": {
42 | "extraPods": [
43 | {
44 | "name": "SDWebImage",
45 | "modular_headers": true
46 | },
47 | {
48 | "name": "SDWebImageSVGCoder",
49 | "modular_headers": true
50 | }
51 | ]
52 | }
53 | }
54 | ],
55 | [
56 | "react-native-audio-api",
57 | {
58 | "iosBackgroundMode": true,
59 | "iosMicrophonePermission": "This app requires access to the microphone to record audio.",
60 | "androidPermissions": [
61 | "android.permission.MODIFY_AUDIO_SETTINGS",
62 | "android.permission.FOREGROUND_SERVICE",
63 | "android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK"
64 | ],
65 | "androidForegroundService": true,
66 | "androidFSTypes": ["mediaPlayback"]
67 | }
68 | ],
69 | "@react-native-ai/mlc"
70 | ]
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/LLMEngine.mm:
--------------------------------------------------------------------------------
1 | //
2 | // LLMEngine.mm
3 | // Pods
4 | //
5 |
6 | #import "LLMEngine.h"
7 | #import "BackgroundWorker.h"
8 | #import "EngineState.h"
9 |
10 | @interface LLMEngine ()
11 |
12 | @property(nonatomic, strong) EngineState* state;
13 | @property(nonatomic, strong) JSONFFIEngine* jsonFFIEngine;
14 | @property(nonatomic, strong) NSMutableArray* threads;
15 |
16 | @end
17 |
18 | @implementation LLMEngine
19 |
20 | - (instancetype)init {
21 | self = [super init];
22 | if (self) {
23 | _state = [[EngineState alloc] init];
24 | _jsonFFIEngine = [[JSONFFIEngine alloc] init];
25 | _threads = [NSMutableArray array];
26 |
27 | [_jsonFFIEngine initBackgroundEngine:^(NSString* _Nullable result) {
28 | [self.state streamCallbackWithResult:result];
29 | }];
30 |
31 | BackgroundWorker* backgroundWorker = [[BackgroundWorker alloc] initWithTask:^{
32 | [NSThread setThreadPriority:1.0];
33 | [self.jsonFFIEngine runBackgroundLoop];
34 | }];
35 |
36 | BackgroundWorker* backgroundStreamBackWorker = [[BackgroundWorker alloc] initWithTask:^{
37 | [self.jsonFFIEngine runBackgroundStreamBackLoop];
38 | }];
39 |
40 | backgroundWorker.qualityOfService = NSQualityOfServiceUserInteractive;
41 | [_threads addObject:backgroundWorker];
42 | [_threads addObject:backgroundStreamBackWorker];
43 | [backgroundWorker start];
44 | [backgroundStreamBackWorker start];
45 | }
46 | return self;
47 | }
48 |
49 | - (void)dealloc {
50 | [self.jsonFFIEngine exitBackgroundLoop];
51 | }
52 |
53 | - (void)reloadWithModelPath:(NSString*)modelPath modelLib:(NSString*)modelLib {
54 | NSString* engineConfig =
55 | [NSString stringWithFormat:@"{\"model\": \"%@\", \"model_lib\": \"system://%@\", \"mode\": \"interactive\"}", modelPath, modelLib];
56 | [self.jsonFFIEngine reload:engineConfig];
57 | }
58 |
59 | - (void)reset {
60 | [self.jsonFFIEngine reset];
61 | }
62 |
63 | - (void)unload {
64 | [self.jsonFFIEngine unload];
65 | }
66 |
67 | - (NSString*)chatCompletionWithMessages:(NSArray*)messages options:(NSDictionary*)options completion:(void (^)(NSDictionary* response))completion {
68 | return [self.state chatCompletionWithJSONFFIEngine:self.jsonFFIEngine request:options completion:completion];
69 | }
70 |
71 | - (void)cancelRequest:(NSString *)requestId {
72 | [self.state cancelRequest:requestId withJSONFFIEngine:self.jsonFFIEngine];
73 | }
74 |
75 | @end
76 |
--------------------------------------------------------------------------------
/apps/expo-example/src/tools.ts:
--------------------------------------------------------------------------------
1 | import { tool } from 'ai'
2 | import * as Calendar from 'expo-calendar'
3 | import { z } from 'zod'
4 |
5 | /**
6 | * Creates a new calendar event with specified title, date, time and duration
7 | */
8 | export const createCalendarEvent = tool({
9 | description: 'Create a new calendar event',
10 | inputSchema: z.object({
11 | title: z.string().describe('Event title'),
12 | date: z.string().describe('Event date (YYYY-MM-DD)'),
13 | time: z.string().optional().describe('Event time (HH:MM)'),
14 | duration: z.number().optional().describe('Duration in minutes'),
15 | }),
16 | execute: async ({ title, date, time, duration = 60 }) => {
17 | await Calendar.requestCalendarPermissionsAsync()
18 |
19 | const calendars = await Calendar.getCalendarsAsync(
20 | Calendar.EntityTypes.EVENT
21 | )
22 |
23 | const eventDate = new Date(date)
24 | if (time) {
25 | const [hours, minutes] = time.split(':').map(Number)
26 | eventDate.setHours(hours, minutes)
27 | }
28 |
29 | await Calendar.createEventAsync(calendars[0].id, {
30 | title,
31 | startDate: eventDate,
32 | endDate: new Date(eventDate.getTime() + duration * 60 * 1000),
33 | })
34 |
35 | return { message: `Created "${title}"` }
36 | },
37 | })
38 |
39 | /**
40 | * Retrieves upcoming calendar events for a specified number of days
41 | */
42 | export const checkCalendarEvents = tool({
43 | description: 'Check upcoming calendar events',
44 | inputSchema: z.object({
45 | days: z.number().optional().describe('Number of days to look ahead'),
46 | }),
47 | execute: async ({ days = 7 }) => {
48 | await Calendar.requestCalendarPermissionsAsync()
49 |
50 | const calendars = await Calendar.getCalendarsAsync(
51 | Calendar.EntityTypes.EVENT
52 | )
53 |
54 | const startDate = new Date()
55 | const endDate = new Date(startDate.getTime() + days * 24 * 60 * 60 * 1000)
56 |
57 | const events = await Calendar.getEventsAsync(
58 | calendars.map((cal) => cal.id),
59 | startDate,
60 | endDate
61 | )
62 |
63 | return events.map((event) => ({
64 | title: event.title,
65 | date: event.startDate,
66 | }))
67 | },
68 | })
69 |
70 | /**
71 | * Get current time
72 | */
73 | export const getCurrentTime = tool({
74 | description: 'Get current time and date',
75 | inputSchema: z.object({}),
76 | execute: async () => {
77 | return `Current time is: ${new Date().toUTCString()}`
78 | },
79 | })
80 |
--------------------------------------------------------------------------------
/website/src/docs/llama/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | The Llama provider enables you to run GGUF models directly on-device in React Native applications using [llama.rn](https://github.com/mybigday/llama.rn). This allows you to download and run any GGUF model from HuggingFace for privacy, performance, and offline capabilities.
4 |
5 | ## Installation
6 |
7 | Install the Llama provider and its peer dependencies:
8 |
9 | ```bash
10 | npm install @react-native-ai/llama llama.rn react-native-blob-util
11 | ```
12 |
13 | While you can use the Llama provider standalone, we recommend using it with the Vercel AI SDK for a much better developer experience. The AI SDK provides unified APIs, streaming support, and advanced features. To use with the AI SDK, you'll need v5 and [required polyfills](https://v5.ai-sdk.dev/docs/getting-started/expo#polyfills):
14 |
15 | ```bash
16 | npm install ai
17 | ```
18 |
19 | ## Requirements
20 |
21 | - **React Native >= 0.76.0** - Required for native module functionality
22 | - **llama.rn >= 0.10.0** - The underlying llama.cpp bindings
23 |
24 | ## Basic Usage
25 |
26 | Import the Llama provider and use it with the AI SDK:
27 |
28 | ```typescript
29 | import { llama } from '@react-native-ai/llama'
30 | import { streamText } from 'ai'
31 |
32 | // Create model instance (Model ID format: "owner/repo/filename.gguf")
33 | const model = llama.languageModel(
34 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
35 | )
36 |
37 | // Download from HuggingFace
38 | await model.download()
39 |
40 | // Initialize model (loads into memory)
41 | await model.prepare()
42 |
43 | const { textStream } = streamText({
44 | model,
45 | prompt: 'Explain quantum computing in simple terms',
46 | })
47 |
48 | for await (const delta of textStream) {
49 | console.log(delta)
50 | }
51 |
52 | // Cleanup when done
53 | await model.unload()
54 | ```
55 |
56 | ## Model ID Format
57 |
58 | Models are identified using the HuggingFace format: `owner/repo/filename.gguf`
59 |
60 | For example:
61 |
62 | - `ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf`
63 | - `Qwen/Qwen2.5-3B-Instruct-GGUF/qwen2.5-3b-instruct-q3_k_m.gguf`
64 | - `lmstudio-community/gemma-2-2b-it-GGUF/gemma-2-2b-it-Q3_K_M.gguf`
65 |
66 | You can find GGUF models on [HuggingFace](https://huggingface.co/models?library=gguf).
67 |
68 | ## Next Steps
69 |
70 | - **[Model Management](./model-management.md)** - Complete guide to model lifecycle, downloading, and API reference
71 | - **[Generating](./generating.md)** - Learn how to generate text and stream responses
72 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/EngineState.mm:
--------------------------------------------------------------------------------
1 | //
2 | // EngineState.mm
3 | // Pods
4 | //
5 |
6 | #import "EngineState.h"
7 | #import "JSONFFIEngine.h"
8 |
9 | /**
10 | * EngineState manages the request lifecycle and callback routing for chat completions.
11 | * It maintains a mapping between request IDs and their corresponding completion handlers,
12 | * ensuring that streaming responses are properly routed back to the correct caller.
13 | * This class handles JSON serialization/deserialization and coordinates between
14 | * the high-level API and the low-level JSON FFI engine.
15 | */
16 | @implementation EngineState
17 |
18 | - (instancetype)init {
19 | self = [super init];
20 | if (self) {
21 | _requestStateMap = [NSMutableDictionary new];
22 | }
23 | return self;
24 | }
25 |
26 | - (NSString*)chatCompletionWithJSONFFIEngine:(JSONFFIEngine*)jsonFFIEngine
27 | request:(NSDictionary*)request
28 | completion:(void (^)(NSDictionary* response))completion {
29 | NSError* error;
30 | NSData* jsonData = [NSJSONSerialization dataWithJSONObject:request options:0 error:&error];
31 | if (error) {
32 | @throw [NSException exceptionWithName:@"JSONSerializationException"
33 | reason:[NSString stringWithFormat:@"Failed to serialize request: %@",
34 | error.localizedDescription]
35 | userInfo:nil];
36 | }
37 |
38 | NSString* jsonRequest = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding];
39 | NSString* requestID = [[NSUUID UUID] UUIDString];
40 |
41 | self.requestStateMap[requestID] = completion;
42 |
43 | [jsonFFIEngine chatCompletion:jsonRequest requestID:requestID];
44 |
45 | return requestID;
46 | }
47 |
48 | - (void)streamCallbackWithResult:(NSString*)result {
49 | NSError* error;
50 | NSArray* responses = [NSJSONSerialization JSONObjectWithData:[result dataUsingEncoding:NSUTF8StringEncoding] options:0 error:&error];
51 | if (error) {
52 | NSLog(@"Error decoding JSON: %@", error);
53 | return;
54 | }
55 |
56 | for (NSDictionary* res in responses) {
57 | NSString* requestID = res[@"id"];
58 | void (^completion)(NSDictionary*) = self.requestStateMap[requestID];
59 | if (completion) {
60 | completion(res);
61 | if (res[@"usage"]) {
62 | [self.requestStateMap removeObjectForKey:requestID];
63 | }
64 | }
65 | }
66 | }
67 |
68 | - (void)cancelRequest:(NSString *)requestId withJSONFFIEngine:(JSONFFIEngine *)jsonFFIEngine {
69 | [self.requestStateMap removeObjectForKey:requestId];
70 | [jsonFFIEngine abort:requestId];
71 | }
72 |
73 | @end
74 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/utils.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * WAV file utilities for handling audio data
3 | *
4 | * This module provides utilities for creating WAV file headers and converting PCM data to WAV format.
5 | */
6 |
7 | export const AudioFormatType = {
8 | INTEGER: 0,
9 | FLOAT: 1,
10 | } as const
11 |
12 | export interface WAVOptions {
13 | sampleRate: number
14 | channels?: number
15 | bitsPerSample?: number
16 | formatType?: number // 0 = integer, 1 = float (use AudioFormatType constants)
17 | }
18 |
19 | /**
20 | * Creates a WAV file header with the specified parameters
21 | */
22 | const createWAVHeader = (
23 | pcmDataLength: number,
24 | options: WAVOptions
25 | ): ArrayBuffer => {
26 | const {
27 | sampleRate,
28 | channels = 1,
29 | bitsPerSample = 16,
30 | formatType = AudioFormatType.INTEGER,
31 | } = options
32 |
33 | const buffer = new ArrayBuffer(44)
34 | const view = new DataView(buffer)
35 |
36 | const byteRate = (sampleRate * channels * bitsPerSample) / 8
37 | const blockAlign = (channels * bitsPerSample) / 8
38 |
39 | // Helper function to write ASCII strings
40 | const writeString = (offset: number, string: string) => {
41 | for (let i = 0; i < string.length; i++) {
42 | view.setUint8(offset + i, string.charCodeAt(i))
43 | }
44 | }
45 |
46 | // WAV file header (44 bytes total)
47 | writeString(0, 'RIFF') // ChunkID
48 | view.setUint32(4, 36 + pcmDataLength, true) // ChunkSize
49 | writeString(8, 'WAVE') // Format
50 | writeString(12, 'fmt ') // Subchunk1ID
51 | view.setUint32(16, 16, true) // Subchunk1Size (16 for PCM)
52 | view.setUint16(20, formatType === AudioFormatType.FLOAT ? 3 : 1, true) // AudioFormat (3 = IEEE float, 1 = PCM)
53 | view.setUint16(22, channels, true) // NumChannels
54 | view.setUint32(24, sampleRate, true) // SampleRate
55 | view.setUint32(28, byteRate, true) // ByteRate
56 | view.setUint16(32, blockAlign, true) // BlockAlign
57 | view.setUint16(34, bitsPerSample, true) // BitsPerSample
58 | writeString(36, 'data') // Subchunk2ID
59 | view.setUint32(40, pcmDataLength, true) // Subchunk2Size
60 |
61 | return buffer
62 | }
63 |
64 | /**
65 | * Adds WAV header to PCM data
66 | *
67 | * Takes PCM data (either raw bytes from native APIs or Float32Array from Web Audio API)
68 | * and wraps it in a proper WAV file structure.
69 | */
70 | export const addWAVHeader = (
71 | pcmData: ArrayBufferLike,
72 | options: WAVOptions
73 | ): ArrayBuffer => {
74 | const header = createWAVHeader(pcmData.byteLength, options)
75 |
76 | const wavBuffer = new ArrayBuffer(header.byteLength + pcmData.byteLength)
77 | const wavView = new Uint8Array(wavBuffer)
78 |
79 | wavView.set(new Uint8Array(header), 0)
80 | wavView.set(new Uint8Array(pcmData), header.byteLength)
81 |
82 | return wavBuffer
83 | }
84 |
--------------------------------------------------------------------------------
/website/src/docs/mlc/model-management.md:
--------------------------------------------------------------------------------
1 | # Model Management
2 |
3 | This guide covers the complete lifecycle of MLC models - from discovery and download to cleanup and removal.
4 |
5 | ## Available Models
6 |
7 | The package includes a prebuilt runtime optimized for the following models:
8 |
9 | | Model ID | Size | Best For |
10 | |----------|------|----------|
11 | | `Qwen2.5-0.5B-Instruct` | ~600MB | Fast responses, basic conversations |
12 | | `Llama-3.2-1B-Instruct` | ~1.2GB | Balanced performance and quality |
13 | | `Llama-3.2-3B-Instruct` | ~2GB | High quality responses, complex reasoning |
14 | | `Phi-3.5-mini-instruct` | ~2.3GB | Code generation, technical tasks |
15 |
16 | > **Note**: These models use q4f16_1 quantization (4-bit weights, 16-bit activations) optimized for mobile devices. For other models, you'll need to build MLC from source (documentation coming soon).
17 |
18 | ## Model Lifecycle
19 |
20 | ### Discovering Models
21 |
22 | Get the list of models included in the runtime:
23 |
24 | ```typescript
25 | import { MLCEngine } from '@react-native-ai/mlc';
26 |
27 | const models = await MLCEngine.getModels();
28 |
29 | console.log('Available models:', models);
30 | // Output: [{ model_id: 'Llama-3.2-1B-Instruct' }, ...]
31 | ```
32 |
33 | ### Creating Model Instance
34 |
35 | Create a model instance using the `mlc.languageModel()` method:
36 |
37 | ```typescript
38 | import { mlc } from '@react-native-ai/mlc';
39 |
40 | const model = mlc.languageModel('Llama-3.2-1B-Instruct');
41 | ```
42 |
43 | ### Downloading Models
44 |
45 | Models need to be downloaded to the device before use.
46 |
47 | ```typescript
48 | // Using the model instance created with mlc.languageModel() above
49 |
50 | await model.download();
51 |
52 | console.log('Download complete!');
53 | ```
54 |
55 | You can track download progress:
56 |
57 | ```typescript
58 | await model.download((event) => {
59 | console.log(`Download: ${event.percentage}%`);
60 | });
61 | ```
62 |
63 | ### Preparing Models
64 |
65 | After downloading, prepare the model for inference:
66 |
67 | ```typescript
68 | await model.prepare();
69 | ```
70 |
71 | ### Using Models
72 |
73 | Once prepared, use the model with AI SDK functions:
74 |
75 | ```typescript
76 | import { generateText } from 'ai';
77 |
78 | const result = await generateText({
79 | model,
80 | prompt: 'Hello! Introduce yourself briefly.',
81 | });
82 |
83 | console.log(result.text);
84 | ```
85 |
86 | ### Unloading Models
87 |
88 | Unload the current model from memory to free resources:
89 |
90 | ```typescript
91 | await model.unload();
92 | ```
93 |
94 | ### Removing Downloaded Models
95 |
96 | Delete downloaded model files to free storage:
97 |
98 | ```typescript
99 | await model.remove();
100 | ```
101 |
--------------------------------------------------------------------------------
/packages/mlc/react-native-ai-mlc.podspec:
--------------------------------------------------------------------------------
1 | require "json"
2 |
3 | package = JSON.parse(File.read(File.join(__dir__, "package.json")))
4 |
5 | # Dynamically resolve the package path using Node.js
6 | resolved_path = `node -p "require.resolve('@react-native-ai/mlc/package.json')"`.chomp
7 | if $?.success?
8 | package_path = File.dirname(resolved_path)
9 | else
10 | raise "Failed to resolve package path for react-native-ai-mlc. Make sure Node.js is available and the package is installed."
11 | end
12 |
13 | Pod::Spec.new do |s|
14 | s.name = "react-native-ai-mlc"
15 |
16 | s.version = package["version"]
17 | s.summary = package["description"]
18 | s.homepage = package["homepage"]
19 | s.license = package["license"]
20 | s.authors = package["author"]
21 |
22 | s.platforms = { :ios => "14.0" }
23 | s.source = { :git => "https://github.com/callstackincubator/ai.git", :tag => "#{s.version}" }
24 |
25 | s.source_files = "ios/**/*.{h,m,mm,swift}"
26 |
27 | # Use prebuilt static libraries shipped with the package
28 | s.vendored_libraries = [
29 | 'prebuilt/ios/lib/libmlc_llm.a',
30 | 'prebuilt/ios/lib/libmodel_iphone.a',
31 | 'prebuilt/ios/lib/libsentencepiece.a',
32 | 'prebuilt/ios/lib/libtokenizers_c.a',
33 | 'prebuilt/ios/lib/libtokenizers_cpp.a',
34 | 'prebuilt/ios/lib/libtvm_ffi_static.a',
35 | 'prebuilt/ios/lib/libtvm_runtime.a'
36 | ]
37 |
38 | # Include bundle configuration
39 | s.resources = ['prebuilt/ios/bundle/**/*']
40 |
41 | # Compiler configuration
42 | s.pod_target_xcconfig = {
43 | 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/ios" "$(PODS_TARGET_SRCROOT)/prebuilt/ios/include"',
44 | 'CLANG_CXX_LANGUAGE_STANDARD' => 'c++17',
45 | 'LIBRARY_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/prebuilt/ios/lib"'
46 | }
47 |
48 | # User target configuration to ensure static libraries are force loaded
49 | s.user_target_xcconfig = {
50 | 'OTHER_LDFLAGS' => [
51 | '$(inherited)',
52 | '-ObjC',
53 | "-force_load \"#{package_path}/prebuilt/ios/lib/libmodel_iphone.a\"",
54 | "-force_load \"#{package_path}/prebuilt/ios/lib/libmlc_llm.a\"",
55 | "-force_load \"#{package_path}/prebuilt/ios/lib/libtvm_runtime.a\"",
56 | "-force_load \"#{package_path}/prebuilt/ios/lib/libtvm_ffi_static.a\"",
57 | "-force_load \"#{package_path}/prebuilt/ios/lib/libsentencepiece.a\"",
58 | "-force_load \"#{package_path}/prebuilt/ios/lib/libtokenizers_cpp.a\"",
59 | "-force_load \"#{package_path}/prebuilt/ios/lib/libtokenizers_c.a\""
60 | ].join(' ')
61 | }
62 |
63 | # Framework dependencies
64 | s.frameworks = ['Metal', 'MetalKit', 'MetalPerformanceShaders']
65 | s.libraries = ['c++']
66 |
67 | # React Native dependencies
68 | install_modules_dependencies(s)
69 | end
70 |
--------------------------------------------------------------------------------
/website/src/docs/llama/generating.md:
--------------------------------------------------------------------------------
1 | # Generating
2 |
3 | You can generate responses using Llama models with the Vercel AI SDK's `generateText` or `streamText` functions.
4 |
5 | ## Requirements
6 |
7 | - Models must be downloaded and prepared before use
8 | - Sufficient device storage for model files (typically 1-4GB per model depending on quantization)
9 |
10 | ## Text Generation
11 |
12 | ```typescript
13 | import { llama } from '@react-native-ai/llama'
14 | import { generateText } from 'ai'
15 |
16 | // Create and prepare model
17 | const model = llama.languageModel(
18 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
19 | )
20 | await model.download()
21 | await model.prepare()
22 |
23 | const result = await generateText({
24 | model,
25 | prompt: 'Explain quantum computing in simple terms',
26 | })
27 |
28 | console.log(result.text)
29 | ```
30 |
31 | ## Streaming
32 |
33 | Stream responses for real-time output:
34 |
35 | ```typescript
36 | import { llama } from '@react-native-ai/llama'
37 | import { streamText } from 'ai'
38 |
39 | // Create and prepare model
40 | const model = llama.languageModel(
41 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
42 | )
43 | await model.download()
44 | await model.prepare()
45 |
46 | const { textStream } = streamText({
47 | model,
48 | prompt: 'Write a short story about a robot learning to paint',
49 | })
50 |
51 | for await (const delta of textStream) {
52 | console.log(delta)
53 | }
54 | ```
55 |
56 | ## Available Options
57 |
58 | Configure model behavior with generation options:
59 |
60 | - `temperature` (0-1): Controls randomness. Higher values = more creative, lower = more focused
61 | - `maxTokens`: Maximum number of tokens to generate
62 | - `topP` (0-1): Nucleus sampling threshold
63 | - `topK`: Top-K sampling parameter
64 |
65 | You can pass selected options with `generateText` or `streamText` as follows:
66 |
67 | ```typescript
68 | import { llama } from '@react-native-ai/llama'
69 | import { generateText } from 'ai'
70 |
71 | // Create and prepare model
72 | const model = llama.languageModel(
73 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
74 | )
75 | await model.download()
76 | await model.prepare()
77 |
78 | const result = await generateText({
79 | model,
80 | prompt: 'Write a creative story',
81 | temperature: 0.8,
82 | maxTokens: 500,
83 | topP: 0.9,
84 | })
85 | ```
86 |
87 | ## Model Configuration Options
88 |
89 | When creating a model instance, you can configure llama.rn specific options:
90 |
91 | ```typescript
92 | const model = llama.languageModel(
93 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf',
94 | {
95 | n_ctx: 4096, // Context size (default: 2048)
96 | n_gpu_layers: 99, // Number of GPU layers (default: 99)
97 | }
98 | )
99 | ```
100 |
--------------------------------------------------------------------------------
/packages/apple-llm/src/stream.ts:
--------------------------------------------------------------------------------
1 | import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'
2 |
3 | import NativeAppleLLMSpec, {
4 | type AppleGenerationOptions,
5 | type AppleMessage,
6 | } from './NativeAppleLLM'
7 |
8 | export function generateStream(
9 | messages: AppleMessage[],
10 | options: AppleGenerationOptions = {}
11 | ): ReadableStream<LanguageModelV2StreamPart> {
12 | if (typeof ReadableStream === 'undefined') {
13 | throw new Error(
14 | `ReadableStream is not available in this environment. Please load a polyfill, such as web-streams-polyfill.`
15 | )
16 | }
17 |
18 | let streamId: string | null = null
19 | let listeners: { remove(): void }[] = []
20 |
21 | const cleanup = () => {
22 | listeners.forEach((listener) => listener.remove())
23 | listeners = []
24 | }
25 |
26 | const stream = new ReadableStream({
27 | async start(controller) {
28 | try {
29 | streamId = NativeAppleLLMSpec.generateStream(messages, options)
30 |
31 | controller.enqueue({
32 | type: 'text-start',
33 | id: streamId,
34 | })
35 |
36 | const updateListener = NativeAppleLLMSpec.onStreamUpdate((data) => {
37 | if (data.streamId === streamId) {
38 | controller.enqueue({
39 | type: 'text-delta',
40 | delta: data.content,
41 | id: data.streamId,
42 | })
43 | }
44 | })
45 |
46 | const completeListener = NativeAppleLLMSpec.onStreamComplete((data) => {
47 | if (data.streamId === streamId) {
48 | controller.enqueue({
49 | type: 'text-end',
50 | id: streamId,
51 | })
52 | controller.enqueue({
53 | type: 'finish',
54 | finishReason: 'stop',
55 | usage: {
56 | inputTokens: 0,
57 | outputTokens: 0,
58 | totalTokens: 0,
59 | },
60 | })
61 | cleanup()
62 | controller.close()
63 | }
64 | })
65 |
66 | const errorListener = NativeAppleLLMSpec.onStreamError((data) => {
67 | if (data.streamId === streamId) {
68 | controller.enqueue({
69 | type: 'error',
70 | error: data.error,
71 | })
72 | cleanup()
73 | controller.close()
74 | }
75 | })
76 |
77 | listeners = [updateListener, completeListener, errorListener]
78 | } catch (error) {
79 | cleanup()
80 | controller.error(new Error(`Apple LLM stream failed: ${error}`))
81 | }
82 | },
83 | cancel() {
84 | cleanup()
85 | if (streamId) {
86 | NativeAppleLLMSpec.cancelStream(streamId)
87 | }
88 | },
89 | })
90 |
91 | return stream
92 | }
93 |
--------------------------------------------------------------------------------
/website/src/docs/mlc/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | The MLC provider enables you to run large language models directly on-device in React Native applications. This includes popular models like Llama, Phi-3, Mistral, and Qwen that run entirely on-device for privacy, performance, and offline capabilities.
4 |
5 | ## Installation
6 |
7 | Install the MLC provider:
8 |
9 | ```bash
10 | npm install @react-native-ai/mlc
11 | ```
12 |
13 | While you can use the MLC provider standalone, we recommend using it with the Vercel AI SDK for a much better developer experience. The AI SDK provides unified APIs, streaming support, and advanced features. To use with the AI SDK, you'll need v5 and [required polyfills](https://v5.ai-sdk.dev/docs/getting-started/expo#polyfills):
14 |
15 | ```bash
16 | npm install ai
17 | ```
18 |
19 | ## Requirements
20 |
21 | - **React Native New Architecture** - Required for native module functionality
22 | - **Increased Memory Limit capability** - Required for large model loading
23 |
24 | ## Running Your Application
25 |
26 | :::danger Physical Device Required
27 | You must run your application on a **physical iOS device** or using **Mac (Designed for iPad)** destination in Xcode, as prebuilt model binaries will not work in the iOS Simulator.
28 | :::
29 |
30 | ## Configuration
31 |
32 | ### Expo Config Plugin
33 |
34 | For Expo projects, add the MLC config plugin to automatically configure the increased memory limit capability:
35 |
36 | ```json
37 | {
38 | "expo": {
39 | "plugins": [
40 | "@react-native-ai/mlc"
41 | ]
42 | }
43 | }
44 | ```
45 |
46 | The plugin automatically adds the `com.apple.developer.kernel.increased-memory-limit` entitlement to your iOS app, which is required to run large language models that exceed the default memory limits.
47 |
48 | After adding the plugin, run:
49 |
50 | ```bash
51 | npx expo prebuild --clean
52 | ```
53 |
54 | ### Manual Installation
55 |
56 | If you're not using Expo or prefer manual configuration, add the "Increased Memory Limit" capability in Xcode:
57 |
58 | 1. Open your iOS project in Xcode
59 | 2. Navigate to your target's **Signing & Capabilities** tab
60 | 3. Click **+ Capability** and add "Increased Memory Limit"
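Whichever route you take, the end result is the entitlement mentioned above in your target's `.entitlements` file. As a rough sketch, the relevant entry (alongside whatever other entitlements your app already declares) looks like this:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
  <!-- Allows the app to exceed the default iOS memory limit when loading large models -->
  <key>com.apple.developer.kernel.increased-memory-limit</key>
  <true/>
</dict>
</plist>
```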
61 |
62 | ## Basic Usage
63 |
64 | Import the MLC provider and use it with the AI SDK:
65 |
66 | ```typescript
67 | import { mlc } from '@react-native-ai/mlc';
68 | import { generateText } from 'ai';
69 |
70 | const model = mlc.languageModel("Llama-3.2-3B-Instruct");
71 |
72 | await model.download();
73 | await model.prepare();
74 |
75 | const result = await generateText({
76 | model,
77 | prompt: 'Explain quantum computing in simple terms'
78 | });
79 | ```
80 |
81 | ## Next Steps
82 |
83 | - **[Model Management](./model-management.md)** - Complete guide to model lifecycle, available models, and API reference
84 | - **[Generating](./generating.md)** - Learn how to generate text and stream responses
85 |
--------------------------------------------------------------------------------
/website/src/docs/mlc/generating.md:
--------------------------------------------------------------------------------
1 | # Generating
2 |
3 | You can generate responses using MLC models with the Vercel AI SDK's `generateText`, `streamText`, or `generateObject` functions.
4 |
5 | ## Requirements
6 |
7 | - Models must be downloaded and prepared before use
8 | - Sufficient device storage for model files (1-4GB per model)
9 |
10 | ## Text Generation
11 |
12 | ```typescript
13 | import { mlc } from '@react-native-ai/mlc';
14 | import { generateText } from 'ai';
15 |
16 | // Create and prepare model
17 | const model = mlc.languageModel('Llama-3.2-3B-Instruct');
18 | await model.prepare();
19 |
20 | const result = await generateText({
21 | model,
22 | prompt: 'Explain quantum computing in simple terms'
23 | });
24 |
25 | console.log(result.text);
26 | ```
27 |
28 | ## Streaming
29 |
30 | Stream responses for real-time output:
31 |
32 | ```typescript
33 | import { mlc } from '@react-native-ai/mlc';
34 | import { streamText } from 'ai';
35 |
36 | // Create and prepare model
37 | const model = mlc.languageModel('Llama-3.2-3B-Instruct');
38 | await model.prepare();
39 |
40 | const { textStream } = streamText({
41 | model,
42 | prompt: 'Write a short story about a robot learning to paint'
43 | });
44 |
45 | for await (const delta of textStream) {
46 | console.log(delta);
47 | }
48 | ```
49 |
50 | ## Structured Output
51 |
52 | Generate structured data that conforms to a specific schema:
53 |
54 | ```typescript
55 | import { generateObject } from 'ai';
56 | import { mlc } from '@react-native-ai/mlc';
57 | import { z } from 'zod';
58 |
59 | // Create and prepare model
60 | const model = mlc.languageModel('Llama-3.2-3B-Instruct');
61 | await model.prepare();
62 |
63 | const schema = z.object({
64 | name: z.string(),
65 | description: z.string()
66 | });
67 |
68 | const result = await generateObject({
69 | model,
70 | prompt: 'Who are you? Please identify yourself and your capabilities.',
71 | schema
72 | });
73 |
74 | console.log(result.object);
75 | ```
76 | ## Available Options
77 |
78 | Configure model behavior with generation options:
79 |
80 | - `temperature` (0-1): Controls randomness. Higher values = more creative, lower = more focused
81 | - `maxTokens`: Maximum number of tokens to generate
82 | - `topP` (0-1): Nucleus sampling threshold
83 | - `topK`: Top-K sampling parameter
84 |
85 | You can pass selected options with `generateText`, `streamText`, or `generateObject` as follows:
86 |
87 | ```typescript
88 | import { mlc } from '@react-native-ai/mlc';
89 | import { generateText } from 'ai';
90 |
91 | // Create and prepare model
92 | const model = mlc.languageModel('Llama-3.2-3B-Instruct');
93 | await model.prepare();
94 |
95 | const result = await generateText({
96 | model,
97 | prompt: 'Write a creative story',
98 | temperature: 0.8,
99 | maxTokens: 500,
100 | topP: 0.9,
101 | });
102 | ```
103 |
--------------------------------------------------------------------------------
/packages/mlc/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@react-native-ai/mlc",
3 | "version": "0.11.0",
4 | "description": "MLC LLM provider for Vercel AI SDK",
5 | "main": "lib/commonjs/index",
6 | "module": "lib/module/index",
7 | "types": "lib/typescript/index.d.ts",
8 | "react-native": "src/index",
9 | "source": "src/index",
10 | "files": [
11 | "src",
12 | "lib",
13 | "android",
14 | "ios",
15 | "cpp",
16 | "prebuilt",
17 | "*.podspec",
18 | "app.plugin.js",
19 | "!ios/build",
20 | "!android/build",
21 | "!android/gradle",
22 | "!android/gradlew",
23 | "!android/gradlew.bat",
24 | "!android/local.properties",
25 | "!**/__tests__",
26 | "!**/__fixtures__",
27 | "!**/__mocks__",
28 | "!**/.*"
29 | ],
30 | "scripts": {
31 | "clean": "del-cli lib",
32 | "typecheck": "tsc --noEmit",
33 | "prepare": "bob build",
34 | "build:runtime:ios": "./scripts/build-runtime.sh --platform ios",
35 | "build:runtime:android": "./scripts/build-runtime.sh --platform android"
36 | },
37 | "keywords": [
38 | "react-native",
39 | "mlc",
40 | "llm",
41 | "ai",
42 | "sdk",
43 | "vercel",
44 | "expo",
45 | "config-plugin"
46 | ],
47 | "repository": {
48 | "type": "git",
49 | "url": "git+https://github.com/callstackincubator/ai.git"
50 | },
51 | "author": "Szymon Rybczak (https://github.com/szymonrybczak)",
52 | "contributors": [
53 | "Mike Grabowski (https://github.com/grabbou)"
54 | ],
55 | "license": "MIT",
56 | "bugs": {
57 | "url": "https://github.com/callstackincubator/ai/issues"
58 | },
59 | "homepage": "https://github.com/callstackincubator/ai#readme",
60 | "publishConfig": {
61 | "registry": "https://registry.npmjs.org/"
62 | },
63 | "devDependencies": {
64 | "react-native": "0.81.4"
65 | },
66 | "react-native-builder-bob": {
67 | "source": "src",
68 | "output": "lib",
69 | "targets": [
70 | "commonjs",
71 | "module",
72 | [
73 | "typescript",
74 | {
75 | "project": "tsconfig.build.json"
76 | }
77 | ]
78 | ]
79 | },
80 | "codegenConfig": {
81 | "name": "NativeMLCEngine",
82 | "type": "modules",
83 | "jsSrcsDir": "src",
84 | "ios": {
85 | "modulesProvider": {
86 | "NativeMLCEngine": "MLCEngine"
87 | }
88 | },
89 | "android": {
90 | "javaPackageName": "com.callstack.ai"
91 | }
92 | },
93 | "dependencies": {
94 | "@ai-sdk/provider": "^2.0.0",
95 | "@ai-sdk/provider-utils": "^3.0.10",
96 | "zod": "^4.2.1"
97 | },
98 | "peerDependencies": {
99 | "react-native": ">=0.76.0",
100 | "expo": "*"
101 | },
102 | "peerDependenciesMeta": {
103 | "expo": {
104 | "optional": true
105 | }
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/packages/mlc/android/build.gradle:
--------------------------------------------------------------------------------
1 | import com.android.Version
2 |
3 | buildscript {
4 | def kotlin_version = rootProject.ext.has("kotlinVersion") ? rootProject.ext.get("kotlinVersion") : project.properties["MLC_kotlinVersion"]
5 |
6 | repositories {
7 | google()
8 | mavenCentral()
9 | }
10 |
11 | dependencies {
12 | classpath "com.android.tools.build:gradle:7.2.1"
13 | classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
14 | classpath "org.jetbrains.kotlin:kotlin-serialization:$kotlin_version"
15 | }
16 | }
17 |
18 | apply plugin: "com.android.library"
19 | apply plugin: "kotlin-android"
20 | apply plugin: "kotlinx-serialization"
21 | apply plugin: "com.facebook.react"
22 |
23 | def getExtOrDefault(name) {
24 | return rootProject.ext.has(name) ? rootProject.ext.get(name) : project.properties["MLC_" + name]
25 | }
26 |
27 | def getExtOrIntegerDefault(name) {
28 | return rootProject.ext.has(name) ? rootProject.ext.get(name) : (project.properties["MLC_" + name]).toInteger()
29 | }
30 |
31 | static def supportsNamespace() {
32 | def parsed = Version.ANDROID_GRADLE_PLUGIN_VERSION.tokenize('.')
33 | def major = parsed[0].toInteger()
34 | def minor = parsed[1].toInteger()
35 |
36 | // Namespace support was added in 7.3.0
37 | return (major == 7 && minor >= 3) || major >= 8
38 | }
39 |
40 | android {
41 | if (supportsNamespace()) {
42 | namespace "com.callstack.ai"
43 |
44 | sourceSets {
45 | main {
46 | manifest.srcFile "src/main/AndroidManifest.xml"
47 | }
48 | }
49 | }
50 |
51 | compileSdkVersion getExtOrIntegerDefault("compileSdkVersion")
52 |
53 | defaultConfig {
54 | minSdkVersion getExtOrIntegerDefault("minSdkVersion")
55 | targetSdkVersion getExtOrIntegerDefault("targetSdkVersion")
56 | }
57 |
58 | buildFeatures {
59 | buildConfig true
60 | }
61 |
62 | buildTypes {
63 | release {
64 | minifyEnabled false
65 | }
66 | }
67 |
68 | lintOptions {
69 | disable "GradleCompatible"
70 | }
71 |
72 | compileOptions {
73 | sourceCompatibility JavaVersion.VERSION_1_8
74 | targetCompatibility JavaVersion.VERSION_1_8
75 | }
76 |
77 | sourceSets {
78 | main {
79 | java.srcDirs += [
80 | "src",
81 | "generated/java",
82 | "generated/jni",
83 | "../prebuilt/android/lib/mlc4j/src/main/java"
84 | ]
85 | assets.srcDirs += ["../prebuilt/android/lib/mlc4j/src/main/assets"]
86 | jniLibs.srcDirs += ["../prebuilt/android/lib/mlc4j/output"]
87 | }
88 | }
89 | }
90 |
91 | repositories {
92 | mavenCentral()
93 | google()
94 | }
95 |
96 | def kotlin_version = getExtOrDefault("kotlinVersion")
97 |
98 | dependencies {
99 | implementation "com.facebook.react:react-native:+"
100 | implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
101 |
102 | // Dependencies
103 | implementation 'org.jetbrains.kotlinx:kotlinx-serialization-json:1.7.3'
104 |
105 | // MLC4J dependency
106 | implementation files('../prebuilt/android/lib/mlc4j/output/tvm4j_core.jar')
107 | }
108 |
109 | react {
110 | jsRootDir = file("../src/")
111 | libraryName = "MLC"
112 | codegenJavaPackageName = "com.callstack.ai"
113 | }
114 |
--------------------------------------------------------------------------------
/packages/mlc/src/NativeMLCEngine.ts:
--------------------------------------------------------------------------------
1 | import type { TurboModule } from 'react-native'
2 | import { TurboModuleRegistry } from 'react-native'
3 | import type { EventEmitter } from 'react-native/Libraries/Types/CodegenTypes'
4 |
5 | export interface ModelConfig {
6 | model_id?: string
7 | }
8 |
9 | type MessageRole = 'assistant' | 'system' | 'tool' | 'user'
10 |
11 | export interface Message {
12 | role: MessageRole
13 | content: string
14 | }
15 |
16 | export interface CompletionUsageExtra {
17 | ttft_s: number
18 | prefill_tokens_per_s: number
19 | prompt_tokens: number
20 | jump_forward_tokens: number
21 | completion_tokens: number
22 | end_to_end_latency_s: number
23 | prefill_tokens: number
24 | inter_token_latency_s: number
25 | decode_tokens_per_s: number
26 | decode_tokens: number
27 | }
28 |
29 | export interface CompletionUsage {
30 | prompt_tokens: number
31 | completion_tokens: number
32 | total_tokens: number
33 | extra: CompletionUsageExtra
34 | }
35 |
36 | export interface GeneratedMessage {
37 | role: MessageRole
38 | content: string
39 | tool_calls: ChatToolCall[]
40 | finish_reason: 'stop' | 'length' | 'tool_calls'
41 | usage: CompletionUsage
42 | }
43 |
44 | export interface ResponseFormat {
45 | type: 'json_object' | 'text'
46 | schema?: string
47 | }
48 |
49 | export interface ChatFunctionCall {
50 | name: string
51 | arguments?: Record<string, unknown>
52 | }
53 |
54 | export interface ChatToolCall {
55 | id: string
56 | type: string
57 | function: ChatFunctionCall
58 | }
59 |
60 | export interface ChatFunctionTool {
61 | name: string
62 | description?: string
63 | parameters: Record<string, unknown>
64 | }
65 |
66 | export interface ChatTool {
67 | type: 'function'
68 | function: ChatFunctionTool
69 | }
70 |
71 | export interface GenerationOptions {
72 | temperature?: number
73 | maxTokens?: number
74 | topP?: number
75 | topK?: number
76 | responseFormat?: ResponseFormat
77 | tools?: ChatTool[]
78 | toolChoice?: 'none' | 'auto'
79 | }
80 |
81 | export interface DownloadProgress {
82 | percentage: number
83 | }
84 |
85 | export interface ChatUpdateEvent {
86 | delta: {
87 | content: string
88 | role: MessageRole
89 | }
90 | }
91 |
92 | export interface ChatCompleteEvent {
93 | usage: CompletionUsage
94 | finish_reason: 'stop' | 'length' | 'tool_calls'
95 | }
96 |
97 | export interface Spec extends TurboModule {
98 | getModel(name: string): Promise<ModelConfig>
99 | getModels(): Promise<ModelConfig[]>
100 |
101 | generateText(
102 | messages: Message[],
103 | options?: GenerationOptions
104 | ): Promise<GeneratedMessage>
105 |
106 | streamText(messages: Message[], options?: GenerationOptions): Promise<string>
107 | cancelStream(streamId: string): Promise<void>
108 |
109 | downloadModel(modelId: string): Promise<void>
110 | removeModel(modelId: string): Promise<void>
111 |
112 | prepareModel(modelId: string): Promise<void>
113 | unloadModel(): Promise<void>
114 |
115 | onChatUpdate: EventEmitter<ChatUpdateEvent>
116 | onChatComplete: EventEmitter<ChatCompleteEvent>
117 | onDownloadProgress: EventEmitter<DownloadProgress>
118 | }
119 |
120 | export default TurboModuleRegistry.getEnforcing<Spec>('MLCEngine')
121 |
--------------------------------------------------------------------------------
/website/src/docs/apple/running-on-simulator.md:
--------------------------------------------------------------------------------
1 | # Running on Simulator
2 |
3 | This guide helps you configure Apple Intelligence to work with the iOS Simulator in your React Native applications.
4 |
5 | Apple Intelligence must be enabled on your macOS system before it can be used in the iOS Simulator. Follow these steps to ensure proper configuration.
6 |
7 | ## Prerequisites
8 |
9 | Before running Apple Intelligence on the simulator, verify your macOS settings are correctly configured.
10 |
11 | ## Step 1: Check Language Settings
12 |
13 | Apple Intelligence and Siri must use the same language. To configure this:
14 |
15 | 1. Open **System Settings** > **Apple Intelligence & Siri**
16 | 2. Verify that both **Apple Intelligence** and **Siri** are set to the **same language**
17 | 3. If the languages don't match, update them to use the same language
18 |
19 | ## Step 2: Set Your Region
20 |
21 | Apple Intelligence is only available in certain regions. To enable it:
22 |
23 | 1. Open **System Settings** > **General** > **Language & Region**
24 | 2. Set your **Region** to **United States** or another Apple Intelligence-supported region
25 | 3. Restart your Mac if prompted
26 |
27 | After changing your region to a supported location, the Apple Intelligence toggle should appear in your system settings.
28 |
29 | > [!NOTE]
30 | > United States is one of the supported regions, but Apple Intelligence may be available in other regions as well. Check Apple's official documentation for the complete list of supported regions.
31 |
32 | ## Step 3: Enable Apple Intelligence
33 |
34 | Once your language and region are configured:
35 |
36 | 1. Open **System Settings** > **Apple Intelligence & Siri**
37 | 2. Toggle on **Apple Intelligence**
38 | 3. macOS will begin downloading the required models
39 | 4. Wait for the download to complete before testing in the simulator
40 |
41 | > [!NOTE]
42 | > The model download may take several minutes depending on your internet connection. Ensure you have sufficient disk space available.
43 |
44 | ## Step 4: Verify Simulator Access
45 |
46 | After enabling Apple Intelligence on macOS:
47 |
48 | 1. Launch your iOS Simulator
49 | 2. The simulator will inherit Apple Intelligence capabilities from your Mac
50 | 3. Test your application to confirm Apple Foundation Models are accessible
51 |
52 | ## Common Issues
53 |
54 | ### Apple Intelligence Toggle Not Visible
55 |
56 | If you don't see the Apple Intelligence toggle in System Settings:
57 |
58 | - Verify your Mac supports Apple Intelligence (Apple Silicon required)
59 | - Ensure your macOS version is up to date
60 | - Confirm your region is set to a supported location
61 | - Check that both Siri and Apple Intelligence language settings match
62 |
63 | ### Models Not Working in Simulator
64 |
65 | If Apple Intelligence is enabled but models aren't working:
66 |
67 | - Ensure the model download completed successfully on macOS
68 | - Restart the iOS Simulator
69 | - Verify that `apple.isAvailable()` returns `true` in your code (see [Availability Check](./generating#availability-check))
70 | - Check that your app is running on iOS 26+ in the simulator
71 |
72 | ## API Availability
73 |
74 | Always check if Apple Intelligence is available before using it in your application. See the [Availability Check](./generating#availability-check) section in the Generating guide for details on how to implement this check.
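For reference, a minimal availability check (a sketch built around the `apple.isAvailable()` call mentioned above; see the Generating guide for the full pattern) could look like this:

```tsx
import { apple } from '@react-native-ai/apple'

// Should return true only when Apple Intelligence is enabled and its models are ready
const available = await apple.isAvailable()

if (!available) {
  console.warn('Apple Intelligence is not available in this simulator')
}
```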
75 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Thank you for your interest in contributing to React Native AI! This guide will help you get started.
4 |
5 | ## Prerequisites
6 |
7 | - **Bun** (latest version recommended)
8 | - **React Native development environment** (Android Studio, Xcode)
9 |
10 | ## Project Structure
11 |
12 | This is a monorepo with the following packages:
13 |
14 | - **`packages/mlc/`** - MLC-LLM integration for React Native
15 | - **`packages/apple-llm/`** - Apple Intelligence turbo module for iOS
16 | - **`apps/expo-example/`** - example Expo app
17 |
18 | ## Quick Start
19 |
20 | 1. **Clone and install dependencies:**
21 | ```bash
22 | git clone https://github.com/callstackincubator/ai.git
23 | cd ai
24 | bun install
25 | ```
26 |
27 | 2. **Run quality checks:**
28 | ```bash
29 | bun run typecheck
30 | bun run lint
31 | ```
32 |
33 | 3. **Test your changes:**
34 | ```bash
35 | # Run example app
36 | cd apps/expo-example
37 | bun run start
38 | bun run android # or ios
39 | ```
40 |
41 | ## Development Guidelines
42 |
43 | ### Code Quality
44 |
45 | - Follow existing code style and patterns
46 | - Ensure TypeScript types are properly defined
47 | - Run `bun run lint --fix` to fix formatting issues
48 |
49 | ### Native Code
50 |
51 | - **Android**: Use Kotlin, follow existing patterns in `packages/*/android/`
52 | - **iOS**: Use Swift/Objective-C++, follow existing patterns in `packages/*/ios/`
53 |
54 | ### Commit Messages
55 |
56 | Follow [Conventional Commits](https://www.conventionalcommits.org/):
57 | - `feat:` new features
58 | - `fix:` bug fixes
59 | - `docs:` documentation changes
60 | - `refactor:` code refactoring
61 | - `test:` test additions/updates
62 | - `chore:` tooling/config changes
63 |
64 | ## Pull Request Process
65 |
66 | 1. **Fork** the repository
67 | 2. **Create a feature branch** from `main`
68 | 3. **Make your changes** following the guidelines above
69 | 4. **Test thoroughly** with the example app
70 | 5. **Submit a pull request** with:
71 | - Clear description of changes
72 | - Screenshots/videos for UI changes
73 | - Test plan for reviewers
74 |
75 | ## Common Tasks
76 |
77 | ### Working with packages
78 |
79 | Build all packages
80 |
81 | ```bash
82 | bun run --filter='@react-native-ai/*' prepare
83 | ```
84 |
85 | Work on specific package
86 |
87 | ```bash
88 | cd packages/mlc
89 | bun run typecheck
90 | ```
91 |
92 | ### Running examples
93 |
94 | ```bash
95 | cd apps/expo-example
96 | bun run prestart # Setup MLC dependencies
97 | bun run start
98 | bun run android
99 | ```
100 |
101 | ### Native Development
102 |
103 | - **Android**: Open `apps/expo-example/android` in Android Studio
104 | - **iOS MLC**: Open `apps/expo-example/ios/AiExample.xcworkspace` in Xcode
105 |
106 | - **RN example app**: Open `apps/expo-example/ios/example.xcworkspace` in Xcode
107 | - The Apple Intelligence package is a Turbo Module - work directly from the integrated Xcode project
108 | - Native code is in `packages/apple-llm/ios/` but best developed through the example app
109 |
110 | ## Need Help?
111 |
112 | - Check existing [issues](https://github.com/callstackincubator/ai/issues)
113 | - Review the [code of conduct](CODE_OF_CONDUCT.md)
114 | - Ask questions in pull request discussions
115 |
116 | We appreciate all contributions, big and small! 🚀
117 |
--------------------------------------------------------------------------------
/packages/llama/src/storage.ts:
--------------------------------------------------------------------------------
1 | import RNBlobUtil from 'react-native-blob-util'
2 |
3 | let storagePath = `${RNBlobUtil.fs.dirs.DocumentDir}/llama-models`
4 |
5 | export interface DownloadProgress {
6 | percentage: number
7 | }
8 |
9 | export interface ModelInfo {
10 | model_id: string
11 | path: string
12 | filename: string
13 | sizeBytes?: number
14 | }
15 |
16 | export function setStoragePath(path: string): void {
17 | storagePath = path
18 | }
19 |
20 | export function getStoragePath(): string {
21 | return storagePath
22 | }
23 |
24 | export function parseModelId(modelId: string): {
25 | repo: string
26 | filename: string
27 | } {
28 | const parts = modelId.split('/')
29 | if (parts.length < 3) {
30 | throw new Error(
31 | `Invalid model ID format: "${modelId}". Expected format: "owner/repo/filename.gguf"`
32 | )
33 | }
34 | const filename = parts.pop()!
35 | const repo = parts.join('/')
36 | return { repo, filename }
37 | }
38 |
39 | export function getModelPath(modelId: string): string {
40 | const { filename } = parseModelId(modelId)
41 | return `${storagePath}/${filename}`
42 | }
43 |
44 | export async function isModelDownloaded(modelId: string): Promise<boolean> {
45 | const path = getModelPath(modelId)
46 | return RNBlobUtil.fs.exists(path)
47 | }
48 |
49 | export async function getDownloadedModels(): Promise<ModelInfo[]> {
50 | try {
51 | const exists = await RNBlobUtil.fs.exists(storagePath)
52 | if (!exists) {
53 | return []
54 | }
55 |
56 | const files = await RNBlobUtil.fs.ls(storagePath)
57 | const models: ModelInfo[] = []
58 |
59 | for (const filename of files) {
60 | if (filename.endsWith('.gguf')) {
61 | const path = `${storagePath}/${filename}`
62 | const stat = await RNBlobUtil.fs.stat(path)
63 |
64 | models.push({
65 | model_id: filename,
66 | path,
67 | filename,
68 | sizeBytes: Number(stat.size),
69 | })
70 | }
71 | }
72 |
73 | return models
74 | } catch (error) {
75 | console.error('Failed to get downloaded models:', error)
76 | return []
77 | }
78 | }
79 |
80 | export async function downloadModel(
81 | modelId: string,
82 | progressCallback?: (progress: DownloadProgress) => void
83 | ): Promise<string> {
84 | const { repo, filename } = parseModelId(modelId)
85 | const url = `https://huggingface.co/${repo}/resolve/main/${filename}?download=true`
86 | const destPath = `${storagePath}/${filename}`
87 |
88 | const dirExists = await RNBlobUtil.fs.exists(storagePath)
89 | if (!dirExists) {
90 | await RNBlobUtil.fs.mkdir(storagePath)
91 | }
92 |
93 | const fileExists = await RNBlobUtil.fs.exists(destPath)
94 | if (fileExists) {
95 | progressCallback?.({ percentage: 100 })
96 | return destPath
97 | }
98 |
99 | const result = await RNBlobUtil.config({
100 | path: destPath,
101 | fileCache: true,
102 | })
103 | .fetch('GET', url)
104 | .progress((received, total) => {
105 | const percentage = Math.round((Number(received) / Number(total)) * 100)
106 | progressCallback?.({ percentage })
107 | })
108 |
109 | return result.path()
110 | }
111 |
112 | export async function removeModel(modelId: string): Promise<void> {
113 | const path = getModelPath(modelId)
114 | const exists = await RNBlobUtil.fs.exists(path)
115 | if (exists) {
116 | await RNBlobUtil.fs.unlink(path)
117 | }
118 | }
119 |
--------------------------------------------------------------------------------
/packages/llama/README.md:
--------------------------------------------------------------------------------
1 | # @react-native-ai/llama
2 |
3 | llama.rn provider for Vercel AI SDK - run GGUF models on-device in React Native.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | npm install @react-native-ai/llama llama.rn react-native-blob-util ai
9 | ```
10 |
11 | ## Usage with AI SDK
12 |
13 | ```typescript
14 | import { llama } from '@react-native-ai/llama'
15 | import { generateText, streamText } from 'ai'
16 |
17 | // Create model instance (Model ID format: "owner/repo/filename.gguf")
18 | const model = llama.languageModel(
19 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf',
20 | {
21 | n_ctx: 2048,
22 | n_gpu_layers: 99,
23 | }
24 | )
25 |
26 | // Download from HuggingFace (with progress)
27 | await model.download((progress) => {
28 | console.log(`Downloading: ${progress.percentage}%`)
29 | })
30 |
31 | // Initialize model (loads into memory)
32 | await model.prepare()
33 |
34 | // Generate text (non-streaming)
35 | const { text } = await generateText({
36 | model,
37 | messages: [
38 | { role: 'system', content: 'You are a helpful assistant.' },
39 | { role: 'user', content: 'Write a haiku about coding.' },
40 | ],
41 | maxOutputTokens: 100,
42 | temperature: 0.7,
43 | })
44 |
45 | console.log(text)
46 |
47 | // Stream text
48 | const result = streamText({
49 | model,
50 | messages: [
51 | { role: 'system', content: 'You are a helpful assistant.' },
52 | { role: 'user', content: 'Tell me a story.' },
53 | ],
54 | maxOutputTokens: 500,
55 | temperature: 0.7,
56 | })
57 |
58 | for await (const chunk of result.textStream) {
59 | process.stdout.write(chunk)
60 | }
61 |
62 | // Cleanup
63 | await model.unload()
64 | ```
65 |
66 | ## Direct Context Usage
67 |
68 | For advanced use cases, you can access the underlying `LlamaContext` directly:
69 |
70 | ```typescript
71 | import { llama, LlamaEngine } from '@react-native-ai/llama'
72 |
73 | // List downloaded models
74 | const models = await LlamaEngine.getModels()
75 |
76 | // Create and prepare model
77 | const model = llama.languageModel(
78 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
79 | )
80 | await model.prepare()
81 |
82 | // Access underlying LlamaContext
83 | const context = model.getContext()
84 | const result = await context.completion({
85 | messages: [{ role: 'user', content: 'Hello!' }],
86 | n_predict: 100,
87 | })
88 |
89 | // Cleanup
90 | await model.unload()
91 |
92 | // Remove from disk
93 | await model.remove()
94 | ```
95 |
96 | ## API
97 |
98 | ### `llama.languageModel(modelId, options?)`
99 |
100 | Creates a language model instance.
101 |
102 | - `modelId`: Model identifier in format `owner/repo/filename.gguf`
103 | - `options`:
104 | - `n_ctx`: Context size (default: 2048)
105 | - `n_gpu_layers`: Number of GPU layers (default: 99)
106 | - `contextParams`: Additional llama.rn context parameters
107 |
108 | ### `LlamaEngine`
109 |
110 | - `getModels()`: Get list of downloaded models
111 | - `isDownloaded(modelId)`: Check if a model is downloaded
112 | - `setStoragePath(path)`: Set custom storage directory
113 |
114 | ### Model Instance Methods
115 |
116 | - `download(progressCallback?)`: Download model from HuggingFace
117 | - `isDownloaded()`: Check if this model is downloaded
118 | - `prepare()`: Initialize/load model into memory
119 | - `getContext()`: Get underlying LlamaContext
120 | - `unload()`: Release model from memory
121 | - `remove()`: Delete model from disk
122 |
123 | ## Requirements
124 |
125 | - React Native >= 0.76.0
126 | - llama.rn >= 0.10.0-rc.0
127 |
--------------------------------------------------------------------------------
/website/src/docs/apple/speech.md:
--------------------------------------------------------------------------------
1 | # Speech
2 |
3 | Convert text to speech using Apple's on-device speech synthesis capabilities.
4 |
5 | ## Overview
6 |
7 | This provider uses Apple's [`AVSpeechSynthesizer`](https://developer.apple.com/documentation/avfaudio/avspeechsynthesizer) to perform text-to-speech entirely on-device. Audio is synthesized locally and returned to your app as a WAV byte stream.
8 |
9 | ## Requirements
10 |
11 | - **iOS 13+** – Required for programmatic audio rendering with `AVSpeechSynthesizer.write(...)`.
12 | - **iOS 17+** – Required to use Personal Voice if available on device.
13 |
14 | ## Usage
15 |
16 | ### Basic Speech
17 |
18 | ```tsx
19 | import { apple } from '@react-native-ai/apple'
20 | import { experimental_generateSpeech as speech } from 'ai'
21 |
22 | const response = await speech({
23 | model: apple.speechModel(),
24 | text: 'Hello from Apple on-device speech!',
25 | })
26 |
27 | // Access the buffer in a preferred way
28 | console.log(response.audio.uint8Array)
29 | console.log(response.audio.base64)
30 | ```
31 |
32 | > [!NOTE]
33 | > Sample rate and bit depth are determined by the system voice (commonly 44.1 kHz, 16‑bit PCM; some devices may return 32‑bit float which is encoded accordingly in the WAV header).
34 |
35 | ### Language and Voice Selection
36 |
37 | You can control the output language or select a specific voice by identifier. To see what voices are available on the device:
38 |
39 | ```tsx
40 | import { AppleSpeech } from '@react-native-ai/apple'
41 |
42 | const voices = await AppleSpeech.getVoices()
43 | ```
44 |
45 | Use a specific voice by passing its `identifier`:
46 |
47 | ```tsx
48 | await speech({
49 | model: apple.speechModel(),
50 | text: 'Custom voice synthesis',
51 | voice: 'com.apple.voice.super-compact.en-US.Samantha',
52 | })
53 | ```
54 |
55 | Or specify only `language` to use the system’s default voice for that locale:
56 |
57 | ```tsx
58 | await speech({
59 | model: apple.speechModel(),
60 | text: 'Bonjour tout le monde!',
61 | language: 'fr-FR',
62 | })
63 | ```
64 |
65 | > [!NOTE]
66 | > If both `voice` and `language` are provided, `voice` takes priority.
67 |
68 | > [!NOTE]
69 | > If only `language` is provided, the default system voice for that locale is used.
70 |
71 | ## Voices and Asset Management
72 |
73 | The system provides a catalog of built‑in voices, including enhanced and premium variants, which may require a one‑time system download. If you have created a Personal Voice on device (iOS 17+), it appears in the list and is flagged accordingly.
74 |
75 | > [!NOTE]
76 | > Voice assets are managed by the operating system. To add or manage voices, use iOS Settings → Accessibility → Read & Speak → Voices. This provider does not bundle or manage voice downloads.
77 |
78 | ## Direct API Access
79 |
80 | For advanced use cases, you can access the speech API directly:
81 |
82 | ### AppleSpeech
83 |
84 | ```tsx
85 | import { AppleSpeech } from '@react-native-ai/apple'
86 |
87 | // List available voices (identifier, name, language, quality, traits)
88 | const voices = await AppleSpeech.getVoices()
89 |
90 | // Generate audio as an ArrayBuffer-like WAV payload
91 | const buffer = await AppleSpeech.generate('Hello world', {
92 | language: 'en-US',
93 | })
94 |
95 | // Convert to Uint8Array if needed
96 | ```
97 |
98 | Returned voice objects include:
99 |
100 | - `identifier`: string
101 | - `name`: string
102 | - `language`: BCP‑47 code, e.g. `en-US`
103 | - `quality`: `default` | `enhanced` | `premium`
104 | - `isPersonalVoice`: boolean (iOS 17+)
105 | - `isNoveltyVoice`: boolean (iOS 17+)
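
As a rough TypeScript sketch of that shape (the interface name below is illustrative and not exported by the package):

```tsx
interface VoiceInfo {
  identifier: string
  name: string
  language: string // BCP-47 code, e.g. 'en-US'
  quality: 'default' | 'enhanced' | 'premium'
  isPersonalVoice: boolean // iOS 17+
  isNoveltyVoice: boolean // iOS 17+
}
```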
106 |
107 | > [!NOTE]
108 | > On iOS 17+, the provider requests Personal Voice authorization before listing voices so your Personal Voice can be surfaced if available.
109 |
--------------------------------------------------------------------------------
/packages/mlc/ios/engine/JSONFFIEngine.mm:
--------------------------------------------------------------------------------
1 | //
2 | // LLMEngine.mm
3 | // LLMEngine
4 | //
5 | #import <Foundation/Foundation.h>
6 | #import <UIKit/UIKit.h>
7 | #include <os/proc.h>
8 |
9 | #include "JSONFFIEngine.h"
10 |
11 | #define TVM_USE_LIBBACKTRACE 0
12 | #define DMLC_USE_LOGGING_LIBRARY <tvm/runtime/logging.h>
13 |
14 | #include <tvm/runtime/packed_func.h>
15 | #include <tvm/runtime/registry.h>
16 |
17 | using namespace tvm::runtime;
18 |
19 | @implementation JSONFFIEngine {
20 | // Internal c++ classes
21 | // internal module backed by JSON FFI
22 | Module json_ffi_engine_;
23 | // member functions
24 | PackedFunc init_background_engine_func_;
25 | PackedFunc unload_func_;
26 | PackedFunc reload_func_;
27 | PackedFunc reset_func_;
28 | PackedFunc chat_completion_func_;
29 | PackedFunc abort_func_;
30 | PackedFunc run_background_loop_func_;
31 | PackedFunc run_background_stream_back_loop_func_;
32 | PackedFunc exit_background_loop_func_;
33 | }
34 |
35 | - (instancetype)init {
36 | if (self = [super init]) {
37 | // load chat module
38 | const PackedFunc* f_json_ffi_create = Registry::Get("mlc.json_ffi.CreateJSONFFIEngine");
39 | ICHECK(f_json_ffi_create) << "Cannot find mlc.json_ffi.CreateJSONFFIEngine";
40 | json_ffi_engine_ = (*f_json_ffi_create)();
41 | init_background_engine_func_ = json_ffi_engine_->GetFunction("init_background_engine");
42 | reload_func_ = json_ffi_engine_->GetFunction("reload");
43 | unload_func_ = json_ffi_engine_->GetFunction("unload");
44 | reset_func_ = json_ffi_engine_->GetFunction("reset");
45 | chat_completion_func_ = json_ffi_engine_->GetFunction("chat_completion");
46 | abort_func_ = json_ffi_engine_->GetFunction("abort");
47 | run_background_loop_func_ = json_ffi_engine_->GetFunction("run_background_loop");
48 | run_background_stream_back_loop_func_ =
49 | json_ffi_engine_->GetFunction("run_background_stream_back_loop");
50 | exit_background_loop_func_ = json_ffi_engine_->GetFunction("exit_background_loop");
51 |
52 | ICHECK(init_background_engine_func_ != nullptr);
53 | ICHECK(reload_func_ != nullptr);
54 | ICHECK(unload_func_ != nullptr);
55 | ICHECK(reset_func_ != nullptr);
56 | ICHECK(chat_completion_func_ != nullptr);
57 | ICHECK(abort_func_ != nullptr);
58 | ICHECK(run_background_loop_func_ != nullptr);
59 | ICHECK(run_background_stream_back_loop_func_ != nullptr);
60 | ICHECK(exit_background_loop_func_ != nullptr);
61 | }
62 | return self;
63 | }
64 |
65 | - (void)initBackgroundEngine:(void (^)(NSString*))streamCallback {
66 | TypedPackedFunc<void(String)> internal_stream_callback([streamCallback](String value) {
67 | streamCallback([NSString stringWithUTF8String:value.c_str()]);
68 | });
69 | int device_type = kDLMetal;
70 | int device_id = 0;
71 | init_background_engine_func_(device_type, device_id, internal_stream_callback);
72 | }
73 |
74 | - (void)reload:(NSString*)engineConfigJson {
75 | std::string engine_config = engineConfigJson.UTF8String;
76 | reload_func_(engine_config);
77 | }
78 |
79 | - (void)unload {
80 | unload_func_();
81 | }
82 |
83 | - (void)reset {
84 | reset_func_();
85 | }
86 |
87 | - (void)chatCompletion:(NSString*)requestJSON requestID:(NSString*)requestID {
88 | std::string request_json = requestJSON.UTF8String;
89 | std::string request_id = requestID.UTF8String;
90 | chat_completion_func_(request_json, request_id);
91 | }
92 |
93 | - (void)abort:(NSString*)requestID {
94 | std::string request_id = requestID.UTF8String;
95 | abort_func_(request_id);
96 | }
97 |
98 | - (void)runBackgroundLoop {
99 | run_background_loop_func_();
100 | }
101 |
102 | - (void)runBackgroundStreamBackLoop {
103 | run_background_stream_back_loop_func_();
104 | }
105 |
106 | - (void)exitBackgroundLoop {
107 | exit_background_loop_func_();
108 | }
109 |
110 | @end
111 |
--------------------------------------------------------------------------------
/website/src/docs/apple/transcription.md:
--------------------------------------------------------------------------------
1 | # Transcription
2 |
3 | Convert speech to text using Apple's on-device `SpeechAnalyzer` and `SpeechTranscriber`.
4 |
5 | ## Overview
6 |
7 | This provider uses Apple's [`SpeechAnalyzer`](https://developer.apple.com/documentation/speech/speechanalyzer) and [`SpeechTranscriber`](https://developer.apple.com/documentation/speech/speechtranscriber) to perform speech-to-text transcription entirely on-device. This is Apple's new, more advanced speech recognition model, available in iOS 26 and later.
8 |
9 | ## Requirements
10 |
11 | - **iOS 26+** - SpeechAnalyzer requires iOS 26
12 |
13 | ## Usage
14 |
15 | ### Basic Transcription
16 |
17 | ```tsx
18 | const file = await fetch(
19 | 'https://www.voiptroubleshooter.com/open_speech/american/OSR_us_000_0010_8k.wav'
20 | )
21 | const audio = await file.arrayBuffer()
22 |
23 | const response = await experimental_transcribe({
24 | model: apple.transcriptionModel(),
25 | audio,
26 | })
27 |
28 | console.log(response.text)
29 | console.log(response.segments)
30 | console.log(response.durationInSeconds)
31 | ```
32 |
33 | The `audio` parameter accepts either an `ArrayBuffer` or a base64-encoded string.
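
For example, if you already hold the audio as a base64 string (the `base64Audio` variable below is assumed to come from your own loading code), you can pass it as-is:

```tsx
const response = await experimental_transcribe({
  model: apple.transcriptionModel(),
  audio: base64Audio, // base64-encoded audio file contents
})
```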
34 |
35 | > [!NOTE]
36 | > The API currently does not support streaming or live transcription. It would be relatively easy to add, so please let us know on GitHub if you need it.
37 |
38 | ## Language Support
39 |
40 | The transcription model supports multiple languages with automatic language detection. You can also set a specific language explicitly:
41 |
42 | ```tsx
43 | await experimental_transcribe({
44 | model: apple.transcriptionModel(),
45 | audio: audioArrayBuffer,
46 | providerOptions: {
47 | apple: {
48 | language: 'fr',
49 | },
50 | },
51 | })
52 | ```
53 |
54 | > [!NOTE]
55 | > By default, the transcription model will use the device language.
56 |
57 | ## Asset Management
58 |
59 | Apple's SpeechAnalyzer requires downloading language-specific assets to the device. The provider automatically requests assets when needed, but you can also prepare them manually:
60 |
61 | ```tsx
62 | import { NativeAppleTranscription } from '@react-native-ai/apple'
63 |
64 | await NativeAppleTranscription.prepare('en')
65 | ```
66 |
67 | When you call `prepare()` for a language, the system first checks if the required assets are already present on the device. If they are, the method resolves immediately without any network activity, making subsequent transcription operations instant.
68 |
69 | > [!NOTE]
70 | > All language models and assets are stored in Apple's system-wide assets catalog, separate from your app bundle. This means zero impact on your app's size. Assets may already be available if the user has previously used other apps, or if system features have requested them.
71 |
72 | ## Direct API Access
73 |
74 | For advanced use cases, you can access the speech transcription API directly:
75 |
76 | ### AppleTranscription
77 |
78 | ```tsx
79 | import { AppleTranscription } from '@react-native-ai/apple'
80 |
81 | // Check availability for a language
82 | const isAvailable: boolean = await AppleTranscription.isAvailable(language: string)
83 |
84 | // Prepare language assets
85 | await AppleTranscription.prepare(language: string)
86 |
87 | // Transcribe audio with timing information
88 | const { segments, duration } = await AppleTranscription.transcribe(
89 | arrayBuffer,
90 | language: string
91 | )
92 | ```
93 |
94 | ## Benchmarks
95 |
96 | Performance comparison showing transcription speed for a 34-minute audio file ([source](https://www.macrumors.com/2025/06/18/apple-transcription-api-faster-than-whisper/)):
97 |
98 | | System | Processing Time | Performance |
99 | | ------------------------- | ------------------- | ----------- |
100 | | Apple SpeechAnalyzer | 45 seconds | Baseline |
101 | | MacWhisper Large V3 Turbo | 1 minute 41 seconds | 2.2× slower |
102 |
--------------------------------------------------------------------------------
/packages/apple-llm/ios/speech/AppleSpeechImpl.swift:
--------------------------------------------------------------------------------
1 | //
2 | // AppleSpeechImpl.swift
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 04/08/2025.
6 | //
7 |
8 | import Foundation
9 | import AVFoundation
10 |
11 | @objc
12 | public class AppleSpeechImpl: NSObject {
13 | private let speechSynthesizer = AVSpeechSynthesizer()
14 |
15 | @objc
16 | public func getVoices(_ resolve: @escaping ([Any]) -> Void, reject: @escaping (String, String, Error?) -> Void) {
17 | Task {
18 | if #available(iOS 17.0, *) {
19 | await withCheckedContinuation { continuation in
20 | AVSpeechSynthesizer.requestPersonalVoiceAuthorization { _ in
21 | continuation.resume()
22 | }
23 | }
24 | }
25 |
26 | let allVoices = AVSpeechSynthesisVoice.speechVoices()
27 | let voiceInfos = allVoices.map { $0.toDictionary() }
28 | resolve(voiceInfos)
29 | }
30 | }
31 |
32 | @objc
33 | public func generateAudio(_ text: String, options: [String: Any], resolve: @escaping ([String: Any]) -> Void, reject: @escaping (String, String, Error?) -> Void) {
34 | let utterance = AVSpeechUtterance(string: text)
35 |
36 | utterance.voice = if let voiceId = options["voice"] as? String {
37 | AVSpeechSynthesisVoice(identifier: voiceId)
38 | } else if let language = options["language"] as? String {
39 | AVSpeechSynthesisVoice(language: language)
40 | } else {
41 | nil
42 | }
43 |
44 | var collectedBuffers: [AVAudioPCMBuffer] = []
45 |
46 | speechSynthesizer.write(utterance) { buffer in
47 | guard let pcm = buffer as? AVAudioPCMBuffer else { return }
48 |
49 | if pcm.frameLength == 0 {
50 | let result = AppleSpeechImpl.concatenatePCMDataWithFormat(from: collectedBuffers)
51 | resolve(result)
52 | return
53 | }
54 |
55 | collectedBuffers.append(pcm)
56 | }
57 | }
58 | }
59 |
60 | extension AppleSpeechImpl {
61 | /// Concatenate raw PCM data from AVAudioPCMBuffer array and return format information
62 | /// JavaScript will handle WAV header generation
63 | static func concatenatePCMDataWithFormat(from buffers: [AVAudioPCMBuffer]) -> [String: Any] {
64 | guard let first = buffers.first else {
65 | return [
66 | "data": Data(),
67 | "sampleRate": 22050,
68 | "channels": 1,
69 | "bitsPerSample": 32,
70 | "formatType": 1
71 | ]
72 | }
73 |
74 | let channels = Int(first.format.channelCount)
75 | let sampleRate = Int(first.format.sampleRate)
76 |
77 | // Determine format type and bits per sample based on AVAudioCommonFormat
78 | let (formatType, bitsPerSample): (Int, Int) = {
79 | switch first.format.commonFormat {
80 | case .pcmFormatFloat32:
81 | return (1, 32)
82 | case .pcmFormatFloat64:
83 | return (1, 64)
84 | case .pcmFormatInt16:
85 | return (0, 16)
86 | case .pcmFormatInt32:
87 | return (0, 32)
88 | default:
89 | return (1, 32)
90 | }
91 | }()
92 |
93 | // Estimate capacity from actual valid bytes in each buffer
94 | let estimatedCapacity = buffers.reduce(0) { acc, buf in
95 | let audioBuffer = buf.audioBufferList.pointee.mBuffers
96 | return acc + Int(audioBuffer.mDataByteSize)
97 | }
98 |
99 | var payload = Data()
100 | payload.reserveCapacity(estimatedCapacity)
101 |
102 | // Concatenate raw PCM payloads using mDataByteSize
103 | for buf in buffers {
104 | let m = buf.audioBufferList.pointee.mBuffers
105 | let byteCount = Int(m.mDataByteSize)
106 | if let p = m.mData {
107 | payload.append(contentsOf: UnsafeRawBufferPointer(start: p, count: byteCount))
108 | }
109 | }
110 |
111 | return [
112 | "data": payload,
113 | "sampleRate": sampleRate,
114 | "channels": channels,
115 | "bitsPerSample": bitsPerSample,
116 | "formatType": formatType
117 | ]
118 | }
119 | }
120 |
121 | extension AVSpeechSynthesisVoice {
122 | func toDictionary() -> [String: Any] {
123 | var data = [
124 | "identifier": self.identifier,
125 | "name": self.name,
126 | "language": self.language,
127 | "quality": self.quality,
128 | "isPersonalVoice": false,
129 | "isNoveltyVoice": false
130 | ] as [String : Any]
131 |
132 | if #available(iOS 17.0, *) {
133 | data["isPersonalVoice"] = self.voiceTraits.contains(.isPersonalVoice)
134 | data["isNoveltyVoice"] = self.voiceTraits.contains(.isNoveltyVoice)
135 | }
136 |
137 | return data
138 | }
139 | }
140 |
--------------------------------------------------------------------------------
/packages/mlc/android/src/main/java/com/callstack/ai/ModelDownloader.kt:
--------------------------------------------------------------------------------
1 | package com.callstack.ai
2 |
3 | import kotlinx.coroutines.Dispatchers
4 | import kotlinx.coroutines.async
5 | import kotlinx.coroutines.awaitAll
6 | import kotlinx.coroutines.coroutineScope
7 | import kotlinx.coroutines.withContext
8 | import kotlinx.serialization.Serializable
9 | import kotlinx.serialization.json.Json
10 | import java.io.File
11 | import java.net.URL
12 | import java.util.concurrent.atomic.AtomicInteger
13 |
14 | /**
15 | * ModelDownloader handles downloading and managing MLC model files.
16 | *
17 | * This class is responsible for:
18 | * - Downloading model tokenizer files and parameter files from remote URLs
19 | * - Managing concurrent downloads with controlled parallelism (max 3 simultaneous downloads)
20 | * - Tracking download progress and providing real-time updates
21 | */
22 | class ModelDownloader(
23 | private val modelUrl: String,
24 | private val modelDir: File
25 | ) {
26 | private var paramsConfig: ParamsConfig = ParamsConfig(emptyList())
27 |
28 | companion object {
29 | const val PARAMS_CONFIG_FILENAME = "ndarray-cache.json"
30 | const val MODEL_CONFIG_FILENAME = "mlc-chat-config.json"
31 | private val json = Json { ignoreUnknownKeys = true }
32 | }
33 |
34 | /**
35 | * Downloads all required model files including tokenizer files and parameter files.
36 | *
37 | * @param onProgress Optional callback for progress updates (current, total)
38 | */
39 | suspend fun downloadModel(onProgress: (current: Int, total: Int) -> Unit = { _, _ -> }) {
40 | val progressCounter = AtomicInteger(0)
41 |
42 | // First download model config and params config
43 | coroutineScope {
44 | listOf(
45 | async {
46 | downloadSingleFile(MODEL_CONFIG_FILENAME)
47 | },
48 | async {
49 | downloadSingleFile(PARAMS_CONFIG_FILENAME)
50 | }
51 | ).awaitAll()
52 | }
53 |
54 | // Load model config to get tokenizer files
55 | val modelConfigFile = File(modelDir, MODEL_CONFIG_FILENAME)
56 | require(modelConfigFile.exists()) { "Model config file not found: ${modelConfigFile.path}" }
57 | val modelConfig = json.decodeFromString<ModelConfig>(modelConfigFile.readText())
58 |
59 | // Load params config to get parameter files
60 | val paramsConfigFile = File(modelDir, PARAMS_CONFIG_FILENAME)
61 | require(paramsConfigFile.exists()) { "Params config file not found: ${paramsConfigFile.path}" }
62 | paramsConfig = json.decodeFromString(paramsConfigFile.readText())
63 |
64 | // Now download tokenizer files and parameter files
65 | val allFiles = modelConfig.tokenizer_files + paramsConfig.records.map { it.dataPath }
66 | val remainingFiles = allFiles.filter { !File(modelDir, it).exists() }
67 |
68 | if (remainingFiles.isNotEmpty()) {
69 | coroutineScope {
70 | remainingFiles.chunked(3).forEach { chunk ->
71 | chunk.map { filename ->
72 | async {
73 | downloadSingleFile(filename)
74 | onProgress(progressCounter.incrementAndGet(), remainingFiles.size)
75 | }
76 | }.awaitAll()
77 | }
78 | }
79 | }
80 |
81 | // Final progress update
82 | onProgress(progressCounter.get(), remainingFiles.size)
83 | }
84 |
85 |
86 | /**
87 | * Downloads a single file from the remote URL.
88 | * Returns early if the file already exists.
89 | */
90 | private suspend fun downloadSingleFile(filename: String): Unit = withContext(Dispatchers.IO) {
91 | val targetFile = File(modelDir, filename)
92 |
93 | // Skip if file already exists
94 | if (targetFile.exists()) {
95 | return@withContext
96 | }
97 |
98 | val url = URL("$modelUrl/resolve/main/$filename")
99 |
100 | // Ensure parent directory exists
101 | targetFile.parentFile?.mkdirs()
102 |
103 | // Download to temporary file first, then rename to avoid partial files
104 | val tempFile = File(modelDir, "$filename.tmp")
105 |
106 | url.openStream().use { input ->
107 | tempFile.outputStream().use { output ->
108 | input.copyTo(output)
109 | }
110 | }
111 |
112 | require(tempFile.exists()) { "Failed to download file: $filename" }
113 | require(tempFile.renameTo(targetFile)) { "Failed to rename temp file: $filename" }
114 | }
115 | }
116 |
117 | @Serializable
118 | data class ParamsConfig(val records: List<ParamsRecord>)
119 |
120 | @Serializable
121 | data class ParamsRecord(val dataPath: String)
122 |
123 | @Serializable
124 | data class ModelConfig(
125 | val tokenizer_files: List<String>,
126 | val context_window_size: Int,
127 | val prefill_chunk_size: Int
128 | )
129 |
--------------------------------------------------------------------------------
/packages/apple-llm/ios/transcription/AppleTranscription.mm:
--------------------------------------------------------------------------------
1 | //
2 | // AppleTranscription.mm
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 01/08/2025.
6 | //
7 |
8 | #if __has_include("AppleLLM/AppleLLM-Swift.h")
9 | #import "AppleLLM/AppleLLM-Swift.h"
10 | #else
11 | #import "AppleLLM-Swift.h"
12 | #endif
13 |
14 | #import
15 | #import
16 | #import
17 | #import
18 |
19 | #import
20 | #import
21 |
22 | #import
23 |
24 | @interface AppleTranscription : NativeAppleTranscriptionSpecBase
25 | @property (strong, nonatomic) AppleTranscriptionImpl *transcription;
26 | @end
27 |
28 | using namespace facebook;
29 | using namespace JS::NativeAppleLLM;
30 | using namespace react;
31 |
32 | @implementation AppleTranscription
33 |
34 | @synthesize callInvoker;
35 |
36 | - (instancetype)init {
37 | self = [super init];
38 | if (self) {
39 | _transcription = [AppleTranscriptionImpl new];
40 | }
41 | return self;
42 | }
43 |
44 | + (NSString *)moduleName {
45 | return @"NativeAppleTranscription";
46 | }
47 |
48 | - (void)installJSIBindingsWithRuntime:(facebook::jsi::Runtime &)rt callInvoker:(const std::shared_ptr<facebook::react::CallInvoker> &)jsInvoker {
49 | AppleTranscriptionImpl *transcriptionModule = _transcription;
50 |
51 | @try {
52 | auto global = rt.global();
53 |
54 | auto transcribeFunc = jsi::Function::createFromHostFunction(
55 | rt,
56 | jsi::PropNameID::forAscii(rt, "transcribe"),
57 | 2,
58 | [transcriptionModule, jsInvoker](jsi::Runtime& rt, const jsi::Value& thisVal, const jsi::Value* args, size_t count) -> jsi::Value {
59 | auto arrayBuffer = args[0].asObject(rt);
60 | if (!arrayBuffer.isArrayBuffer(rt)) {
61 | throw jsi::JSError(rt, "First argument must be an ArrayBuffer");
62 | }
63 |
64 | auto buffer = arrayBuffer.getArrayBuffer(rt);
65 | NSData *audioData = [NSData dataWithBytes:buffer.data(rt) length:buffer.size(rt)];
66 |
67 | auto languageStr = args[1].asString(rt).utf8(rt);
68 | NSString *language = [NSString stringWithUTF8String:languageStr.c_str()];
69 |
70 | auto Promise = rt.global().getPropertyAsFunction(rt, "Promise");
71 |
72 | return Promise.callAsConstructor(rt, jsi::Function::createFromHostFunction(
73 | rt,
74 | jsi::PropNameID::forAscii(rt, "executor"),
75 | 2,
76 | [transcriptionModule, audioData, language, jsInvoker](jsi::Runtime& rt, const jsi::Value& thisVal, const jsi::Value* args, size_t count) -> jsi::Value {
77 | using ResolveCallback = facebook::react::AsyncCallback<>;
78 | using RejectCallback = facebook::react::AsyncCallback;
79 |
80 | auto resolve = ResolveCallback(rt, args[0].asObject(rt).asFunction(rt), jsInvoker);
81 | auto reject = RejectCallback(rt, args[1].asObject(rt).asFunction(rt), jsInvoker);
82 |
83 | [transcriptionModule transcribe:audioData language:language resolve:^(id result) {
84 | resolve.call([result](jsi::Runtime& rt, jsi::Function& resolveFunc) {
85 | auto jsResult = react::TurboModuleConvertUtils::convertObjCObjectToJSIValue(rt, result);
86 | resolveFunc.call(rt, jsResult);
87 | });
88 | } reject:^(NSString *code, NSString *message, NSError *error) {
89 | reject.call([message](jsi::Runtime& rt, jsi::Function& rejectFunc) {
90 | auto jsError = jsi::String::createFromUtf8(rt, [message UTF8String]);
91 | rejectFunc.call(rt, jsError);
92 | });
93 | }];
94 |
95 | return jsi::Value::undefined();
96 | }
97 | ));
98 | }
99 | );
100 |
101 | global.setProperty(rt, "__apple__llm__transcribe__", transcribeFunc);
102 | } @catch (NSException *exception) {
103 | throw jsi::JSError(rt, [[NSString stringWithFormat:@"Failed to install transcribe handler: %@", exception.reason] UTF8String]);
104 | }
105 | }
106 |
107 | - (std::shared_ptr<react::TurboModule>)getTurboModule:(const react::ObjCTurboModule::InitParams &)params {
108 | return std::make_shared<react::NativeAppleTranscriptionSpecJSI>(params);
109 | }
110 |
111 | - (nonnull NSNumber *)isAvailable:(nonnull NSString *)language {
112 | return @([_transcription isAvailable:language]);
113 | }
114 |
115 | - (void)prepare:(nonnull NSString *)language resolve:(nonnull RCTPromiseResolveBlock)resolve reject:(nonnull RCTPromiseRejectBlock)reject {
116 | [_transcription prepare:language resolve:resolve reject:reject];
117 | }
118 |
119 | @end
120 |
--------------------------------------------------------------------------------
/website/src/docs/apple/embeddings.md:
--------------------------------------------------------------------------------
1 | # Embeddings
2 |
3 | Generate text embeddings using Apple's on-device NLContextualEmbedding model with the AI SDK.
4 |
5 | ## Overview
6 |
7 | This provider uses Apple's [`NLContextualEmbedding`](https://developer.apple.com/documentation/naturallanguage/nlcontextualembedding) to generate contextual text embeddings entirely on-device. This is Apple's implementation of a BERT-like transformer model integrated into iOS 17+, providing privacy-preserving text understanding capabilities.
8 |
9 | ## Model Architecture
10 |
11 | NLContextualEmbedding uses a transformer-based architecture trained with masked language modeling (similar to BERT). Apple provides three optimized models grouped by writing system:
12 |
13 | - **Latin Script Model** (20 languages): English, Spanish, French, German, Italian, Portuguese, Dutch, and others - produces 512-dimensional embeddings
14 | - **Cyrillic Script Model** (4 languages): Russian, Ukrainian, Bulgarian, Serbian
15 | - **CJK Model** (3 languages): Chinese, Japanese, Korean
16 |
17 | Each model is multilingual within its script family, enabling cross-lingual semantic understanding. The models are compressed and optimized for Apple's Neural Engine, typically under 100MB when downloaded.
18 |
19 | ## Requirements
20 |
21 | - **iOS 17+** - NLContextualEmbedding requires iOS 17 or later
22 |
23 | ## Usage
24 |
25 | ### Single Text
26 |
27 | ```tsx
28 | import { embed } from 'ai'
29 | import { apple } from '@react-native-ai/apple'
30 |
31 | const { embedding } = await embed({
32 | model: apple.textEmbeddingModel(),
33 | value: 'Hello world',
34 | })
35 |
36 | console.log(embedding)
37 | ```
38 |
39 | ### Multiple Texts
40 |
41 | ```tsx
42 | import { embedMany } from 'ai'
43 | import { apple } from '@react-native-ai/apple'
44 |
45 | const { embeddings } = await embedMany({
46 | model: apple.textEmbeddingModel(),
47 | values: ['Hello world', 'How are you?', 'Goodbye'],
48 | })
49 |
50 | console.log(embeddings)
51 | ```
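
Embeddings are typically compared against each other to measure semantic similarity. A minimal sketch using the `cosineSimilarity` helper exported by the AI SDK:

```tsx
import { cosineSimilarity, embedMany } from 'ai'
import { apple } from '@react-native-ai/apple'

const { embeddings } = await embedMany({
  model: apple.textEmbeddingModel(),
  values: ['A cat sits on the mat', 'A kitten rests on a rug'],
})

// Values closer to 1 indicate more similar meaning
const similarity = cosineSimilarity(embeddings[0], embeddings[1])
console.log(similarity)
```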
52 |
53 | ## Language Support
54 |
55 | The embeddings model supports multiple languages. You can specify the language using [ISO 639-1 codes](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) or full names:
56 |
57 | ```tsx
58 | await embed({
59 | model: apple.textEmbeddingModel(),
60 | value: 'Bonjour',
61 | providerOptions: {
62 | apple: {
63 | language: 'fr',
64 | }
65 | }
66 | })
67 | ```
68 |
69 | For a list of all supported languages, [check the Apple documentation](https://developer.apple.com/documentation/naturallanguage/nlcontextualembedding#overview).
70 |
71 | > [!NOTE]
72 | > By default, the embeddings model will use the device language.
73 |
74 | ## Asset Management
75 |
76 | Apple's NLContextualEmbedding requires downloading language-specific assets to the device. The provider automatically requests assets when needed, but you can also prepare them manually:
77 |
78 | ```tsx
79 | import { NativeAppleEmbeddings } from '@react-native-ai/apple'
80 |
81 | await NativeAppleEmbeddings.prepare('en')
82 | ```
83 |
84 | When you call `prepare()` for a language, the system first checks if the required assets are already present on the device. If they are, the method resolves immediately without any network activity, making subsequent embedding operations instant.
85 |
86 | > [!NOTE]
87 | > All language models and assets are stored in Apple's system-wide assets catalog, separate from your app bundle. This means zero impact on your app's size. Assets may already be available if the user has previously used other apps, or if system features have requested them.
88 |
89 | ## Direct API Access
90 |
91 | For advanced use cases, you can access the embeddings API directly:
92 |
93 | ### AppleEmbeddings
94 |
95 | ```tsx
96 | import { AppleEmbeddings } from '@react-native-ai/apple'
97 |
98 | // Get embedding model information
99 | const modelInfo: EmbeddingInfo = await AppleEmbeddings.getInfo(language: string)
100 |
101 | // Prepare language assets
102 | await AppleEmbeddings.prepare(language: string)
103 |
104 | // Generate embeddings
105 | const embeddings = await AppleEmbeddings.generateEmbeddings(
106 | values: string[],
107 | language: string
108 | ): Promise<number[][]>
109 |
110 | export interface EmbeddingInfo {
111 | hasAvailableAssets: boolean
112 | dimension: number
113 | languages: string[]
114 | maximumSequenceLength: number
115 | modelIdentifier: string
116 | revision: number
117 | scripts: string[]
118 | }
119 | ```
120 |
121 | ## Benchmarks
122 |
123 | Performance results showing processing time in milliseconds per embedding across different text lengths:
124 |
125 | | Device | Short (~10 tokens) | Medium (~30 tokens) | Long (~90 tokens) |
126 | |----------------|--------------------|----------------------|-------------------|
127 | | iPhone 16 Pro | 19.19 | 21.53 | 33.59 |
128 |
129 | Each category is tested with 5 consecutive runs to calculate reliable averages and account for system variability.
130 |
--------------------------------------------------------------------------------
/apps/expo-example/src/App.tsx:
--------------------------------------------------------------------------------
1 | import './global.css'
2 |
3 | import {
4 | createNativeBottomTabNavigator,
5 | NativeBottomTabNavigationOptions,
6 | } from '@bottom-tabs/react-navigation'
7 | import Icon from '@react-native-vector-icons/material-icons'
8 | import { NavigationContainer } from '@react-navigation/native'
9 | import {
10 | createNativeStackNavigator,
11 | NativeStackNavigatorProps,
12 | } from '@react-navigation/native-stack'
13 | import { StatusBar } from 'expo-status-bar'
14 | import React from 'react'
15 | import { Platform } from 'react-native'
16 | import { KeyboardProvider } from 'react-native-keyboard-controller'
17 | import { SafeAreaProvider } from 'react-native-safe-area-context'
18 |
19 | import AppleLLMScreen from './screens/apple/AppleLLMScreen'
20 | import PlaygroundScreen from './screens/apple/PlaygroundScreen'
21 | import SpeechScreen from './screens/apple/SpeechScreen'
22 | import TranscribeScreen from './screens/apple/TranscribeScreen'
23 | import LlamaRNScreen from './screens/LlamaRNScreen'
24 | import MLCScreen from './screens/MLCScreen'
25 |
26 | const Tab = createNativeBottomTabNavigator()
27 |
28 | const RootStack = createNativeStackNavigator()
29 |
30 | type ScreenProto = {
31 | routeName: string
32 | screenOptions?: NativeStackNavigatorProps['screenOptions']
33 | tabScreenOptions: NativeBottomTabNavigationOptions
34 | Component: null | (() => React.JSX.Element)
35 | }
36 |
37 | const screens = (
38 | [
39 | {
40 | routeName: 'AppleLLM',
41 | screenOptions: { title: 'AppleLLM' },
42 | tabScreenOptions: {
43 | tabBarIcon: () => ({
44 | sfSymbol: 'brain.head.profile',
45 | }),
46 | },
47 | Component: AppleLLMScreen,
48 | },
49 | {
50 | routeName: 'Llama',
51 | screenOptions: { title: 'Llama.rn' },
52 | tabScreenOptions: {
53 | tabBarIcon: () =>
54 | Platform.OS === 'ios'
55 | ? { sfSymbol: 'sparkles' }
56 | : Icon.getImageSourceSync('auto-fix-high', 24),
57 | },
58 | Component: LlamaRNScreen,
59 | },
60 | {
61 | routeName: 'MLC',
62 | screenOptions: { title: 'MLC Engine' },
63 | tabScreenOptions: {
64 | tabBarIcon: () =>
65 | Platform.OS === 'ios'
66 | ? { sfSymbol: 'cpu' }
67 | : Icon.getImageSourceSync('memory', 24),
68 | },
69 | Component: MLCScreen,
70 | },
71 | {
72 | routeName: 'Playground',
73 | screenOptions: { title: 'Playground' },
74 | tabScreenOptions: {
75 | tabBarIcon: () => ({ sfSymbol: 'play.circle' }),
76 | },
77 | Component: PlaygroundScreen,
78 | },
79 | {
80 | routeName: 'Transcribe',
81 | screenOptions: {
82 | title: 'Speech to Text',
83 | },
84 | tabScreenOptions: {
85 | tabBarIcon: () => ({ sfSymbol: 'text.quote' }),
86 | },
87 | Component: TranscribeScreen,
88 | },
89 | {
90 | routeName: 'Speech',
91 | screenOptions: { title: 'Text to Speech' },
92 | tabScreenOptions: {
93 | tabBarIcon: () => ({ sfSymbol: 'speaker.wave.3' }),
94 | },
95 | Component: SpeechScreen,
96 | },
97 | ] as ScreenProto[]
98 | )
99 | // filter only components available on the current platform; non-available components have platform-specific entrypoints exporting null
100 | .filter(
101 | (
102 | screen
103 | ): screen is ScreenProto & {
104 | Component: NonNullable
105 | } => screen.Component !== null
106 | )
107 | // initialize stack navigators
108 | .map((screen) => ({
109 | ...screen,
110 | StackNavigator: createNativeStackNavigator(),
111 | }))
112 | // wrap each screen in its own stack navigator
113 | .map(({ StackNavigator, Component, screenOptions, ...screen }) => ({
114 | ...screen,
115 | Component: () => (
116 |
117 |
122 |
123 | ),
124 | }))
125 |
126 | function Tabs() {
127 | return (
128 |
129 | {screens.map(({ routeName: name, Component, tabScreenOptions }) => (
130 |
136 | ))}
137 |
138 | )
139 | }
140 |
141 | export default function App() {
142 | return (
143 |
144 |
145 |
146 |
147 |
152 |
153 |
154 |
155 |
156 |
157 | )
158 | }
159 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://www.callstack.com/open-source?utm_campaign=generic&utm_source=github&utm_medium=referral&utm_content=react-native-ai)
2 |
3 | # React Native AI
4 |
5 | A collection of on-device AI primitives for React Native with first-class Vercel AI SDK support. Run AI models directly on users' devices for privacy-preserving, low-latency inference without server costs.
6 |
7 | ## Features
8 |
9 | - 🚀 **Instant AI** - Use built-in system models immediately without downloads
10 | - 🔒 **Privacy-first** - All processing happens on-device, data stays local
11 | - 🎯 **Vercel AI SDK compatible** - Drop-in replacement with familiar APIs
12 | - 🎨 **Complete toolkit** - Text generation, embeddings, transcription, speech synthesis
13 |
14 | ## Available Providers
15 |
16 | ### Apple
17 |
18 | Native integration with Apple's on-device AI capabilities:
19 |
20 | - **Text Generation** - Apple Foundation Models for chat and completion
21 | - **Embeddings** - NLContextualEmbedding for 512-dimensional semantic vectors
22 | - **Transcription** - SpeechAnalyzer for fast, accurate speech-to-text
23 | - **Speech Synthesis** - AVSpeechSynthesizer for natural text-to-speech with system voices
24 |
25 | #### Installation
26 |
27 | ```bash
28 | npm install @react-native-ai/apple
29 | ```
30 |
31 | No additional linking is needed; the package works immediately on iOS devices (autolinked).
32 |
33 | #### Usage
34 |
35 | ```typescript
36 | import { apple } from '@react-native-ai/apple'
37 | import {
38 | generateText,
39 | embed,
40 | experimental_transcribe as transcribe,
41 | experimental_generateSpeech as speech
42 | } from 'ai'
43 |
44 | // Text generation with Apple Intelligence
45 | const { text } = await generateText({
46 | model: apple(),
47 | prompt: 'Explain quantum computing'
48 | })
49 |
50 | // Generate embeddings
51 | const { embedding } = await embed({
52 | model: apple.textEmbeddingModel(),
53 | value: 'Hello world'
54 | })
55 |
56 | // Transcribe audio
57 | const { text: transcript } = await transcribe({
58 | model: apple.transcriptionModel(),
59 | audio: audioBuffer
60 | })
61 |
62 | // Text-to-speech
63 | const { audio } = await speech({
64 | model: apple.speechModel(),
65 | text: 'Hello from Apple!'
66 | })
67 | ```
68 |
69 | #### Availability
70 |
71 | | Feature | iOS Version | Additional Requirements |
72 | |---------|-------------|------------------------|
73 | | Text Generation | iOS 26+ | Apple Intelligence device |
74 | | Embeddings | iOS 17+ | - |
75 | | Transcription | iOS 26+ | - |
76 | | Speech Synthesis | iOS 13+ | iOS 17+ for Personal Voice |
77 |
78 | See the [Apple documentation](https://react-native-ai.dev/docs/apple/getting-started) for detailed setup and usage guides.
79 |
80 | ### MLC Engine (Experimental)
81 |
82 | Run popular open-source LLMs directly on-device using MLC's optimized runtime.
83 |
84 | #### Installation
85 |
86 | ```bash
87 | npm install @react-native-ai/mlc
88 | ```
89 |
90 | Requires the "Increased Memory Limit" capability in Xcode. See the [getting started guide](https://react-native-ai.dev/docs/mlc/getting-started) for setup instructions.
91 |
92 | #### Usage
93 |
94 | ```typescript
95 | import { mlc } from '@react-native-ai/mlc'
96 | import { generateText } from 'ai'
97 |
98 | // Create model instance
99 | const model = mlc.languageModel('Llama-3.2-3B-Instruct')
100 |
101 | // Download and prepare model (one-time setup)
102 | await model.download()
103 | await model.prepare()
104 |
105 | // Generate response with Llama via MLC engine
106 | const { text } = await generateText({
107 | model,
108 | prompt: 'Explain quantum computing'
109 | })
110 | ```
111 |
112 | #### Available Models
113 |
114 | | Model ID | Size |
115 | |----------|------|
116 | | `Llama-3.2-3B-Instruct` | ~2GB |
117 | | `Phi-3-mini-4k-instruct` | ~2.5GB |
118 | | `Mistral-7B-Instruct` | ~4.5GB |
119 | | `Qwen2.5-1.5B-Instruct` | ~1GB |
120 |
121 | > [!NOTE]
122 | > MLC requires iOS devices with sufficient memory (1-8GB depending on model). The prebuilt runtime supports the models listed above. For other models or custom configurations, you'll need to recompile the MLC runtime from source.
123 |
124 | ### Google (Coming Soon)
125 |
126 | Support for Google's on-device models is planned for future releases.
127 |
128 | ## Documentation
129 |
130 | Comprehensive guides and API references are available at [react-native-ai.dev](https://react-native-ai.dev).
131 |
132 | ## Contributing
133 |
134 | Read the [contribution guidelines](/CONTRIBUTING.md) before contributing.
135 |
136 | ## Made with ❤️ at Callstack
137 |
138 | **react-native-ai** is an open source project and will always remain free to use. If you think it's cool, please star it 🌟.
139 |
140 | [Callstack][callstack-readme-with-love] is a group of React and React Native geeks, contact us at [hello@callstack.com](mailto:hello@callstack.com) if you need any help with these or just want to say hi!
141 |
142 | ---
143 |
144 | Made with [create-react-native-library](https://github.com/callstack/react-native-builder-bob)
145 |
146 | [callstack-readme-with-love]: https://callstack.com/?utm_source=github.com&utm_medium=referral&utm_campaign=react-native-ai&utm_term=readme-with-love
147 |
--------------------------------------------------------------------------------
/website/src/docs/llama/model-management.md:
--------------------------------------------------------------------------------
1 | # Model Management
2 |
3 | This guide covers the complete lifecycle of Llama models - from discovery and download to cleanup and removal.
4 |
5 | ## Finding Models
6 |
7 | Unlike MLC, which has a prebuilt set of models, the Llama provider can run any GGUF model from HuggingFace. You can browse available models at [HuggingFace GGUF Models](https://huggingface.co/models?library=gguf).
8 |
9 | ### Recommended Models
10 |
11 | Here are some popular models that work well on mobile devices:
12 |
13 | | Model ID | Size | Best For |
14 | | ----------------------------------------------------------------- | ------ | -------------------------------- |
15 | | `ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf` | ~1.8GB | Balanced performance and quality |
16 | | `Qwen/Qwen2.5-3B-Instruct-GGUF/qwen2.5-3b-instruct-q3_k_m.gguf` | ~1.9GB | General conversations |
17 | | `lmstudio-community/gemma-2-2b-it-GGUF/gemma-2-2b-it-Q3_K_M.gguf` | ~2.3GB | High quality responses |
18 |
19 | > **Note**: When selecting models, consider quantization levels (Q3, Q4, Q5, etc.). Lower quantization = smaller size but potentially lower quality. Q4_K_M is a good balance for mobile.
20 |
21 | ## Model Lifecycle
22 |
23 | ### Discovering Downloaded Models
24 |
25 | Get the list of models that have been downloaded to the device:
26 |
27 | ```typescript
28 | import { LlamaEngine } from '@react-native-ai/llama'
29 |
30 | const models = await LlamaEngine.getModels()
31 |
32 | console.log('Downloaded models:', models)
33 | // Output: [{ model_id: 'SmolLM3-Q4_K_M.gguf', path: '...', filename: '...', sizeBytes: 1800000000 }, ...]
34 | ```
35 |
36 | ### Creating Model Instance
37 |
38 | Create a model instance using the `llama.languageModel()` method:
39 |
40 | ```typescript
41 | import { llama } from '@react-native-ai/llama'
42 |
43 | const model = llama.languageModel(
44 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
45 | )
46 | ```
47 |
48 | With configuration options:
49 |
50 | ```typescript
51 | const model = llama.languageModel(
52 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf',
53 | {
54 | n_ctx: 4096, // Context window size (default: 2048)
55 | n_gpu_layers: 99, // GPU layers for acceleration (default: 99)
56 | }
57 | )
58 | ```
59 |
60 | ### Checking Download Status
61 |
62 | Check if a model is already downloaded:
63 |
64 | ```typescript
65 | // Using instance method
66 | const isReady = await model.isDownloaded()
67 |
68 | // Or using LlamaEngine
69 | import { LlamaEngine } from '@react-native-ai/llama'
70 | const isDownloaded = await LlamaEngine.isDownloaded(
71 | 'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf'
72 | )
73 | ```
74 |
75 | ### Downloading Models
76 |
77 | Models are downloaded from HuggingFace automatically, based on the model ID:
78 |
79 | ```typescript
80 | await model.download()
81 |
82 | console.log('Download complete!')
83 | ```
84 |
85 | You can track download progress:
86 |
87 | ```typescript
88 | await model.download((progress) => {
89 | console.log(`Download: ${progress.percentage}%`)
90 | })
91 | ```
92 |
93 | ### Preparing Models
94 |
95 | After downloading, prepare the model for inference (loads it into memory):
96 |
97 | ```typescript
98 | await model.prepare()
99 | ```
100 |
101 | ### Using Models
102 |
103 | Once prepared, use the model with AI SDK functions:
104 |
105 | ```typescript
106 | import { streamText } from 'ai'
107 |
108 | const { textStream } = streamText({
109 | model,
110 | prompt: 'Hello! Introduce yourself briefly.',
111 | })
112 |
113 | for await (const delta of textStream) {
114 | console.log(delta)
115 | }
116 | ```
117 |
118 | ### Unloading Models
119 |
120 | Unload the model from memory to free resources:
121 |
122 | ```typescript
123 | await model.unload()
124 | ```
125 |
126 | ### Removing Downloaded Models
127 |
128 | Delete downloaded model files to free storage:
129 |
130 | ```typescript
131 | await model.remove()
132 | ```
133 |
134 | ## Custom Storage Path
135 |
136 | By default, models are stored in `${DocumentDir}/llama-models/`. You can customize this:
137 |
138 | ```typescript
139 | import { LlamaEngine } from '@react-native-ai/llama'
140 |
141 | LlamaEngine.setStoragePath('/custom/path/to/models')
142 | ```
143 |
144 | ## API Reference
145 |
146 | ### `llama.languageModel(modelId, options?)`
147 |
148 | Creates a language model instance.
149 |
150 | - `modelId`: Model identifier in format `owner/repo/filename.gguf`
151 | - `options`:
152 | - `n_ctx`: Context size (default: 2048)
153 | - `n_gpu_layers`: Number of GPU layers (default: 99)
154 | - `contextParams`: Additional llama.rn context parameters
155 |
156 | ### `LlamaEngine`
157 |
158 | - `getModels()`: Get list of downloaded models
159 | - `isDownloaded(modelId)`: Check if a model is downloaded
160 | - `setStoragePath(path)`: Set custom storage directory
161 |
162 | ### Model Instance Methods
163 |
164 | - `download(progressCallback?)`: Download model from HuggingFace
165 | - `isDownloaded()`: Check if this model is downloaded
166 | - `prepare()`: Initialize/load model into memory
167 | - `unload()`: Release model from memory
168 | - `remove()`: Delete model from disk
169 |
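170 | ## Putting It Together
171 |
172 | The methods above compose into a straightforward lifecycle: check, download, prepare, generate, and release. The sketch below strings the documented calls together; the model ID and `n_ctx` value are only illustrative and can be swapped for any GGUF model.
173 |
174 | ```typescript
175 | import { llama } from '@react-native-ai/llama'
176 | import { generateText } from 'ai'
177 |
178 | const model = llama.languageModel(
179 |   'ggml-org/SmolLM3-3B-GGUF/SmolLM3-Q4_K_M.gguf',
180 |   { n_ctx: 4096 }
181 | )
182 |
183 | // Download only if the weights are not already on disk
184 | if (!(await model.isDownloaded())) {
185 |   await model.download((progress) => {
186 |     console.log(`Download: ${progress.percentage}%`)
187 |   })
188 | }
189 |
190 | // Load into memory, generate, then free resources
191 | await model.prepare()
192 |
193 | const { text } = await generateText({
194 |   model,
195 |   prompt: 'Summarize what a GGUF file is in one sentence.',
196 | })
197 | console.log(text)
198 |
199 | await model.unload()
200 | ```
201 |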
--------------------------------------------------------------------------------
/packages/apple-llm/ios/transcription/AppleTranscriptionImpl.swift:
--------------------------------------------------------------------------------
1 | //
2 | // AppleTranscriptionImpl.swift
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 01/08/2025.
6 | //
7 |
8 | import Foundation
9 | import Speech
10 | import AVFoundation
11 | import UniformTypeIdentifiers
12 |
13 | @objc
14 | public class AppleTranscriptionImpl: NSObject {
15 |
16 | @available(iOS 26, *)
17 | private func createTranscriber(for locale: Locale) -> SpeechTranscriber {
18 | let preset = SpeechTranscriber.Preset.timeIndexedTranscriptionWithAlternatives
19 |
20 | return SpeechTranscriber(
21 | locale: locale,
22 | transcriptionOptions: preset.transcriptionOptions,
23 | reportingOptions: preset.reportingOptions.subtracting([.alternativeTranscriptions]),
24 | attributeOptions: preset.attributeOptions
25 | )
26 | }
27 | @objc
28 | public func isAvailable(_ language: String) -> Bool {
29 | if #available(iOS 26, *) {
30 | return SpeechTranscriber.isAvailable
31 | } else {
32 | return false
33 | }
34 | }
35 |
36 | @objc
37 | public func prepare(_ language: String, resolve: @escaping (Any?) -> Void, reject: @escaping (String, String, Error?) -> Void) {
38 | if #available(iOS 26, *) {
39 | Task {
40 | do {
41 | let locale = Locale(identifier: language)
42 |
43 | guard let supportedLocale = await SpeechTranscriber.supportedLocale(equivalentTo: locale) else {
44 | reject("AppleTranscription", "Locale not supported: \(language)", nil)
45 | return
46 | }
47 |
48 | let transcriber = createTranscriber(for: supportedLocale)
49 |
50 | let status = await AssetInventory.status(forModules: [transcriber])
51 |
52 | switch status {
53 | case .installed:
54 | resolve(nil)
55 | case .supported, .downloading:
56 | if let request = try? await AssetInventory.assetInstallationRequest(supporting: [transcriber]) {
57 | try await request.downloadAndInstall()
58 | resolve(nil)
59 | } else {
60 | resolve(nil)
61 | }
62 | case .unsupported:
63 | reject("AppleTranscription", "Assets not supported for locale: \(supportedLocale.identifier)", nil)
64 | @unknown default:
65 | reject("AppleTranscription", "Unknown asset inventory status", nil)
66 | }
67 | } catch {
68 | reject("AppleTranscription", "Failed to prepare assets: \(error.localizedDescription)", error)
69 | }
70 | }
71 | } else {
72 | reject("AppleTranscription", "Not available on this platform", nil)
73 | }
74 | }
75 |
76 | @objc
77 | public func transcribe(_ audioData: Data, language: String, resolve: @escaping ([String: Any]) -> Void, reject: @escaping (String, String, Error?) -> Void) {
78 | if #available(iOS 26, *) {
79 | let tempDirectory = FileManager.default.temporaryDirectory
80 | let fileName = UUID().uuidString
81 | let fileURL = tempDirectory.appendingPathComponent(fileName)
82 |
83 | do {
84 | try audioData.write(to: fileURL)
85 |
86 | guard let audioFile = try? AVAudioFile(forReading: fileURL) else {
87 | reject("AppleTranscription", "Invalid audio data", nil)
88 | return
89 | }
90 |
91 | Task {
92 | do {
93 | let transcriber = createTranscriber(for: Locale(identifier: language))
94 |
95 | let analyzer = SpeechAnalyzer(modules: [transcriber])
96 |
97 | defer {
98 | try? FileManager.default.removeItem(at: fileURL)
99 | }
100 |
101 | var segments: [[String: Any]] = []
102 |
103 | Task {
104 | for try await result in transcriber.results {
105 | if result.isFinal {
106 | let segment: [String: Any] = [
107 | "text": String(result.text.characters),
108 | "startSecond": CMTimeGetSeconds(result.range.start),
109 | "endSecond": CMTimeGetSeconds(CMTimeRangeGetEnd(result.range))
110 | ]
111 | segments.append(segment)
112 | }
113 | }
114 | }
115 |
116 | let lastSampleTime = try await analyzer.analyzeSequence(from: audioFile)
117 |
118 | if let lastSampleTime {
119 | try await analyzer.finalizeAndFinish(through: lastSampleTime)
120 | } else {
121 | await analyzer.cancelAndFinishNow()
122 | }
123 |
124 | let totalDuration = if let lastSampleTime { CMTimeGetSeconds(lastSampleTime) } else { 0.0 }
125 |
126 | let result: [String: Any] = [
127 | "segments": segments,
128 | "duration": totalDuration
129 | ]
130 |
131 | resolve(result)
132 | } catch {
133 | reject("AppleTranscription", "Transcription failed: \(error.localizedDescription)", error)
134 | }
135 | }
136 | } catch {
137 | reject("AppleTranscription", "Failed to write audio data: \(error.localizedDescription)", error)
138 | }
139 | } else {
140 | reject("AppleTranscription", "Not available on this platform", nil)
141 | }
142 | }
143 | }
144 |
145 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/AppleLLMScreen/index.ios.tsx:
--------------------------------------------------------------------------------
1 | import { createAppleProvider } from '@react-native-ai/apple'
2 | import { ModelMessage, streamText } from 'ai'
3 | import React, { useEffect, useRef, useState } from 'react'
4 | import {
5 | Keyboard,
6 | ScrollView,
7 | Text,
8 | TextInput,
9 | TouchableOpacity,
10 | View,
11 | } from 'react-native'
12 | import { useBottomTabBarHeight } from 'react-native-bottom-tabs'
13 | import { useReanimatedKeyboardAnimation } from 'react-native-keyboard-controller'
14 | import Animated, { useAnimatedStyle } from 'react-native-reanimated'
15 |
16 | import {
17 | checkCalendarEvents,
18 | createCalendarEvent,
19 | getCurrentTime,
20 | } from '../../../tools'
21 |
22 | const apple = createAppleProvider({
23 | availableTools: {
24 | getCurrentTime,
25 | createCalendarEvent,
26 | checkCalendarEvents,
27 | },
28 | })
29 |
30 | export default function AppleLLMScreen() {
31 | const [messages, setMessages] = useState<ModelMessage[]>([])
32 | const [inputText, setInputText] = useState('')
33 | const [isGenerating, setIsGenerating] = useState(false)
34 | const scrollViewRef = useRef<ScrollView>(null)
35 |
36 | useEffect(() => {
37 | const keyboardWillShowListener = Keyboard.addListener(
38 | 'keyboardWillShow',
39 | () => {
40 | scrollViewRef.current?.scrollToEnd({ animated: true })
41 | }
42 | )
43 | return () => {
44 | keyboardWillShowListener.remove()
45 | }
46 | }, [])
47 |
48 | const sendMessage = async () => {
49 | if (!inputText.trim() || isGenerating) return
50 |
51 | const userMessage: ModelMessage = {
52 | role: 'user',
53 | content: inputText.trim(),
54 | }
55 |
56 | const updatedMessages = [...messages, userMessage]
57 | setMessages(updatedMessages)
58 | setInputText('')
59 | setIsGenerating(true)
60 |
61 | const messageIdx = updatedMessages.length
62 |
63 | setMessages([
64 | ...updatedMessages,
65 | {
66 | role: 'assistant',
67 | content: '...',
68 | },
69 | ])
70 |
71 | let accumulatedContent = ''
72 |
73 | try {
74 | const result = streamText({
75 | model: apple(),
76 | messages: updatedMessages,
77 | tools: {
78 | getCurrentTime,
79 | createCalendarEvent,
80 | checkCalendarEvents,
81 | },
82 | })
83 |
84 | for await (const chunk of result.textStream) {
85 | accumulatedContent += chunk
86 | setMessages((prev) => {
87 | const newMessages = [...prev]
88 | newMessages[messageIdx] = {
89 | role: 'assistant',
90 | content: accumulatedContent,
91 | }
92 | return newMessages
93 | })
94 | }
95 | } catch (error) {
96 | const errorMessage = `Error: ${error instanceof Error ? error.message : 'Failed to generate response'}`
97 | setMessages((prev) => {
98 | const newMessages = [...prev]
99 | newMessages[messageIdx] = {
100 | role: 'assistant',
101 | content: errorMessage,
102 | }
103 | return newMessages
104 | })
105 | } finally {
106 | setIsGenerating(false)
107 | }
108 | }
109 |
110 | const bottomTabBarHeight = useBottomTabBarHeight()
111 | const keyboardAnimation = useReanimatedKeyboardAnimation()
112 |
113 | const animatedStyle = useAnimatedStyle(() => ({
114 | transform: [
115 | {
116 | translateY:
117 | keyboardAnimation.progress.value === 0
118 | ? -bottomTabBarHeight
119 | : keyboardAnimation.height.value,
120 | },
121 | ],
122 | }))
123 |
124 | return (
125 |
129 | {
133 | scrollViewRef.current?.scrollToEnd({ animated: true })
134 | }}
135 | >
136 | {messages.map((message, index) => (
137 |
141 |
146 |
151 | {message.content as string}
152 |
153 |
154 |
155 | ))}
156 |
157 |
158 |
159 |
168 |
175 |
182 | ↑
183 |
184 |
185 |
186 |
187 | )
188 | }
189 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/SpeechScreen/index.ios.tsx:
--------------------------------------------------------------------------------
1 | import { apple, AppleSpeech, VoiceInfo } from '@react-native-ai/apple'
2 | import { Picker } from '@react-native-picker/picker'
3 | import { experimental_generateSpeech } from 'ai'
4 | import React, { useEffect, useState } from 'react'
5 | import {
6 | ActivityIndicator,
7 | Alert,
8 | ScrollView,
9 | Text,
10 | TextInput,
11 | TouchableOpacity,
12 | View,
13 | } from 'react-native'
14 | import { AudioContext } from 'react-native-audio-api'
15 |
16 | const play = async (arrayBuffer: ArrayBufferLike) => {
17 | const context = new AudioContext()
18 |
19 | const source = context.createBufferSource()
20 | source.buffer = await context.decodeAudioData(arrayBuffer as ArrayBuffer)
21 | source.connect(context.destination)
22 |
23 | source.start()
24 | }
25 |
26 | export default function SpeechScreen() {
27 | const [inputText, setInputText] = useState(
28 | 'On-device text to speech is awesome'
29 | )
30 | const [isGenerating, setIsGenerating] = useState(false)
31 | const [generatedSpeech, setGeneratedSpeech] = useState<{
32 | arrayBuffer: ArrayBufferLike
33 | time: number
34 | } | null>(null)
35 | const [voices, setVoices] = useState<VoiceInfo[]>([])
36 | const [selectedVoice, setSelectedVoice] = useState<string | null>(null)
37 |
38 | useEffect(() => {
39 | const loadVoices = async () => {
40 | try {
41 | const voiceList = await AppleSpeech.getVoices()
42 | setVoices(voiceList)
43 | } catch (error) {
44 | console.error('Failed to load voices:', error)
45 | }
46 | }
47 |
48 | loadVoices()
49 | }, [])
50 |
51 | const generateSpeech = async () => {
52 | if (!inputText.trim() || isGenerating) return
53 |
54 | setIsGenerating(true)
55 | setGeneratedSpeech(null)
56 |
57 | const startTime = Date.now()
58 |
59 | try {
60 | const result = await experimental_generateSpeech({
61 | model: apple.speechModel(),
62 | text: inputText,
63 | voice: selectedVoice ?? undefined,
64 | })
65 |
66 | const endTime = Date.now()
67 | const duration = endTime - startTime
68 |
69 | setGeneratedSpeech({
70 | arrayBuffer: result.audio.uint8Array.buffer,
71 | time: duration,
72 | })
73 | } catch (error) {
74 | Alert.alert(
75 | 'Speech Generation Error',
76 | error instanceof Error ? error.message : 'Failed to generate speech'
77 | )
78 | } finally {
79 | setIsGenerating(false)
80 | }
81 | }
82 |
83 | return (
84 |
88 |
89 |
90 | Enter Your Text
91 |
101 |
102 |
109 | {isGenerating ? (
110 |
111 |
112 |
113 | Generating...
114 |
115 |
116 | ) : (
117 |
118 | Generate Speech
119 |
120 | )}
121 |
122 |
123 |
124 | {voices.length > 0 && (
125 |
126 | Voice Selection
127 |
128 |
132 |
133 | {voices.map((voice) => (
134 |
139 | ))}
140 |
141 |
142 |
143 | )}
144 |
145 | {generatedSpeech && (
146 |
147 |
148 | Generated Speech
149 | Ready
150 |
151 |
152 |
153 | Generated in {generatedSpeech.time}ms
154 |
155 |
156 | play(generatedSpeech.arrayBuffer)}
159 | >
160 |
161 | ▶️ Play Audio
162 |
163 |
164 |
165 | )}
166 |
167 |
168 | )
169 | }
170 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributor Covenant Code of Conduct
3 |
4 | ## Our Pledge
5 |
6 | We as members, contributors, and leaders pledge to make participation in our
7 | community a harassment-free experience for everyone, regardless of age, body
8 | size, visible or invisible disability, ethnicity, sex characteristics, gender
9 | identity and expression, level of experience, education, socio-economic status,
10 | nationality, personal appearance, race, caste, color, religion, or sexual
11 | identity and orientation.
12 |
13 | We pledge to act and interact in ways that contribute to an open, welcoming,
14 | diverse, inclusive, and healthy community.
15 |
16 | ## Our Standards
17 |
18 | Examples of behavior that contributes to a positive environment for our
19 | community include:
20 |
21 | * Demonstrating empathy and kindness toward other people
22 | * Being respectful of differing opinions, viewpoints, and experiences
23 | * Giving and gracefully accepting constructive feedback
24 | * Accepting responsibility and apologizing to those affected by our mistakes,
25 | and learning from the experience
26 | * Focusing on what is best not just for us as individuals, but for the overall
27 | community
28 |
29 | Examples of unacceptable behavior include:
30 |
31 | * The use of sexualized language or imagery, and sexual attention or advances of
32 | any kind
33 | * Trolling, insulting or derogatory comments, and personal or political attacks
34 | * Public or private harassment
35 | * Publishing others' private information, such as a physical or email address,
36 | without their explicit permission
37 | * Other conduct which could reasonably be considered inappropriate in a
38 | professional setting
39 |
40 | ## Enforcement Responsibilities
41 |
42 | Community leaders are responsible for clarifying and enforcing our standards of
43 | acceptable behavior and will take appropriate and fair corrective action in
44 | response to any behavior that they deem inappropriate, threatening, offensive,
45 | or harmful.
46 |
47 | Community leaders have the right and responsibility to remove, edit, or reject
48 | comments, commits, code, wiki edits, issues, and other contributions that are
49 | not aligned to this Code of Conduct, and will communicate reasons for moderation
50 | decisions when appropriate.
51 |
52 | ## Scope
53 |
54 | This Code of Conduct applies within all community spaces, and also applies when
55 | an individual is officially representing the community in public spaces.
56 | Examples of representing our community include using an official e-mail address,
57 | posting via an official social media account, or acting as an appointed
58 | representative at an online or offline event.
59 |
60 | ## Enforcement
61 |
62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
63 | reported to the community leaders responsible for enforcement at
64 | [INSERT CONTACT METHOD].
65 | All complaints will be reviewed and investigated promptly and fairly.
66 |
67 | All community leaders are obligated to respect the privacy and security of the
68 | reporter of any incident.
69 |
70 | ## Enforcement Guidelines
71 |
72 | Community leaders will follow these Community Impact Guidelines in determining
73 | the consequences for any action they deem in violation of this Code of Conduct:
74 |
75 | ### 1. Correction
76 |
77 | **Community Impact**: Use of inappropriate language or other behavior deemed
78 | unprofessional or unwelcome in the community.
79 |
80 | **Consequence**: A private, written warning from community leaders, providing
81 | clarity around the nature of the violation and an explanation of why the
82 | behavior was inappropriate. A public apology may be requested.
83 |
84 | ### 2. Warning
85 |
86 | **Community Impact**: A violation through a single incident or series of
87 | actions.
88 |
89 | **Consequence**: A warning with consequences for continued behavior. No
90 | interaction with the people involved, including unsolicited interaction with
91 | those enforcing the Code of Conduct, for a specified period of time. This
92 | includes avoiding interactions in community spaces as well as external channels
93 | like social media. Violating these terms may lead to a temporary or permanent
94 | ban.
95 |
96 | ### 3. Temporary Ban
97 |
98 | **Community Impact**: A serious violation of community standards, including
99 | sustained inappropriate behavior.
100 |
101 | **Consequence**: A temporary ban from any sort of interaction or public
102 | communication with the community for a specified period of time. No public or
103 | private interaction with the people involved, including unsolicited interaction
104 | with those enforcing the Code of Conduct, is allowed during this period.
105 | Violating these terms may lead to a permanent ban.
106 |
107 | ### 4. Permanent Ban
108 |
109 | **Community Impact**: Demonstrating a pattern of violation of community
110 | standards, including sustained inappropriate behavior, harassment of an
111 | individual, or aggression toward or disparagement of classes of individuals.
112 |
113 | **Consequence**: A permanent ban from any sort of public interaction within the
114 | community.
115 |
116 | ## Attribution
117 |
118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119 | version 2.1, available at
120 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
121 |
122 | Community Impact Guidelines were inspired by
123 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
124 |
125 | For answers to common questions about this code of conduct, see the FAQ at
126 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
127 | [https://www.contributor-covenant.org/translations][translations].
128 |
129 | [homepage]: https://www.contributor-covenant.org
130 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
131 | [Mozilla CoC]: https://github.com/mozilla/diversity
132 | [FAQ]: https://www.contributor-covenant.org/faq
133 | [translations]: https://www.contributor-covenant.org/translations
134 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/PlaygroundScreen/index.ios.tsx:
--------------------------------------------------------------------------------
1 | import { apple } from '@react-native-ai/apple'
2 | import { generateObject, generateText, streamText } from 'ai'
3 | import React, { useState } from 'react'
4 | import {
5 | ActivityIndicator,
6 | Alert,
7 | ScrollView,
8 | Text,
9 | TouchableOpacity,
10 | View,
11 | } from 'react-native'
12 | import { z } from 'zod'
13 |
14 | async function basicStringDemo() {
15 | const response = await generateText({
16 | model: apple(),
17 | prompt: 'Who founded Apple?',
18 | })
19 | return response.text
20 | }
21 |
22 | async function basicStringStreamingDemo() {
23 | const response = streamText({
24 | model: apple(),
25 | prompt: 'Write me short essay on the meaning of life',
26 | })
27 | for await (const chunk of response.textStream) {
28 | console.log(chunk)
29 | }
30 | return response.text
31 | }
32 |
33 | async function colorEnumDemo() {
34 | const response = await generateObject({
35 | model: apple(),
36 | prompt: 'What color is the grass?',
37 | schema: z
38 | .object({
39 | color: z.enum(['red', 'blue', 'green']).describe('Pick a color'),
40 | })
41 | .describe('Color response'),
42 | })
43 | return response.object
44 | }
45 |
46 | async function basicNumberDemo() {
47 | const response = await generateObject({
48 | model: apple(),
49 | system: 'There are 3 people in the room.',
50 | prompt: 'How many people are in the room?',
51 | schema: z
52 | .object({
53 | value: z.number().min(1).max(10).describe('A number between 1 and 10'),
54 | })
55 | .describe('Number response'),
56 | })
57 | return response.object
58 | }
59 |
60 | async function basicBooleanDemo() {
61 | const response = await generateObject({
62 | model: apple(),
63 | prompt: 'Is the sky blue?',
64 | schema: z
65 | .object({
66 | answer: z.boolean(),
67 | })
68 | .describe('Boolean response'),
69 | })
70 | return response.object
71 | }
72 |
73 | async function basicObjectDemo() {
74 | const response = await generateObject({
75 | model: apple(),
76 | prompt: 'Create a simple person',
77 | schema: z
78 | .object({
79 | name: z.string().describe('Person name'),
80 | age: z.number().int().min(1).max(100).describe('Age'),
81 | active: z.boolean().describe('Is active'),
82 | })
83 | .describe('Basic person info'),
84 | })
85 | return response.object
86 | }
87 |
88 | async function basicArrayDemo() {
89 | const response = await generateObject({
90 | model: apple(),
91 | prompt: 'Random list of fruits',
92 | topK: 50,
93 | temperature: 1,
94 | schema: z
95 | .object({
96 | items: z.array(z.string()).min(2).max(3).describe('List of items'),
97 | })
98 | .describe('Array response'),
99 | })
100 | return response.object
101 | }
102 |
103 | const playgroundDemos = {
104 | basicString: { name: 'String', func: basicStringDemo },
105 | basicStringStreaming: {
106 | name: 'String Streaming',
107 | func: basicStringStreamingDemo,
108 | },
109 | colorEnum: { name: 'Enum', func: colorEnumDemo },
110 | basicNumber: { name: 'Number', func: basicNumberDemo },
111 | basicBoolean: { name: 'Boolean', func: basicBooleanDemo },
112 | basicObject: { name: 'Object', func: basicObjectDemo },
113 | basicArray: { name: 'Array', func: basicArrayDemo },
114 | }
115 |
116 | export default function PlaygroundScreen() {
117 | const [loading, setLoading] = useState<string | null>(null)
118 | const isAvailable = apple.isAvailable()
119 |
120 | const runDemo = async (key: string) => {
121 | if (loading) return
122 |
123 | setLoading(key)
124 |
125 | try {
126 | const result = await playgroundDemos[key].func()
127 | Alert.alert('Success', JSON.stringify(result, null, 2))
128 | } catch (error) {
129 | Alert.alert(
130 | 'Error',
131 | error instanceof Error ? error.message : String(error)
132 | )
133 | } finally {
134 | setLoading(null)
135 | }
136 | }
137 |
138 | const getBackgroundClass = (index: number, isLoading: boolean) => {
139 | if (isLoading) return 'bg-blue-100 border border-blue-300'
140 |
141 | const colors = [
142 | 'bg-blue-500',
143 | 'bg-green-500',
144 | 'bg-purple-500',
145 | 'bg-orange-500',
146 | 'bg-red-500',
147 | 'bg-indigo-500',
148 | 'bg-pink-500',
149 | ] as const
150 |
151 | return colors[index % colors.length]
152 | }
153 |
154 | return (
155 |
156 |
157 |
158 | Apple Intelligence:{' '}
159 | {isAvailable ? '✅ Available' : '❌ Not Available'}
160 |
161 |
162 |
163 | {Object.entries(playgroundDemos).map(([key, demo], index) => {
164 | const isLoading = loading === key
165 | const isDisabled = loading !== null && !isLoading
166 | const backgroundClass = getBackgroundClass(index, isLoading)
167 |
168 | return (
169 | runDemo(key)}
175 | disabled={loading !== null}
176 | >
177 | {isLoading ? (
178 |
179 | ) : (
180 | {demo.name}
181 | )}
182 |
183 | )
184 | })}
185 |
186 |
187 |
188 | )
189 | }
190 |
--------------------------------------------------------------------------------
/packages/apple-llm/ios/speech/AppleSpeech.mm:
--------------------------------------------------------------------------------
1 | //
2 | // AppleSpeech.mm
3 | // AppleLLM
4 | //
5 | // Created by Mike Grabowski on 04/08/2025.
6 | //
7 |
8 | #if __has_include("AppleLLM/AppleLLM-Swift.h")
9 | #import "AppleLLM/AppleLLM-Swift.h"
10 | #else
11 | #import "AppleLLM-Swift.h"
12 | #endif
13 |
14 | #import
15 | #import
16 | #import
17 | #import
18 |
19 | #import
20 | #import
21 |
22 | #import
23 |
24 | @interface AppleSpeech : NativeAppleSpeechSpecBase <NativeAppleSpeechSpec>
25 | @property (strong, nonatomic) AppleSpeechImpl *speech;
26 | @end
27 |
28 | using namespace facebook;
29 | using namespace JS::NativeAppleLLM;
30 | using namespace react;
31 |
32 | @implementation AppleSpeech
33 |
34 | @synthesize callInvoker;
35 |
36 | - (instancetype)init {
37 | self = [super init];
38 | if (self) {
39 | _speech = [AppleSpeechImpl new];
40 | }
41 | return self;
42 | }
43 |
44 | + (NSString *)moduleName {
45 | return @"NativeAppleSpeech";
46 | }
47 |
48 | - (void)installJSIBindingsWithRuntime:(facebook::jsi::Runtime &)rt callInvoker:(const std::shared_ptr<facebook::react::CallInvoker> &)jsInvoker {
49 | AppleSpeechImpl *speechModule = _speech;
50 |
51 | @try {
52 | auto global = rt.global();
53 |
54 | auto generateAudioFunc = jsi::Function::createFromHostFunction(
55 | rt,
56 | jsi::PropNameID::forAscii(rt, "generateAudio"),
57 | 2,
58 | [speechModule, jsInvoker](jsi::Runtime& rt, const jsi::Value& thisVal, const jsi::Value* args, size_t count) -> jsi::Value {
59 | if (count < 1 || !args[0].isString()) {
60 | throw jsi::JSError(rt, "First argument must be a string (text)");
61 | }
62 |
63 | auto textStr = args[0].asString(rt).utf8(rt);
64 | NSString *text = [NSString stringWithUTF8String:textStr.c_str()];
65 |
66 | auto *options = [NSMutableDictionary new];
67 | if (count > 1 && args[1].isObject()) {
68 | auto opts = args[1].asObject(rt);
69 |
70 | if (opts.hasProperty(rt, "language")) {
71 | auto langProp = opts.getProperty(rt, "language");
72 | if (langProp.isString()) {
73 | auto langStr = langProp.asString(rt).utf8(rt);
74 | options[@"language"] = [NSString stringWithUTF8String:langStr.c_str()];
75 | }
76 | }
77 |
78 | if (opts.hasProperty(rt, "voice")) {
79 | auto voiceProp = opts.getProperty(rt, "voice");
80 | if (voiceProp.isString()) {
81 | auto voiceStr = voiceProp.asString(rt).utf8(rt);
82 | options[@"voice"] = [NSString stringWithUTF8String:voiceStr.c_str()];
83 | }
84 | }
85 | }
86 |
87 | auto Promise = rt.global().getPropertyAsFunction(rt, "Promise");
88 |
89 | return Promise.callAsConstructor(rt, jsi::Function::createFromHostFunction(
90 | rt,
91 | jsi::PropNameID::forAscii(rt, "executor"),
92 | 2,
93 | [speechModule, text, options, jsInvoker](jsi::Runtime& rt, const jsi::Value& thisVal, const jsi::Value* args, size_t count) -> jsi::Value {
94 | using ResolveCallback = facebook::react::AsyncCallback;
95 | using RejectCallback = facebook::react::AsyncCallback;
96 |
97 | auto resolve = ResolveCallback(rt, args[0].asObject(rt).asFunction(rt), jsInvoker);
98 | auto reject = RejectCallback(rt, args[1].asObject(rt).asFunction(rt), jsInvoker);
99 |
100 | [speechModule generateAudio:text options:options resolve:^(NSDictionary *result) {
101 | resolve.call([result](jsi::Runtime& rt, jsi::Function& resolveFunc) {
102 | class NSDataMutableBuffer : public facebook::jsi::MutableBuffer {
103 | public:
104 | NSDataMutableBuffer(uint8_t* data, size_t size) : _data(data), _size(size) {}
105 | uint8_t* data() override { return _data; }
106 | size_t size() const override { return _size; }
107 | private:
108 | uint8_t* _data;
109 | size_t _size;
110 | };
111 |
112 | // Extract PCM data
113 | NSData *audioData = result[@"data"];
114 | uint8_t* data = (uint8_t*)[audioData bytes];
115 | size_t size = [audioData length];
116 |
117 | auto mutableBuffer = std::make_shared<NSDataMutableBuffer>(data, size);
118 | auto arrayBuffer = jsi::ArrayBuffer(rt, mutableBuffer);
119 |
120 | // Create result object with format information
121 | auto resultObj = jsi::Object(rt);
122 | resultObj.setProperty(rt, "data", std::move(arrayBuffer));
123 | resultObj.setProperty(rt, "sampleRate", jsi::Value(rt, [result[@"sampleRate"] intValue]));
124 | resultObj.setProperty(rt, "channels", jsi::Value(rt, [result[@"channels"] intValue]));
125 | resultObj.setProperty(rt, "bitsPerSample", jsi::Value(rt, [result[@"bitsPerSample"] intValue]));
126 | resultObj.setProperty(rt, "formatType", jsi::Value(rt, [result[@"formatType"] intValue]));
127 |
128 | resolveFunc.call(rt, std::move(resultObj));
129 | });
130 | } reject:^(NSString *code, NSString *message, NSError *error) {
131 | reject.call([message](jsi::Runtime& rt, jsi::Function& rejectFunc) {
132 | auto jsError = jsi::String::createFromUtf8(rt, [message UTF8String]);
133 | rejectFunc.call(rt, jsError);
134 | });
135 | }];
136 |
137 | return jsi::Value::undefined();
138 | }
139 | ));
140 | }
141 | );
142 |
143 | global.setProperty(rt, "__apple__llm__generate_audio__", generateAudioFunc);
144 | } @catch (NSException *exception) {
145 | throw jsi::JSError(rt, [[NSString stringWithFormat:@"Failed to install generateAudio handler: %@", exception.reason] UTF8String]);
146 | }
147 | }
148 |
149 | - (std::shared_ptr<react::TurboModule>)getTurboModule:(const react::ObjCTurboModule::InitParams &)params {
150 | return std::make_shared<react::NativeAppleSpeechSpecJSI>(params);
151 | }
152 |
153 | - (void)getVoices:(nonnull RCTPromiseResolveBlock)resolve reject:(nonnull RCTPromiseRejectBlock)reject {
154 | [_speech getVoices:resolve reject:reject];
155 | }
156 |
157 | @end
158 |
--------------------------------------------------------------------------------
/website/src/docs/apple/generating.md:
--------------------------------------------------------------------------------
1 | # Generating
2 |
3 | You can generate responses using Apple Foundation Models with the Vercel AI SDK's `generateText` or `generateObject` functions.
4 |
5 | ## Requirements
6 |
7 | - **iOS 26+** - Apple Foundation Models is available in iOS 26 or later
8 | - **Apple Intelligence enabled device** - Device must support Apple Intelligence
9 |
10 | ## Text Generation
11 |
12 | ```typescript
13 | import { apple } from '@react-native-ai/apple';
14 | import { generateText } from 'ai';
15 |
16 | const result = await generateText({
17 | model: apple(),
18 | prompt: 'Explain quantum computing in simple terms'
19 | });
20 | ```
21 |
22 | ## Streaming
23 |
24 | ```typescript
25 | import { streamText } from 'ai';
26 | import { apple } from '@react-native-ai/apple';
27 |
28 | const { textStream } = streamText({
29 | model: apple(),
30 | prompt: 'Write me a short essay on the meaning of life'
31 | });
32 |
33 | for await (const delta of textStream) {
34 | console.log(delta);
35 | }
36 | ```
37 |
38 | > [!NOTE]
39 | > Streaming objects is currently not supported.
40 |
41 | ## Structured Output
42 |
43 | Generate structured data that conforms to a specific schema:
44 |
45 | ```typescript
46 | import { generateObject } from 'ai';
47 | import { apple } from '@react-native-ai/apple';
48 | import { z } from 'zod';
49 |
50 | const schema = z.object({
51 | name: z.string(),
52 | age: z.number().int().min(0).max(150),
53 | email: z.string().email(),
54 | occupation: z.string()
55 | });
56 |
57 | const result = await generateObject({
58 | model: apple(),
59 | prompt: 'Generate a user profile for a software developer',
60 | schema
61 | });
62 |
63 | console.log(result.object);
64 | // { name: string, age: number, email: string, occupation: string }
65 | ```
66 |
67 | ## Tool Calling
68 |
69 | Enable Apple Foundation Models to use custom tools in your React Native applications.
70 |
71 | ### Important Apple-Specific Behavior
72 |
73 | Tools are executed by Apple, not the Vercel AI SDK, which means:
74 |
75 | - **No AI SDK callbacks**: `maxSteps`, `onStepStart`, and `onStepFinish` will not be executed
76 | - **Pre-register all tools**: You must pass all tools to `createAppleProvider` upfront
77 | - **Empty toolCallId**: Apple doesn't provide tool call IDs, so they will be empty strings
78 |
79 | ### Setup
80 |
81 | All tools must be registered ahead of time with the Apple provider. To do so, create a provider by calling `createAppleProvider`:
82 |
83 | ```typescript
84 | import { createAppleProvider } from '@react-native-ai/apple';
85 | import { generateText, tool } from 'ai';
86 | import { z } from 'zod';
87 |
88 | const getWeather = tool({
89 | description: 'Get current weather information',
90 | parameters: z.object({
91 | city: z.string()
92 | }),
93 | execute: async ({ city }) => {
94 | return `Weather in ${city}: Sunny, 25°C`;
95 | }
96 | });
97 |
98 | const apple = createAppleProvider({
99 | availableTools: {
100 | getWeather
101 | }
102 | });
103 | ```
104 |
105 | ### Basic Tool Usage
106 |
107 | Then generate output just as you would with any other Vercel AI SDK provider:
108 |
109 | ```typescript
110 | const result = await generateText({
111 | model: apple(),
112 | prompt: 'What is the weather in Paris?',
113 | tools: {
114 | getWeather
115 | }
116 | });
117 | ```
118 |
119 | ### Inspecting Tool Calls
120 |
121 | You can inspect tool calls and their results after generation:
122 |
123 | ```typescript
124 | const result = await generateText({
125 | model: apple(),
126 | prompt: 'What is the weather in Paris?',
127 | tools: { getWeather }
128 | });
129 |
130 | // Inspect tool calls made during generation
131 | console.log(result.toolCalls);
132 | // Example: [{ toolCallId: '', toolName: 'getWeather', input: '{"city":"Paris"}' }]
133 |
134 | // Inspect tool results returned
135 | console.log(result.toolResults);
136 | // Example: [{ toolCallId: '', toolName: 'getWeather', result: 'Weather in Paris: Sunny, 25°C' }]
137 | ```
138 |
139 | ### Tool calling with structured output
140 |
141 | You can also use [`experimental_output`](https://v5.ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#experimental_output) (together with the `Output` helper imported from `ai`) to generate structured output with `generateText`. This is useful when you want to perform tool calls and receive a typed result in the same call.
142 |
143 | ```typescript
144 | const response = await generateText({
145 | model: apple(),
146 | system: `Help the person with getting weather information.`,
147 | prompt: 'What is the weather in Wroclaw?',
148 | tools: {
149 | getWeather,
150 | },
151 | experimental_output: Output.object({
152 | schema: z.object({
153 | weather: z.string(),
154 | city: z.string(),
155 | }),
156 | }),
157 | })
158 | ```
159 |
160 | ### Supported features
161 |
162 | We aim to cover most of the OpenAI-supported schema formats, including the following (an illustrative example follows these lists):
163 |
164 | - **Objects**: `z.object({})` with nested properties
165 | - **Arrays**: `z.array()` with `minItems` and `maxItems` constraints
166 | - **Strings**: `z.string()`
167 | - **Numbers**: `z.number()` with `minimum`, `maximum`, `exclusiveMinimum`, `exclusiveMaximum`
168 | - **Booleans**: `z.boolean()`
169 | - **Enums**: `z.enum([])` for string and number values
170 |
171 | The following features are currently not supported due to underlying model limitations:
172 |
173 | - **String formats**: `email()`, `url()`, `uuid()`, `datetime()`, etc.
174 | - **Regular expressions**: `z.string().regex()` patterns
175 | - **Unions**: `z.union()`, `z.discriminatedUnion()`
176 |
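177 | As a reference point, here is an illustrative schema that stays within the supported subset above (nested object, bounded array, constrained number, boolean, and enum); the field names are made up for the example:
178 |
179 | ```typescript
180 | import { generateObject } from 'ai';
181 | import { apple } from '@react-native-ai/apple';
182 | import { z } from 'zod';
183 |
184 | // Uses only supported features: nested objects, arrays with
185 | // min/max, numbers with bounds, booleans, and enums.
186 | const postSchema = z.object({
187 |   title: z.string(),
188 |   rating: z.number().min(1).max(5),
189 |   tags: z.array(z.string()).min(1).max(3),
190 |   status: z.enum(['draft', 'published']),
191 |   author: z.object({
192 |     name: z.string(),
193 |     verified: z.boolean(),
194 |   }),
195 | });
196 |
197 | const { object } = await generateObject({
198 |   model: apple(),
199 |   prompt: 'Describe a short blog post about on-device AI',
200 |   schema: postSchema,
201 | });
202 | ```
203 |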
177 | ## Availability Check
178 |
179 | Always check if Apple Intelligence is available before using the provider:
180 |
181 | ```typescript
182 | import { apple } from '@react-native-ai/apple';
183 |
184 | if (!apple.isAvailable()) {
185 | // Handle fallback logic
186 | return;
187 | }
188 | ```
189 |
190 | ## Available Options
191 |
192 | Configure model behavior with generation options:
193 |
194 | - `temperature` (0-1): Controls randomness. Higher values = more creative, lower = more focused
195 | - `maxTokens`: Maximum number of tokens to generate
196 | - `topP` (0-1): Nucleus sampling threshold
197 | - `topK`: Top-K sampling parameter
198 |
199 | You can pass selected options with either `generateText` or `generateObject` as follows:
200 |
201 | ```typescript
202 | import { apple } from '@react-native-ai/apple';
203 | import { generateText } from 'ai';
204 |
205 | const result = await generateText({
206 | model: apple(),
207 | prompt: 'Write a creative story',
208 | temperature: 0.8,
209 | maxTokens: 500,
210 | topP: 0.9,
211 | });
212 | ```
213 |
214 | ## Direct API Access
215 |
216 | For advanced use cases, you can access the native Apple Foundation Models API directly:
217 |
218 | ### AppleFoundationModels
219 |
220 | ```tsx
221 | import { AppleFoundationModels } from '@react-native-ai/apple'
222 |
223 | // Check if Apple Intelligence is available
224 | const isAvailable = AppleFoundationModels.isAvailable()
225 |
226 | // Generate text responses
227 | const messages = [{ role: 'user', content: 'Hello' }]
228 | const options = { temperature: 0.7, maxTokens: 100 }
229 |
230 | const result = await AppleFoundationModels.generateText(messages, options)
231 | ```
232 |
--------------------------------------------------------------------------------
/apps/expo-example/src/screens/apple/TranscribeScreen/index.ios.tsx:
--------------------------------------------------------------------------------
1 | import { apple } from '@react-native-ai/apple'
2 | import { experimental_transcribe } from 'ai'
3 | import React, { useEffect, useRef, useState } from 'react'
4 | import { Alert, ScrollView, Text, TouchableOpacity, View } from 'react-native'
5 | import {
6 | AudioBuffer,
7 | AudioContext,
8 | AudioManager,
9 | AudioRecorder,
10 | RecorderAdapterNode,
11 | } from 'react-native-audio-api'
12 |
13 | import {
14 | float32ArrayToWAV,
15 | mergeBuffersToFloat32Array,
16 | } from '../../../utils/audioUtils'
17 |
18 | const SAMPLE_RATE = 16000
19 |
20 | export default function TranscribeScreen() {
21 | const [isTranscribing, setIsTranscribing] = useState(false)
22 | const [transcription, setTranscription] = useState<{
23 | text: string
24 | time: number
25 | } | null>(null)
26 | const [isRecording, setIsRecording] = useState(false)
27 | const [wavBuffer, setWavBuffer] = useState<ArrayBuffer | null>(null)
28 |
29 | const recorderRef = useRef<AudioRecorder | null>(null)
30 | const aCtxRef = useRef<AudioContext | null>(null)
31 | const recorderAdapterRef = useRef<RecorderAdapterNode | null>(null)
32 | const audioBuffersRef = useRef<AudioBuffer[]>([])
33 |
34 | useEffect(() => {
35 | AudioManager.setAudioSessionOptions({
36 | iosCategory: 'playAndRecord',
37 | iosMode: 'voiceChat',
38 | iosOptions: ['defaultToSpeaker', 'allowBluetooth'],
39 | })
40 |
41 | AudioManager.requestRecordingPermissions()
42 |
43 | recorderRef.current = new AudioRecorder({
44 | sampleRate: SAMPLE_RATE,
45 | bufferLengthInSamples: SAMPLE_RATE,
46 | })
47 | }, [])
48 |
49 | const transcribe = async (audioBuffer: ArrayBuffer) => {
50 | setIsTranscribing(true)
51 | setTranscription(null)
52 |
53 | const startTime = Date.now()
54 |
55 | try {
56 | const result = await experimental_transcribe({
57 | model: apple.transcriptionModel(),
58 | audio: audioBuffer,
59 | })
60 |
61 | const endTime = Date.now()
62 | const duration = endTime - startTime
63 |
64 | setTranscription({
65 | text: result.text,
66 | time: duration,
67 | })
68 | } catch (error) {
69 | Alert.alert(
70 | 'Transcription Error',
71 | error instanceof Error ? error.message : 'Failed to transcribe audio'
72 | )
73 | } finally {
74 | setIsTranscribing(false)
75 | }
76 | }
77 |
78 | const startRecording = () => {
79 | if (!recorderRef.current) {
80 | console.error('AudioRecorder is not initialized')
81 | return
82 | }
83 |
84 | audioBuffersRef.current = []
85 |
86 | recorderRef.current.onAudioReady((event) => {
87 | const { buffer, numFrames, when } = event
88 | console.log(
89 | 'Audio recorder buffer ready:',
90 | buffer.duration,
91 | numFrames,
92 | when
93 | )
94 | audioBuffersRef.current.push(buffer)
95 | })
96 |
97 | aCtxRef.current = new AudioContext({ sampleRate: SAMPLE_RATE })
98 | recorderAdapterRef.current = aCtxRef.current.createRecorderAdapter()
99 | recorderAdapterRef.current.connect(aCtxRef.current.destination)
100 | recorderRef.current.connect(recorderAdapterRef.current)
101 |
102 | recorderRef.current.start()
103 | setIsRecording(true)
104 | console.log('Recording started')
105 |
106 | if (aCtxRef.current.state === 'suspended') {
107 | aCtxRef.current.resume()
108 | }
109 | }
110 |
111 | const stopRecording = async () => {
112 | if (!recorderRef.current) {
113 | console.error('AudioRecorder is not initialized')
114 | return
115 | }
116 |
117 | recorderRef.current.stop()
118 | setIsRecording(false)
119 |
120 | // Merge all recorded PCM data
121 | const mergedPCM = mergeBuffersToFloat32Array(audioBuffersRef.current)
122 | if (mergedPCM.length > 0) {
123 | const duration = mergedPCM.length / SAMPLE_RATE
124 | console.log(
125 | `Merged ${audioBuffersRef.current.length} buffers: ${duration.toFixed(1)}s, ${mergedPCM.length} samples`
126 | )
127 |
128 | // Convert to WAV and store in state
129 | const recordedWavBuffer = float32ArrayToWAV(mergedPCM, SAMPLE_RATE)
130 | setWavBuffer(recordedWavBuffer)
131 | }
132 |
133 | aCtxRef.current = null
134 | recorderAdapterRef.current = null
135 | console.log('Recording stopped')
136 | }
137 |
138 | return (
139 |
143 |
144 |
145 | Audio Recording
146 |
147 |
154 |
155 | {isRecording ? 'Recording...' : 'Start Recording'}
156 |
157 |
158 |
159 |
166 |
167 | Stop Recording
168 |
169 |
170 |
171 | {audioBuffersRef.current.length > 0 && (
172 |
173 | Recorded {audioBuffersRef.current.length} audio buffers
174 |
175 | )}
176 |
177 |
178 | {wavBuffer && (
179 |
180 | Loaded Audio
181 | transcribe(wavBuffer)}
186 | disabled={isTranscribing}
187 | >
188 |
189 | {isTranscribing ? 'Transcribing...' : 'Transcribe Audio'}
190 |
191 |
192 |
193 | )}
194 |
195 | {transcription && (
196 |
197 | Transcription
198 |
199 | Completed in {transcription.time}ms
200 |
201 |
202 | {transcription.text}
203 |
204 |
205 | )}
206 |
207 |
208 | )
209 | }
210 |
--------------------------------------------------------------------------------