diff --git a/examples/ai-transport-message-per-token/javascript/README.md b/examples/ai-transport-message-per-token/javascript/README.md new file mode 100644 index 0000000000..1a2ade8dec --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/README.md @@ -0,0 +1,60 @@ +# AI Transport message per token streaming + +Enable realtime streaming of AI/LLM responses by publishing tokens as they arrive from Large Language Model services. + +AI Transport token streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime rather than waiting for complete responses. This pattern is essential for creating engaging AI-powered experiences where users can see responses being generated as they happen. + +The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime. + +Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions. + +## Resources + +Use the following methods to implement AI Transport token streaming: + +- [`client.channels.get()`](/docs/channels#create): creates a new or retrieves an existing channel for AI Transport token streaming. +- [`channel.subscribe()`](/docs/channels#subscribe): subscribes to token messages from AI services by registering a listener for realtime streaming. +- [`channel.publish()`](/docs/channels#publish): publishes individual tokens as they arrive from the LLM service with response tracking headers. +- [`channel.history()`](/docs/channels/history) with [`untilAttach`](/docs/channels/options#attach): enables seamless message recovery during reconnections, ensuring no tokens are lost. + +Find out more about [AI Transport](/docs/ai-transport) and [message history](/docs/channels/history). + +## Getting started + +1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found: + + ```sh + git clone git@github.com:ably/docs.git + ``` + +2. Change directory: + + ```sh + cd examples/ + ``` + +3. Rename the environment file: + + ```sh + mv .env.example .env.local + ``` + +4. In `.env.local` update the value of `VITE_ABLY_KEY` to be your Ably API key. + +5. Install dependencies: + + ```sh + yarn install + ``` + +6. Run the server: + + ```sh + yarn run ai-transport-message-per-token-javascript + ``` + +7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming. + +## Open in CodeSandbox + +In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. \ No newline at end of file diff --git a/examples/ai-transport-message-per-token/javascript/index.html b/examples/ai-transport-message-per-token/javascript/index.html new file mode 100644 index 0000000000..0d6a7baa7f --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/index.html @@ -0,0 +1,46 @@ + + + + + + + AI Transport Token Streaming - JavaScript + + + +
+  <body>
+    <!-- Elements looked up by id in src/script.ts -->
+    <div>
+      <span id="processing-status">ready</span>
+      <button id="connection-toggle">Disconnect</button>
+    </div>
+    <div id="response-text">Select a prompt below to get started</div>
+    <button id="prompt-button">What is Ably AI Transport?</button>
+    <script type="module" src="/src/script.ts"></script>
+  </body>
+</html>
+ + + + diff --git a/examples/ai-transport-message-per-token/javascript/package.json b/examples/ai-transport-message-per-token/javascript/package.json new file mode 100644 index 0000000000..45840dfa78 --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/package.json @@ -0,0 +1,10 @@ +{ + "name": "ai-transport-message-per-token-javascript", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + } +} diff --git a/examples/ai-transport-message-per-token/javascript/src/agent.ts b/examples/ai-transport-message-per-token/javascript/src/agent.ts new file mode 100644 index 0000000000..d8a8f6d5f3 --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/src/agent.ts @@ -0,0 +1,63 @@ +// Agent Service +// This consumes LLM streams and publishes events to Ably + +import * as Ably from 'ably'; +import { MockLLM } from './llm'; + +export class Agent { + private client: Ably.Realtime; + private channel: Ably.RealtimeChannel; + private llm: MockLLM; + + constructor(ablyKey: string, channelName: string) { + this.client = new Ably.Realtime({ + key: ablyKey, + clientId: 'ai-agent', + }); + this.channel = this.client.channels.get(channelName); + this.llm = new MockLLM(); + } + + async processPrompt(prompt: string, responseId: string): Promise { + const stream = await this.llm.responses.create(prompt); + + for await (const event of stream) { + if (event.type === 'message_start') { + // Publish response start + this.channel.publish({ + name: 'start', + data: {}, + extras: { + headers: { + responseId, + }, + }, + }); + } else if (event.type === 'message_delta') { + // Publish tokens + this.channel.publish({ + name: 'token', + data: { + token: event.text, + }, + extras: { + headers: { + responseId, + }, + }, + }); + } else if (event.type === 'message_stop') { + // Publish response stop + this.channel.publish({ + name: 'stop', + data: {}, + extras: { + headers: { + responseId, + }, + }, + }); + } + } + } +} diff --git a/examples/ai-transport-message-per-token/javascript/src/config.ts b/examples/ai-transport-message-per-token/javascript/src/config.ts new file mode 100644 index 0000000000..8617022a2d --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/src/config.ts @@ -0,0 +1,3 @@ +export const config = { + ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'YOUR_ABLY_KEY_HERE', +}; diff --git a/examples/ai-transport-message-per-token/javascript/src/llm.ts b/examples/ai-transport-message-per-token/javascript/src/llm.ts new file mode 100644 index 0000000000..ab9f1061f8 --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/src/llm.ts @@ -0,0 +1,49 @@ +// Mock LLM Service +// This simulates a generic LLM SDK with streaming capabilities + +interface StreamEvent { + type: 'message_start' | 'message_delta' | 'message_stop'; + text?: string; + responseId: string; +} + +export class MockLLM { + private readonly responseText = + 'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications. 
You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.'; + + responses = { + create: (prompt: string) => this.createStream(prompt), + }; + + private async *createStream(_prompt: string): AsyncIterable { + const responseId = `resp_${crypto.randomUUID()}`; + + // Yield start event + yield { type: 'message_start', responseId }; + + // Chunk text into tokens (simulates LLM tokenization) + const tokens = this.chunkTextLikeAI(this.responseText); + + for (const token of tokens) { + // Simulate realistic delay between tokens + await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50)); + + // Yield token event + yield { type: 'message_delta', text: token, responseId }; + } + + // Yield stop event + yield { type: 'message_stop', responseId }; + } + + private chunkTextLikeAI(text: string): string[] { + const chunks: string[] = []; + let pos = 0; + while (pos < text.length) { + const size = Math.floor(Math.random() * 8) + 1; + chunks.push(text.slice(pos, pos + size)); + pos += size; + } + return chunks.filter((chunk) => chunk.length > 0); + } +} diff --git a/examples/ai-transport-message-per-token/javascript/src/script.ts b/examples/ai-transport-message-per-token/javascript/src/script.ts new file mode 100644 index 0000000000..8687238245 --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/src/script.ts @@ -0,0 +1,123 @@ +import * as Ably from 'ably'; +import { Agent } from './agent'; +import { config } from './config'; + +// Generate unique channel name for this session +const CHANNEL_NAME = `ai-transport-${crypto.randomUUID()}`; +const client = new Ably.Realtime({ + key: config.ABLY_KEY, +}); +const channel = client.channels.get(CHANNEL_NAME); +const responseTextElement = document.getElementById('response-text') as HTMLDivElement; +const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement; +const promptButton = document.getElementById('prompt-button') as HTMLButtonElement; +const processingStatus = document.getElementById('processing-status') as HTMLSpanElement; + +let currentResponseId: string | null = null; +let responseCompleted = false; +let responseText = ''; +let isHydrating = false; +let pendingTokens: string[] = []; + +const updateDisplay = () => { + responseTextElement.innerText = responseText; + processingStatus.innerText = responseCompleted ? 
'Completed' : 'In Progress'; +}; + +channel.subscribe('start', (message) => { + const responseId = message.extras?.headers?.responseId; + if (responseId && currentResponseId === responseId) { + responseCompleted = false; + responseText = ''; + pendingTokens = []; + updateDisplay(); + } +}); + +channel.subscribe('token', (message) => { + const responseId = message.extras?.headers?.responseId; + if (responseId && currentResponseId === responseId) { + if (isHydrating) { + pendingTokens.push(message.data.token); + } else { + responseText += message.data.token; + updateDisplay(); + } + } +}); + +channel.subscribe('stop', (message) => { + const responseId = message.extras?.headers?.responseId; + if (responseId && currentResponseId === responseId) { + responseCompleted = true; + updateDisplay(); + } +}); + +// Hydrate from history after reattaching +const hydrateFromHistory = async () => { + if (!currentResponseId) { + isHydrating = false; + return; + } + + let page = await channel.history({ untilAttach: true }); + + const historyTokens: string[] = []; + while (page) { + for (const message of page.items) { + const responseId = message.extras?.headers?.responseId; + if (responseId !== currentResponseId) { + continue; + } + if (message.name === 'token') { + historyTokens.push(message.data.token); + } else if (message.name === 'stop') { + responseCompleted = true; + } + } + page = page.hasNext() ? await page.next() : null; + } + + // History arrives newest-first, so reverse it + // Then append any tokens that arrived during hydration + responseText = historyTokens.reverse().join('') + pendingTokens.join(''); + isHydrating = false; + updateDisplay(); +}; + +const handlePromptClick = () => { + currentResponseId = `request-${crypto.randomUUID()}`; + responseText = ''; + updateDisplay(); + const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + agent.processPrompt('What is Ably AI Transport?', currentResponseId); +}; + +const handleConnect = async () => { + // Set hydrating before attach to buffer any live tokens + isHydrating = true; + pendingTokens = []; + + await channel.attach(); + await hydrateFromHistory(); + + connectionToggle.innerText = 'Disconnect'; +}; + +const handleDisconnect = async () => { + await channel.detach(); + processingStatus.innerText = 'Paused'; + connectionToggle.innerText = 'Connect'; +}; + +const handleConnectionToggle = () => { + if (channel.state === 'attached') { + handleDisconnect(); + } else { + handleConnect(); + } +}; + +connectionToggle.onclick = handleConnectionToggle; +promptButton.onclick = handlePromptClick; diff --git a/examples/ai-transport-message-per-token/javascript/src/styles.css b/examples/ai-transport-message-per-token/javascript/src/styles.css new file mode 100644 index 0000000000..bd6213e1df --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/src/styles.css @@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; \ No newline at end of file diff --git a/examples/ai-transport-message-per-token/javascript/tailwind.config.ts b/examples/ai-transport-message-per-token/javascript/tailwind.config.ts new file mode 100644 index 0000000000..1c86e1c371 --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/tailwind.config.ts @@ -0,0 +1,9 @@ +import baseConfig from '../../tailwind.config'; +import type { Config } from 'tailwindcss'; + +const config: Config = { + ...baseConfig, + content: ['./src/**/*.{js,ts,tsx}', './index.html'], +}; + +export default config; diff --git 
a/examples/ai-transport-message-per-token/javascript/vite.config.ts b/examples/ai-transport-message-per-token/javascript/vite.config.ts new file mode 100644 index 0000000000..3b1cf13b4f --- /dev/null +++ b/examples/ai-transport-message-per-token/javascript/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite'; +import baseConfig from '../../vite.config'; + +export default defineConfig({ + ...baseConfig, + envDir: '../../', +}); diff --git a/examples/ai-transport-message-per-token/react/README.md b/examples/ai-transport-message-per-token/react/README.md new file mode 100644 index 0000000000..d696ea421e --- /dev/null +++ b/examples/ai-transport-message-per-token/react/README.md @@ -0,0 +1,60 @@ +# AI Transport message per token streaming + +Enable realtime streaming of AI/LLM responses by publishing tokens as they arrive from Large Language Model services. + +AI Transport token streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime rather than waiting for complete responses. This pattern is essential for creating engaging AI-powered experiences where users can see responses being generated as they happen. + +The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime. + +Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions. + +## Resources + +Use the following components to implement AI Transport token streaming: + +- [`AblyProvider`](/docs/getting-started/react-hooks#ably-provider): initializes and manages a shared Ably client instance, passing it down through React context to enable realtime AI Transport functionality across the application. +- [`ChannelProvider`](/docs/getting-started/react-hooks#channel-provider): manages the state and functionality of a specific channel, providing access to AI response tokens and streaming state via React context. +- [`useChannel()`](/docs/getting-started/react-hooks#useChannel) hook: a hook to subscribe to token messages from AI services and manage streaming state. +- [`untilAttach`](/docs/channels/options#attach) history option: enables seamless message recovery during reconnections, ensuring no tokens are lost when connectivity is restored. + +Find out more about [AI Transport](/docs/ai-transport) and [message history](/docs/channels/history). + +## Getting started + +1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found: + + ```sh + git clone git@github.com:ably/docs.git + ``` + +2. Change directory: + + ```sh + cd examples/ + ``` + +3. Rename the environment file: + + ```sh + mv .env.example .env.local + ``` + +4. In `.env.local` update the value of `VITE_ABLY_KEY` to be your Ably API key. + +5. Install dependencies: + + ```sh + yarn install + ``` + +6. Run the server: + + ```sh + yarn run ai-transport-message-per-token-react + ``` + +7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming. 
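+
+## How it works
+
+The hooks listed under Resources combine as in the simplified sketch below. This is an illustration rather than the example's full `App.tsx`: it assumes a fixed channel name (`ai-transport-demo`), whereas the example itself generates a unique channel name per session, and it omits the `untilAttach` history hydration step. The component must be rendered inside an `AblyProvider` and a matching `ChannelProvider`:
+
+```tsx
+import { useState } from 'react';
+import { useChannel } from 'ably/react';
+
+// Renders a single streamed response. Assumes an AblyProvider and a
+// ChannelProvider for the same channel name higher up the tree.
+const ResponseView = () => {
+  const [text, setText] = useState('');
+
+  useChannel('ai-transport-demo', (message) => {
+    if (message.name === 'start') {
+      // A new response is beginning: clear the previous one
+      setText('');
+    } else if (message.name === 'token') {
+      // Append each token in arrival order
+      setText((prev) => prev + message.data.token);
+    }
+  });
+
+  return <p>{text}</p>;
+};
+```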
+
+## Open in CodeSandbox
+
+In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key.
\ No newline at end of file
diff --git a/examples/ai-transport-message-per-token/react/index.html b/examples/ai-transport-message-per-token/react/index.html
new file mode 100644
index 0000000000..d7655daeb7
--- /dev/null
+++ b/examples/ai-transport-message-per-token/react/index.html
@@ -0,0 +1,12 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>AI Transport Token Streaming</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/index.tsx"></script>
+  </body>
+</html>
+ + + diff --git a/examples/ai-transport-message-per-token/react/package.json b/examples/ai-transport-message-per-token/react/package.json new file mode 100644 index 0000000000..b9e509d03f --- /dev/null +++ b/examples/ai-transport-message-per-token/react/package.json @@ -0,0 +1,10 @@ +{ + "name": "ai-transport-message-per-token-react", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + } +} diff --git a/examples/ai-transport-message-per-token/react/postcss.config.js b/examples/ai-transport-message-per-token/react/postcss.config.js new file mode 100644 index 0000000000..2aa7205d4b --- /dev/null +++ b/examples/ai-transport-message-per-token/react/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/examples/ai-transport-message-per-token/react/src/App.tsx b/examples/ai-transport-message-per-token/react/src/App.tsx new file mode 100644 index 0000000000..01dfd4eae2 --- /dev/null +++ b/examples/ai-transport-message-per-token/react/src/App.tsx @@ -0,0 +1,178 @@ +import React, { useState, useRef } from 'react'; +import { AblyProvider, ChannelProvider, useChannel, useConnectionStateListener } from 'ably/react'; +import { Realtime, Message } from 'ably'; +import { Agent } from './agent'; +import { config } from './config'; +import './styles/styles.css'; + +// Generate unique channel name for this session +const CHANNEL_NAME = `ai-transport-${crypto.randomUUID()}`; +const client = new Realtime({ + key: config.ABLY_KEY, +}); + +const AITransportDemo: React.FC = () => { + const [currentResponse, setCurrentResponse] = useState(''); + const [isProcessing, setIsProcessing] = useState(false); + const [connectionState, setConnectionState] = useState('disconnected'); + const [isChannelDetached, setIsChannelDetached] = useState(false); + + const currentResponseId = useRef(null); + const isHydrating = useRef(false); + const pendingTokens = useRef([]); + + const { channel } = useChannel(CHANNEL_NAME, (message: Message) => { + const responseId = message.extras?.headers?.responseId; + + if (!currentResponseId.current || responseId !== currentResponseId.current) { + return; + } + + if (message.name === 'start') { + setCurrentResponse(''); + pendingTokens.current = []; + } else if (message.name === 'token') { + if (isHydrating.current) { + // Buffer tokens while hydrating from history + pendingTokens.current.push(message.data.token); + } else { + setCurrentResponse((prev) => prev + message.data.token); + } + } else if (message.name === 'stop') { + setIsProcessing(false); + } + }); + + useConnectionStateListener((stateChange: { current: string }) => { + setConnectionState(stateChange.current); + }); + + const handlePromptClick = () => { + if (isProcessing || connectionState !== 'connected' || isChannelDetached) { + return; + } + + setIsProcessing(true); + setCurrentResponse(''); + + const responseId = `request-${crypto.randomUUID()}`; + currentResponseId.current = responseId; + const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + agent.processPrompt('What is Ably AI Transport?', responseId); + }; + + const handleDisconnect = () => { + channel.detach(); + setIsChannelDetached(true); + }; + + const handleReconnect = async () => { + isHydrating.current = true; + pendingTokens.current = []; + + setIsChannelDetached(false); + await channel.attach(); + + if (currentResponseId.current) { + let page = await channel.history({ untilAttach: true }); + + const 
historyTokens: string[] = []; + let foundStreamComplete = false; + + while (page) { + for (const message of page.items) { + const responseId = message.extras?.headers?.responseId; + if (responseId === currentResponseId.current) { + if (message.name === 'token') { + historyTokens.push(message.data.token); + } else if (message.name === 'stop') { + foundStreamComplete = true; + } + } + } + page = page.hasNext() ? await page.next() : null; + } + + // History arrives newest-first, so reverse it + // Then append any tokens that arrived during hydration + setCurrentResponse(historyTokens.reverse().join('') + pendingTokens.current.join('')); + + if (foundStreamComplete) { + setIsProcessing(false); + } + } + + isHydrating.current = false; + }; + + return ( +
+    <div>
+      {/* Response section with always visible status */}
+      <div>
+        <div>
+          <span>
+            {isChannelDetached && isProcessing
+              ? 'Paused'
+              : isProcessing
+                ? 'Streaming'
+                : currentResponse
+                  ? 'Complete'
+                  : connectionState === 'connected'
+                    ? 'Ready'
+                    : 'Disconnected'}
+          </span>
+          {/* Disconnect/Reconnect button */}
+          <button onClick={isChannelDetached ? handleReconnect : handleDisconnect}>
+            {isChannelDetached ? 'Reconnect' : 'Disconnect'}
+          </button>
+        </div>
+        <div>
+          {currentResponse || (isProcessing ? 'Thinking...' : 'Select a prompt below to get started')}
+          {isProcessing && <span className="animate-pulse">|</span>}
+        </div>
+      </div>
+      {/* Prompt selection */}
+      <div>
+        <button onClick={handlePromptClick}>What is Ably AI Transport?</button>
+      </div>
+    </div>
+  );
+};
+
+// Main App component with providers
+const App: React.FC = () => {
+  return (
+    <AblyProvider client={client}>
+      <ChannelProvider channelName={CHANNEL_NAME}>
+        <AITransportDemo />
+      </ChannelProvider>
+    </AblyProvider>
+  );
+};
+
+export default App;
diff --git a/examples/ai-transport-message-per-token/react/src/agent.ts b/examples/ai-transport-message-per-token/react/src/agent.ts
new file mode 100644
index 0000000000..d8a8f6d5f3
--- /dev/null
+++ b/examples/ai-transport-message-per-token/react/src/agent.ts
@@ -0,0 +1,63 @@
+// Agent Service
+// This consumes LLM streams and publishes events to Ably
+
+import * as Ably from 'ably';
+import { MockLLM } from './llm';
+
+export class Agent {
+  private client: Ably.Realtime;
+  private channel: Ably.RealtimeChannel;
+  private llm: MockLLM;
+
+  constructor(ablyKey: string, channelName: string) {
+    this.client = new Ably.Realtime({
+      key: ablyKey,
+      clientId: 'ai-agent',
+    });
+    this.channel = this.client.channels.get(channelName);
+    this.llm = new MockLLM();
+  }
+
+  async processPrompt(prompt: string, responseId: string): Promise<void> {
+    const stream = await this.llm.responses.create(prompt);
+
+    for await (const event of stream) {
+      if (event.type === 'message_start') {
+        // Publish response start
+        this.channel.publish({
+          name: 'start',
+          data: {},
+          extras: {
+            headers: {
+              responseId,
+            },
+          },
+        });
+      } else if (event.type === 'message_delta') {
+        // Publish tokens
+        this.channel.publish({
+          name: 'token',
+          data: {
+            token: event.text,
+          },
+          extras: {
+            headers: {
+              responseId,
+            },
+          },
+        });
+      } else if (event.type === 'message_stop') {
+        // Publish response stop
+        this.channel.publish({
+          name: 'stop',
+          data: {},
+          extras: {
+            headers: {
+              responseId,
+            },
+          },
+        });
+      }
+    }
+  }
+}
diff --git a/examples/ai-transport-message-per-token/react/src/config.ts b/examples/ai-transport-message-per-token/react/src/config.ts
new file mode 100644
index 0000000000..28bdb0c670
--- /dev/null
+++ b/examples/ai-transport-message-per-token/react/src/config.ts
@@ -0,0 +1,3 @@
+export const config = {
+  ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'demo-key-for-examples:YOUR_ABLY_KEY_HERE',
+};
diff --git a/examples/ai-transport-message-per-token/react/src/index.tsx b/examples/ai-transport-message-per-token/react/src/index.tsx
new file mode 100644
index 0000000000..e17d50b103
--- /dev/null
+++ b/examples/ai-transport-message-per-token/react/src/index.tsx
@@ -0,0 +1,9 @@
+import { StrictMode } from 'react';
+import { createRoot } from 'react-dom/client';
+import App from './App.tsx';
+
+createRoot(document.getElementById('root')!).render(
+  <StrictMode>
+    <App />
+  </StrictMode>,
+);
diff --git a/examples/ai-transport-message-per-token/react/src/llm.ts b/examples/ai-transport-message-per-token/react/src/llm.ts
new file mode 100644
index 0000000000..ab9f1061f8
--- /dev/null
+++ b/examples/ai-transport-message-per-token/react/src/llm.ts
@@ -0,0 +1,49 @@
+// Mock LLM Service
+// This simulates a generic LLM SDK with streaming capabilities
+
+interface StreamEvent {
+  type: 'message_start' | 'message_delta' | 'message_stop';
+  text?: string;
+  responseId: string;
+}
+
+export class MockLLM {
+  private readonly responseText =
+    'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications.
You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.'; + + responses = { + create: (prompt: string) => this.createStream(prompt), + }; + + private async *createStream(_prompt: string): AsyncIterable { + const responseId = `resp_${crypto.randomUUID()}`; + + // Yield start event + yield { type: 'message_start', responseId }; + + // Chunk text into tokens (simulates LLM tokenization) + const tokens = this.chunkTextLikeAI(this.responseText); + + for (const token of tokens) { + // Simulate realistic delay between tokens + await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50)); + + // Yield token event + yield { type: 'message_delta', text: token, responseId }; + } + + // Yield stop event + yield { type: 'message_stop', responseId }; + } + + private chunkTextLikeAI(text: string): string[] { + const chunks: string[] = []; + let pos = 0; + while (pos < text.length) { + const size = Math.floor(Math.random() * 8) + 1; + chunks.push(text.slice(pos, pos + size)); + pos += size; + } + return chunks.filter((chunk) => chunk.length > 0); + } +} diff --git a/examples/ai-transport-message-per-token/react/src/styles/styles.css b/examples/ai-transport-message-per-token/react/src/styles/styles.css new file mode 100644 index 0000000000..bd6213e1df --- /dev/null +++ b/examples/ai-transport-message-per-token/react/src/styles/styles.css @@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; \ No newline at end of file diff --git a/examples/ai-transport-message-per-token/react/tailwind.config.ts b/examples/ai-transport-message-per-token/react/tailwind.config.ts new file mode 100644 index 0000000000..1c86e1c371 --- /dev/null +++ b/examples/ai-transport-message-per-token/react/tailwind.config.ts @@ -0,0 +1,9 @@ +import baseConfig from '../../tailwind.config'; +import type { Config } from 'tailwindcss'; + +const config: Config = { + ...baseConfig, + content: ['./src/**/*.{js,ts,tsx}', './index.html'], +}; + +export default config; diff --git a/examples/ai-transport-message-per-token/react/tsconfig.json b/examples/ai-transport-message-per-token/react/tsconfig.json new file mode 100644 index 0000000000..e92702dbee --- /dev/null +++ b/examples/ai-transport-message-per-token/react/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ESNext", + "lib": ["DOM", "DOM.Iterable", "ESNext"], + "allowJs": false, + "skipLibCheck": true, + "esModuleInterop": false, + "allowSyntheticDefaultImports": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "module": "ESNext", + "moduleResolution": "Node", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx" + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} diff --git a/examples/ai-transport-message-per-token/react/tsconfig.node.json b/examples/ai-transport-message-per-token/react/tsconfig.node.json new file mode 100644 index 0000000000..42872c59f5 --- /dev/null +++ b/examples/ai-transport-message-per-token/react/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/examples/ai-transport-message-per-token/react/vite.config.ts b/examples/ai-transport-message-per-token/react/vite.config.ts new file mode 100644 index 
0000000000..3b1cf13b4f
--- /dev/null
+++ b/examples/ai-transport-message-per-token/react/vite.config.ts
@@ -0,0 +1,7 @@
+import { defineConfig } from 'vite';
+import baseConfig from '../../vite.config';
+
+export default defineConfig({
+  ...baseConfig,
+  envDir: '../../',
+});
diff --git a/examples/package.json b/examples/package.json
index e2782d82af..79b062522c 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -6,6 +6,8 @@
     "node": ">=20.0.0"
   },
   "workspaces": [
+    "ai-transport-message-per-token/react",
+    "ai-transport-message-per-token/javascript",
     "auth-generate-jwt/react",
     "auth-generate-jwt/javascript",
     "auth-generate-jwt/server",
@@ -54,6 +56,8 @@
     "spaces-member-location/javascript"
   ],
   "scripts": {
+    "ai-transport-message-per-token-javascript": "yarn workspace ai-transport-message-per-token-javascript dev",
+    "ai-transport-message-per-token-react": "yarn workspace ai-transport-message-per-token-react dev",
    "auth-generate-jwt-javascript": "yarn workspace auth-generate-jwt-javascript dev",
    "auth-generate-jwt-react": "yarn workspace auth-generate-jwt-react dev",
    "auth-generate-jwt-server": "yarn workspace auth-generate-jwt-server dev",
diff --git a/src/data/examples/index.ts b/src/data/examples/index.ts
index 780f96c66b..0d3e144e04 100644
--- a/src/data/examples/index.ts
+++ b/src/data/examples/index.ts
@@ -3,6 +3,16 @@ import { Example } from './types';
 export const DEFAULT_EXAMPLE_LANGUAGES = ['javascript', 'react'];
 
 export const examples: Example[] = [
+  {
+    id: 'ai-transport-message-per-token',
+    name: 'Message per token streaming',
+    description: 'Stream AI responses token-by-token using the message-per-token pattern.',
+    products: ['ai_transport'],
+    layout: 'single-horizontal',
+    visibleFiles: ['src/script.ts', 'src/llm.ts', 'src/agent.ts', 'App.tsx', 'llm.ts', 'agent.ts', 'index.tsx'],
+    metaTitle: 'Build AI message-per-token streaming with Ably AI Transport',
+    metaDescription: `Stream AI-generated tokens in realtime using the message-per-token pattern with Ably's AI Transport. Implement scalable token streaming with low latency.`,
+  },
   {
     id: 'chat-presence',
     name: 'Chat presence',
@@ -287,7 +297,7 @@ export const products = {
   spaces: {
     label: 'Spaces',
   },
-  aitransport: {
+  ai_transport: {
     label: 'AI Transport',
   },
 };