8 changes: 8 additions & 0 deletions packages/x-markdown/benchmark/.gitignore
@@ -0,0 +1,8 @@

# Playwright
node_modules/
/test-results/
/playwright-report/
/blob-report/
/playwright/.cache/
/playwright/.auth/
45 changes: 45 additions & 0 deletions packages/x-markdown/benchmark/components/MarkdownRenderer.tsx
@@ -0,0 +1,45 @@
import MarkdownIt from 'markdown-it';
import { marked } from 'marked';
import React, { FC } from 'react';
import ReactMarkdown from 'react-markdown';
import { Streamdown } from 'streamdown';
import XMarkdown from '../../src';

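// Each renderer takes the raw markdown string via a shared props type so the benchmark can swap libraries uniformly.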
type MarkdownRendererProps = {
md: string;
};

const MarkedRenderer: FC<MarkdownRendererProps> = (props) => (
// biome-ignore lint/security/noDangerouslySetInnerHtml: benchmark only
<div dangerouslySetInnerHTML={{ __html: marked.parse(props.md) as string }} />
);

// Reuse a single parser instance so the benchmark measures rendering, not parser construction on every update.
const markdownIt = new MarkdownIt();

const MarkdownItRenderer: FC<MarkdownRendererProps> = (props) => (
// biome-ignore lint/security/noDangerouslySetInnerHtml: benchmark only
<div dangerouslySetInnerHTML={{ __html: markdownIt.render(props.md) }} />
);

const ReactMarkdownRenderer: FC<MarkdownRendererProps> = (props) => (
<ReactMarkdown>{props.md}</ReactMarkdown>
);

const XMarkdownRenderer: FC<MarkdownRendererProps> = (props) => <XMarkdown>{props.md}</XMarkdown>;

const StreamdownRenderer: FC<MarkdownRendererProps> = (props) => (
<Streamdown>{props.md}</Streamdown>
);

const Empty = () => <div />;

export {
MarkedRenderer,
MarkdownItRenderer,
ReactMarkdownRenderer,
XMarkdownRenderer,
StreamdownRenderer,
Empty,
};
34 changes: 34 additions & 0 deletions packages/x-markdown/benchmark/package.json
@@ -0,0 +1,34 @@
{
"name": "streaming-markdown-benchmark",
"version": "1.0.0",
"description": "Streaming Markdown performance comparison",
"main": "index.js",
"scripts": {
"start-react": "vite --config src/react-app/vite.config.js",
"test:all": "playwright test -c playwright-ct.config.ts",
"setup": "node scripts/setup.js",
"test-ct": "playwright test -c playwright-ct.config.ts"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@ant-design/x": "^2.0.0-alpha.15",
"@ant-design/x-markdown": "^2.0.0-alpha.15",
"@playwright/experimental-ct-react": "^1.56.1",
"@playwright/test": "^1.48.2",
"@types/markdown-it": "^14.0.0",
"@types/marked": "^6.0.0",
"@types/node": "^20.10.3",
"@vitejs/plugin-react": "^4.2.1",
"antd": "6.0.0-alpha.4",
"markdown-it": "^14.0.0",
"marked": "^11.1.1",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-markdown": "^9.0.1",
"streamdown": "^1.4.0",
"typescript": "^5.3.2",
"vite": "^5.0.6"
}
}
38 changes: 38 additions & 0 deletions packages/x-markdown/benchmark/playwright-ct.config.ts
@@ -0,0 +1,38 @@
import { defineConfig, devices } from '@playwright/experimental-ct-react';

/**
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
testDir: './',
/* The base directory, relative to the config file, for snapshot files created with toMatchSnapshot and toHaveScreenshot. */
snapshotDir: './__snapshots__',
/* Maximum time one test can run for. */
timeout: 10 * 1000,
/* Run tests in files in parallel */
fullyParallel: true,
/* Fail the build on CI if you accidentally left test.only in the source code. */
forbidOnly: !!process.env.CI,
/* Retry on CI only */
retries: process.env.CI ? 2 : 0,
/* Opt out of parallel tests on CI. */
workers: process.env.CI ? 1 : undefined,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: 'html',
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: 'on-first-retry',

/* Port to use for Playwright component endpoint. */
ctPort: 3100,
},

/* Configure projects for major browsers */
projects: [
{
name: 'chromium',
use: { ...devices['Desktop Chrome'] },
},
],
});
12 changes: 12 additions & 0 deletions packages/x-markdown/benchmark/playwright/.cache/index.html
@@ -0,0 +1,12 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Testing Page</title>
<script type="module" crossorigin src="/assets/index-Bw4ETuhr.js"></script>
</head>
<body>
<div id="root"></div>
</body>
</html>
12 changes: 12 additions & 0 deletions packages/x-markdown/benchmark/playwright/index.html
@@ -0,0 +1,12 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Testing Page</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="./index.tsx"></script>
</body>
</html>
2 changes: 2 additions & 0 deletions packages/x-markdown/benchmark/playwright/index.tsx
@@ -0,0 +1,2 @@
// Import styles, initialize component theme here.
// import '../src/common.css';
183 changes: 183 additions & 0 deletions packages/x-markdown/benchmark/tests/performance.spec.tsx
@@ -0,0 +1,183 @@
import { test } from '@playwright/experimental-ct-react';
import fs from 'fs';
import path from 'path';
import React from 'react';
import {
Empty,
MarkdownItRenderer,
MarkedRenderer,
ReactMarkdownRenderer,
StreamdownRenderer,
XMarkdownRenderer,
} from '../components/MarkdownRenderer';

interface BenchmarkResult {
name: string;
duration: number;
avgFPS: number;
minFPS: number;
maxFPS: number;
maxMemory: number;
avgMemory: number;
totalFrames: number;
}

const fullText = fs.readFileSync(path.resolve(__dirname, 'test.md'), 'utf-8');

const renderers = ['marked', 'markdown-it', 'react-markdown', 'x-markdown', 'streamdown'];

const getRenderer = (name: string, md = '') => {
switch (name) {
case 'marked': {
return <MarkedRenderer md={md} />;
}
case 'markdown-it': {
return <MarkdownItRenderer md={md} />;
}
case 'react-markdown': {
return <ReactMarkdownRenderer md={md} />;
}
case 'x-markdown': {
return <XMarkdownRenderer md={md} />;
}
case 'streamdown': {
return <StreamdownRenderer md={md} />;
}
default: {
return <div>{md}</div>;
}
}
};

async function measure({
page,
name,
browserName,
mount,
}: {
name: string;
page: any;
mount: any;
browserName: string;
}): Promise<BenchmarkResult> {
console.log('🚀 start measure performance:', name);

await page.context().tracing.start({
screenshots: true,
title: `XMarkdown_Stream_Perf_${browserName}`,
Reviewer comment (Contributor, medium): The trace title is not unique per renderer. When running multiple benchmark tests, this can make it harder to distinguish between traces. Including the renderer name in the title will make each trace uniquely identifiable.

Suggested change:
- title: `XMarkdown_Stream_Perf_${browserName}`,
+ title: `XMarkdown_Stream_Perf_${name}_${browserName}`,

});

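// Mount an empty placeholder first; content is streamed in via component.update() below.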
const component = await mount(<Empty />);

const updateInterval = 50;

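// Install a requestAnimationFrame loop in the page to sample per-frame FPS and count total frames.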
await page.evaluate(() => {
(window as any).fpsSamples = [];
(window as any).memorySamples = [];
(window as any).frameCount = 0;
(window as any).startTime = performance.now();
(window as any).lastFrameTime = performance.now();

const trackFPS = () => {
const now = performance.now();
const frameTime = now - (window as any).lastFrameTime;
if (frameTime > 0) {
const fps = 1000 / frameTime;
(window as any).fpsSamples.push(fps);
}
(window as any).lastFrameTime = now;
(window as any).frameCount++;
requestAnimationFrame(trackFPS);
};
requestAnimationFrame(trackFPS);
});

const startTime = await page.evaluate(() => (window as any).startTime);

console.log('🚀 start render streaming markdown:', name);
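// Simulate streaming: grow the rendered markdown in 100-character chunks, sampling heap usage after each update.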
for (let i = 0; i < fullText.length; i += 100) {
const partialText = fullText.substring(0, i + 100);
const renderer = getRenderer(name, partialText);
await component.update(renderer);
await page.evaluate(() => {
if ((performance as any).memory) {
(window as any).memorySamples.push((performance as any).memory.usedJSHeapSize);
}
});
await page.waitForTimeout(updateInterval);
}

const endTime = await page.evaluate(() => performance.now());
const totalDuration = endTime - startTime;

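// Aggregate the collected samples; FPS readings outside the 1-120 range are treated as noise and discarded.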
const finalMetrics = await page.evaluate(() => {
const fpsSamples = (window as any).fpsSamples || [];
const memorySamples = (window as any).memorySamples || [];
const validFpsSamples = fpsSamples.filter((fps: number) => fps > 1 && fps < 120);

return {
avgFPS:
validFpsSamples.length > 0
? validFpsSamples.reduce((a: number, b: number) => a + b, 0) / validFpsSamples.length
: 0,
minFPS: validFpsSamples.length > 0 ? Math.min(...validFpsSamples) : 0,
maxFPS: validFpsSamples.length > 0 ? Math.max(...validFpsSamples) : 0,
maxMemory: memorySamples.length > 0 ? Math.max(...memorySamples) : 0,
avgMemory:
memorySamples.length > 0
? memorySamples.reduce((a: number, b: number) => a + b, 0) / memorySamples.length
: 0,
totalFrames: (window as any).frameCount || 0,
};
});

// Write one trace file per renderer so successive runs do not overwrite each other.
await page.context().tracing.stop({ path: `test-results/trace-${name}.zip` });

return {
name,
duration: totalDuration,
...finalMetrics,
};
}

test.describe('Streaming Markdown Benchmark', () => {
const results: Array<BenchmarkResult> = [];

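// Register one test per renderer; each result feeds the summary table printed in afterAll.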
for (const rendererName of renderers) {
test(`${rendererName}-Performance`, async ({ page, mount, browserName }) => {
test.setTimeout(120000);
try {
const result = await measure({ name: rendererName, page, mount, browserName });
results.push(result);
} catch (error) {
console.error(`Error in ${rendererName}-Performance:`, error);
results.push({
name: rendererName,
duration: 0,
avgFPS: 0,
minFPS: 0,
maxFPS: 0,
maxMemory: 0,
avgMemory: 0,
totalFrames: 0,
});
}
});
}

test.afterAll(async () => {
console.log('\n📊 Benchmark Results Table');
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
console.table(
results.map((r) => ({
MarkdownLength: fullText.length,
Renderer: r.name,
'Duration(ms)': Math.round(r.duration),
'Avg FPS': r.avgFPS.toFixed(1),
'Avg Memory(MB)': (r.avgMemory / 1024 / 1024).toFixed(2),
'Total Frames': r.totalFrames,
})),
);
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
});
});