diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json
index 6616b66..1c3da44 100644
--- a/.claude-plugin/marketplace.json
+++ b/.claude-plugin/marketplace.json
@@ -8,10 +8,10 @@
"name": "llmock",
"source": {
"source": "npm",
- "package": "@copilotkit/llmock",
- "version": "^1.5.0"
+ "package": "@copilotkit/aimock",
+ "version": "^1.7.0"
},
- "description": "Fixture authoring skill for @copilotkit/llmock — match fields, response types, embeddings, structured output, sequential responses, streaming physics, agent loop patterns, gotchas, and debugging"
+ "description": "Fixture authoring skill for @copilotkit/aimock — match fields, response types, embeddings, structured output, sequential responses, streaming physics, agent loop patterns, gotchas, and debugging"
}
]
}
diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json
index cd8e5ae..150c26f 100644
--- a/.claude-plugin/plugin.json
+++ b/.claude-plugin/plugin.json
@@ -1,7 +1,7 @@
{
"name": "llmock",
- "version": "1.5.0",
- "description": "Fixture authoring guidance for @copilotkit/llmock",
+ "version": "1.7.0",
+ "description": "Fixture authoring guidance for @copilotkit/aimock",
"author": {
"name": "CopilotKit"
},
diff --git a/.github/workflows/fix-drift.yml b/.github/workflows/fix-drift.yml
index 1e44b97..4d0da8f 100644
--- a/.github/workflows/fix-drift.yml
+++ b/.github/workflows/fix-drift.yml
@@ -33,7 +33,7 @@ jobs:
# Step 0: Configure git identity and create fix branch
- name: Configure git
run: |
- git config user.name "llmock-drift-bot"
+ git config user.name "aimock-drift-bot"
git config user.email "drift-bot@copilotkit.ai"
git checkout -B fix/drift-$(date +%Y-%m-%d)-${{ github.run_id }}
diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml
index 3b40eab..2a75812 100644
--- a/.github/workflows/publish-docker.yml
+++ b/.github/workflows/publish-docker.yml
@@ -10,7 +10,8 @@ on:
env:
REGISTRY: ghcr.io
- IMAGE_NAME: ${{ github.repository }}
+ PRIMARY_IMAGE: ghcr.io/copilotkit/aimock
+ COMPAT_IMAGE: ghcr.io/copilotkit/llmock
jobs:
build-and-push:
@@ -37,11 +38,20 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- - name: Extract metadata
- id: meta
+ - name: Extract metadata (primary — aimock)
+ id: meta-primary
uses: docker/metadata-action@v5
with:
- images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+ images: ${{ env.PRIMARY_IMAGE }}
+ tags: |
+ type=semver,pattern={{version}}
+ type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
+
+ - name: Extract metadata (compat — llmock)
+ id: meta-compat
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.COMPAT_IMAGE }}
tags: |
type=semver,pattern={{version}}
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
@@ -52,7 +62,9 @@ jobs:
context: .
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.meta.outputs.tags }}
- labels: ${{ steps.meta.outputs.labels }}
+ tags: |
+ ${{ steps.meta-primary.outputs.tags }}
+ ${{ steps.meta-compat.outputs.tags }}
+ labels: ${{ steps.meta-primary.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
diff --git a/.github/workflows/publish-pytest.yml b/.github/workflows/publish-pytest.yml
new file mode 100644
index 0000000..9f59aed
--- /dev/null
+++ b/.github/workflows/publish-pytest.yml
@@ -0,0 +1,42 @@
+name: Publish aimock-pytest
+on:
+ push:
+ branches: [main]
+ paths:
+ - "packages/aimock-pytest/**"
+ workflow_dispatch:
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ environment: pypi
+ permissions:
+ id-token: write
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install build tools
+ run: pip install hatch
+
+ - name: Check if version is already published
+ id: check
+ run: |
+ VERSION=$(python -c "import tomllib; print(tomllib.load(open('packages/aimock-pytest/pyproject.toml', 'rb'))['project']['version'])")
+ echo "version=$VERSION" >> "$GITHUB_OUTPUT"
+ if pip install "aimock-pytest==$VERSION" --dry-run --no-deps 2>/dev/null; then
+ echo "published=true" >> "$GITHUB_OUTPUT"
+ else
+ echo "published=false" >> "$GITHUB_OUTPUT"
+ fi
+
+ - name: Build
+ if: steps.check.outputs.published == 'false'
+ run: cd packages/aimock-pytest && hatch build
+
+ - name: Publish to PyPI
+ if: steps.check.outputs.published == 'false'
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ packages-dir: packages/aimock-pytest/dist/
diff --git a/.github/workflows/test-pytest.yml b/.github/workflows/test-pytest.yml
new file mode 100644
index 0000000..35e7360
--- /dev/null
+++ b/.github/workflows/test-pytest.yml
@@ -0,0 +1,44 @@
+name: Python Tests
+on:
+ push:
+ branches: [main]
+ paths:
+ - "packages/aimock-pytest/**"
+ - "src/**"
+ - "dist/**"
+ pull_request:
+ branches: [main]
+ paths:
+ - "packages/aimock-pytest/**"
+ - "src/**"
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.10", "3.11", "3.12", "3.13"]
+ node-version: [20, 22]
+ steps:
+ - uses: actions/checkout@v4
+ - uses: pnpm/action-setup@v4
+ - uses: actions/setup-node@v4
+ with:
+ node-version: ${{ matrix.node-version }}
+ cache: pnpm
+ - uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ # Build the TS package first
+ - run: pnpm install --frozen-lockfile
+ - run: pnpm run build
+
+ # Set CLI path to local build
+ - name: Set AIMOCK_CLI_PATH
+ run: echo "AIMOCK_CLI_PATH=$PWD/dist/cli.js" >> "$GITHUB_ENV"
+
+ # Install and test Python package
+ - name: Install aimock-pytest
+ run: pip install ./packages/aimock-pytest[test]
+ - name: Run Python tests
+ run: cd packages/aimock-pytest && pytest tests/ -v
diff --git a/.gitignore b/.gitignore
index cf9381d..fb39fce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,5 @@ dist/
*.tsbuildinfo
.worktrees/
.superpowers/
+coverage/
+**/__pycache__/
diff --git a/.prettierignore b/.prettierignore
index 52af816..b9f7ab6 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -2,3 +2,5 @@ dist/
node_modules/
pnpm-lock.yaml
charts/
+coverage/
+.remember/
diff --git a/.remember/remember.md b/.remember/remember.md
new file mode 100644
index 0000000..9b68f88
--- /dev/null
+++ b/.remember/remember.md
@@ -0,0 +1,30 @@
+# Handoff
+
+## State
+
+aimock rebrand COMPLETE on `feat/aimock` in `/Users/jpr5/proj/cpk/llmock-v1.7.0-sp1`. PR #68 on CopilotKit/llmock. Package renamed to `@copilotkit/aimock`. 1989 tests, 55 files. All docs/source/Docker/Helm/CI/skills/README rebranded. 6 migration pages, aimock-pytest, 2 converters, control API, MCP/A2A/Vector metrics. 8 blog posts on Notion.
+
+**aimock-pytest CI + local dev path** added:
+
+- `AIMOCK_CLI_PATH` env var support in `_node_manager.py` (ensure_installed) and `_server.py` (start) — bypasses npm tarball download, points directly at a local `cli.js`
+- `tests/conftest.py` auto-detects `../../dist/cli.js` for local development
+- `.github/workflows/test-pytest.yml` — Python 3.10-3.13 x Node 20/22 matrix, builds TS first, sets AIMOCK_CLI_PATH
+- `.github/workflows/publish-pytest.yml` — publishes to PyPI on main push when version bumped (uses OIDC trusted publishing via the `pypi` environment — no token secret; configure the trusted publisher on PyPI)
+- `pyproject.toml` — added `[test]` optional dependency group (pytest, requests)
+- `README.md` — added Development section with local test instructions and CI explanation
+
+## Next
+
+1. **Merge PR #68** → triggers npm publish + Docker push
+2. **GitHub repo rename**: CopilotKit/llmock → CopilotKit/aimock (Settings → General)
+3. **CNAME**: aimock.copilotkit.dev, update docs/CNAME, redirect llmock.copilotkit.dev
+4. **Deprecate @copilotkit/llmock**: final version re-exporting @copilotkit/aimock
+5. **Clean `__pycache__`** dirs from aimock-pytest commit
+6. **Configure PyPI trusted publishing** (OIDC) for CopilotKit/llmock (or aimock) — publish-pytest workflow authenticates via `id-token: write`, not a token secret
+
+## Context
+
+- Branch `feat/aimock`, worktree `/Users/jpr5/proj/cpk/llmock-v1.7.0-sp1`
+- Notion: Content (3353aa38-1852-81fb), Website (3353aa38-1852-811d), Conversion (3353aa38-1852-816d)
+- PRs #62 (reasoning) and #63 (requestTransform) awaiting contributor fixes
+- `npx aimock` always, `aimock` lowercase, `LLMock` class stays
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f684458..9f47f65 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-# @copilotkit/llmock
+# @copilotkit/aimock
## 1.6.1
diff --git a/Dockerfile b/Dockerfile
index 09b9811..1f7e0ec 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -19,6 +19,10 @@ RUN pnpm run build
# --- Production stage ---
FROM node:22-alpine
+LABEL org.opencontainers.image.title="aimock"
+LABEL org.opencontainers.image.description="Mock infrastructure for AI application testing"
+LABEL org.opencontainers.image.source="https://github.com/CopilotKit/llmock"
+
WORKDIR /app
# No runtime dependencies — all imports are node:* built-ins
diff --git a/README.md b/README.md
index 2b3448b..1612eaa 100644
--- a/README.md
+++ b/README.md
@@ -1,98 +1,81 @@
-# @copilotkit/llmock [](https://github.com/CopilotKit/llmock/actions/workflows/test-unit.yml) [](https://github.com/CopilotKit/llmock/actions/workflows/test-drift.yml) [](https://www.npmjs.com/package/@copilotkit/llmock)
+# aimock [](https://github.com/CopilotKit/llmock/actions/workflows/test-unit.yml) [](https://github.com/CopilotKit/llmock/actions/workflows/test-drift.yml) [](https://www.npmjs.com/package/@copilotkit/aimock)
-Deterministic mock LLM server for testing. A real HTTP server on a real port — not an in-process interceptor — so every process in your stack (Playwright, Next.js, agent workers, microservices) can point at it via `OPENAI_BASE_URL` / `ANTHROPIC_BASE_URL` and get reproducible, instant responses. Streams SSE in real OpenAI, Claude, Gemini, Bedrock, Azure, Vertex AI, Ollama, and Cohere API formats, driven entirely by fixtures. Zero runtime dependencies.
+Mock infrastructure for AI application testing — LLM APIs, MCP tools, A2A agents, vector databases, search, rerank, and moderation. One package, one port, zero dependencies.
## Quick Start
```bash
-npm install @copilotkit/llmock
+npm install @copilotkit/aimock
```
```typescript
-import { LLMock } from "@copilotkit/llmock";
-
-const mock = new LLMock({ port: 5555 });
+import { LLMock } from "@copilotkit/aimock";
+const mock = new LLMock({ port: 0 });
mock.onMessage("hello", { content: "Hi there!" });
+await mock.start();
-const url = await mock.start();
-// Point your OpenAI client at `url` instead of https://api.openai.com
+process.env.OPENAI_BASE_URL = `${mock.url}/v1`;
// ... run your tests ...
await mock.stop();
```
-## Features
+## The aimock Suite
-- **[Multi-provider support](https://llmock.copilotkit.dev/compatible-providers.html)** — [OpenAI Chat Completions](https://llmock.copilotkit.dev/chat-completions.html), [OpenAI Responses](https://llmock.copilotkit.dev/responses-api.html), [Anthropic Claude](https://llmock.copilotkit.dev/claude-messages.html), [Google Gemini](https://llmock.copilotkit.dev/gemini.html), [AWS Bedrock](https://llmock.copilotkit.dev/aws-bedrock.html) (streaming + Converse), [Azure OpenAI](https://llmock.copilotkit.dev/azure-openai.html), [Vertex AI](https://llmock.copilotkit.dev/vertex-ai.html), [Ollama](https://llmock.copilotkit.dev/ollama.html), [Cohere](https://llmock.copilotkit.dev/cohere.html)
-- **[Embeddings API](https://llmock.copilotkit.dev/embeddings.html)** — OpenAI-compatible embedding responses with configurable dimensions
-- **[Structured output / JSON mode](https://llmock.copilotkit.dev/structured-output.html)** — `response_format`, `json_schema`, and function calling
-- **[Sequential responses](https://llmock.copilotkit.dev/sequential-responses.html)** — Stateful multi-turn fixtures that return different responses on each call
-- **[Streaming physics](https://llmock.copilotkit.dev/streaming-physics.html)** — Configurable `ttft`, `tps`, and `jitter` for realistic timing
-- **[WebSocket APIs](https://llmock.copilotkit.dev/websocket.html)** — OpenAI Responses WS, Realtime API, and Gemini Live
-- **[Error injection](https://llmock.copilotkit.dev/error-injection.html)** — One-shot errors, rate limiting, and provider-specific error formats
-- **[Chaos testing](https://llmock.copilotkit.dev/chaos-testing.html)** — Probabilistic failure injection: 500 errors, malformed JSON, mid-stream disconnects
-- **[Prometheus metrics](https://llmock.copilotkit.dev/metrics.html)** — Request counts, latencies, and fixture match rates at `/metrics`
-- **[Request journal](https://llmock.copilotkit.dev/docs.html)** — Record, inspect, and assert on every request
-- **[Fixture validation](https://llmock.copilotkit.dev/fixtures.html)** — Schema validation at load time with `--validate-on-load`
-- **CLI with hot-reload** — Standalone server with `--watch` for live fixture editing
-- **[Docker + Helm](https://llmock.copilotkit.dev/docker.html)** — Container image and Helm chart for CI/CD pipelines
-- **Record-and-replay** — VCR-style proxy-on-miss records real API responses as fixtures for deterministic replay
-- **[Drift detection](https://llmock.copilotkit.dev/drift-detection.html)** — Daily CI runs against real APIs to catch response format changes
-- **Claude Code integration** — `/write-fixtures` skill teaches your AI assistant how to write fixtures correctly
-
-## CLI Quick Reference
+aimock mocks everything your AI app talks to:
-```bash
-llmock [options]
-```
+| Tool | What it mocks | Docs |
+| -------------- | ----------------------------------------------------------------- | -------------------------------------------------------- |
+| **LLMock** | OpenAI, Claude, Gemini, Bedrock, Azure, Vertex AI, Ollama, Cohere | [Providers](https://aimock.copilotkit.dev/docs.html) |
+| **MCPMock** | MCP tools, resources, prompts with session management | [MCP](https://aimock.copilotkit.dev/mcp-mock.html) |
+| **A2AMock** | Agent-to-agent protocol with SSE streaming | [A2A](https://aimock.copilotkit.dev/a2a-mock.html) |
+| **VectorMock** | Pinecone, Qdrant, ChromaDB compatible endpoints | [Vector](https://aimock.copilotkit.dev/vector-mock.html) |
+| **Services** | Tavily search, Cohere rerank, OpenAI moderation | [Services](https://aimock.copilotkit.dev/services.html) |
-| Option | Short | Default | Description |
-| -------------------- | ----- | ------------ | ------------------------------------------- |
-| `--port` | `-p` | `4010` | Port to listen on |
-| `--host` | `-h` | `127.0.0.1` | Host to bind to |
-| `--fixtures` | `-f` | `./fixtures` | Path to fixtures directory or file |
-| `--latency` | `-l` | `0` | Latency between SSE chunks (ms) |
-| `--chunk-size` | `-c` | `20` | Characters per SSE chunk |
-| `--watch` | `-w` | | Watch fixture path for changes and reload |
-| `--log-level` | | `info` | Log verbosity: `silent`, `info`, `debug` |
-| `--validate-on-load` | | | Validate fixture schemas at startup |
-| `--chaos-drop` | | `0` | Chaos: probability of 500 errors (0-1) |
-| `--chaos-malformed` | | `0` | Chaos: probability of malformed JSON (0-1) |
-| `--chaos-disconnect` | | `0` | Chaos: probability of disconnect (0-1) |
-| `--metrics` | | | Enable Prometheus metrics at /metrics |
-| `--record` | | | Record mode: proxy unmatched to real APIs |
-| `--strict` | | | Strict mode: fail on unmatched requests |
-| `--provider-*` | | | Upstream URL per provider (with `--record`) |
-| `--help` | | | Show help |
+Run them all on one port with `npx aimock --config aimock.json`, or use the programmatic API to compose exactly what you need.
-```bash
-# Start with bundled example fixtures
-llmock
+## Features
-# Custom fixtures on a specific port
-llmock -p 8080 -f ./my-fixtures
+- **[Record & Replay](https://aimock.copilotkit.dev/record-replay.html)** — Proxy real APIs, save as fixtures, replay deterministically forever
+- **[11 LLM Providers](https://aimock.copilotkit.dev/docs.html)** — OpenAI, Claude, Gemini, Bedrock, Azure, Vertex AI, Ollama, Cohere — full streaming support
+- **[MCP / A2A / Vector](https://aimock.copilotkit.dev/mcp-mock.html)** — Mock every protocol your AI agents use
+- **[Chaos Testing](https://aimock.copilotkit.dev/chaos-testing.html)** — 500 errors, malformed JSON, mid-stream disconnects at any probability
+- **[Drift Detection](https://aimock.copilotkit.dev/drift-detection.html)** — Daily CI validation against real APIs
+- **[Streaming Physics](https://aimock.copilotkit.dev/streaming-physics.html)** — Configurable `ttft`, `tps`, and `jitter`
+- **[WebSocket APIs](https://aimock.copilotkit.dev/websocket.html)** — OpenAI Realtime, Responses WS, Gemini Live
+- **[Prometheus Metrics](https://aimock.copilotkit.dev/metrics.html)** — Request counts, latencies, fixture match rates
+- **[Docker + Helm](https://aimock.copilotkit.dev/docker.html)** — Container image and Helm chart for CI/CD
+- **Zero dependencies** — Everything from Node.js builtins
-# Simulate slow responses
-llmock --latency 100 --chunk-size 5
+## CLI
-# Record mode: proxy unmatched requests to real APIs and save as fixtures
-llmock --record --provider-openai https://api.openai.com --provider-anthropic https://api.anthropic.com
+```bash
+# LLM mocking only
+npx aimock -p 4010 -f ./fixtures
+
+# Full suite from config
+npx aimock --config aimock.json
+
+# Record mode: proxy to real APIs, save fixtures
+npx aimock --record --provider-openai https://api.openai.com
-# Strict mode in CI: fail if any request doesn't match a fixture
-llmock --strict -f ./fixtures
+# Docker
+docker run -d -p 4010:4010 -v ./fixtures:/fixtures ghcr.io/copilotkit/aimock -f /fixtures
```
-## Documentation
+## Switching from other tools?
+
+Step-by-step migration guides: [MSW](https://aimock.copilotkit.dev/migrate-from-msw.html) · [VidaiMock](https://aimock.copilotkit.dev/migrate-from-vidaimock.html) · [mock-llm](https://aimock.copilotkit.dev/migrate-from-mock-llm.html) · [Python mocks](https://aimock.copilotkit.dev/migrate-from-python-mocks.html) · [Mokksy](https://aimock.copilotkit.dev/migrate-from-mokksy.html)
-Full API reference, fixture format, E2E patterns, and provider-specific guides:
+## Documentation
-**[https://llmock.copilotkit.dev/docs.html](https://llmock.copilotkit.dev/docs.html)**
+**[https://aimock.copilotkit.dev](https://aimock.copilotkit.dev)**
## Real-World Usage
-[CopilotKit](https://github.com/CopilotKit/CopilotKit) uses llmock across its test suite to verify AI agent behavior across multiple LLM providers without hitting real APIs.
+[AG-UI](https://github.com/ag-ui-protocol/ag-ui) uses aimock for its [end-to-end test suite](https://github.com/ag-ui-protocol/ag-ui/tree/main/apps/dojo/e2e), verifying AI agent behavior across LLM providers with [fixture-driven responses](https://github.com/ag-ui-protocol/ag-ui/tree/main/apps/dojo/e2e/fixtures/openai).
## License
diff --git a/charts/aimock/Chart.yaml b/charts/aimock/Chart.yaml
new file mode 100644
index 0000000..6d23526
--- /dev/null
+++ b/charts/aimock/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: aimock
+description: Mock infrastructure for AI application testing (OpenAI, Anthropic, Gemini, MCP, A2A, vector)
+type: application
+version: 0.1.0
+appVersion: "1.7.0"
diff --git a/charts/llmock/templates/_helpers.tpl b/charts/aimock/templates/_helpers.tpl
similarity index 79%
rename from charts/llmock/templates/_helpers.tpl
rename to charts/aimock/templates/_helpers.tpl
index 896b8d6..b852baa 100644
--- a/charts/llmock/templates/_helpers.tpl
+++ b/charts/aimock/templates/_helpers.tpl
@@ -1,14 +1,14 @@
{{/*
Expand the name of the chart.
*/}}
-{{- define "llmock.name" -}}
+{{- define "aimock.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
*/}}
-{{- define "llmock.fullname" -}}
+{{- define "aimock.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
@@ -24,9 +24,9 @@ Create a default fully qualified app name.
{{/*
Common labels
*/}}
-{{- define "llmock.labels" -}}
+{{- define "aimock.labels" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{ include "llmock.selectorLabels" . }}
+{{ include "aimock.selectorLabels" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
@@ -34,7 +34,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
{{/*
Selector labels
*/}}
-{{- define "llmock.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "llmock.name" . }}
+{{- define "aimock.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "aimock.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
diff --git a/charts/llmock/templates/deployment.yaml b/charts/aimock/templates/deployment.yaml
similarity index 88%
rename from charts/llmock/templates/deployment.yaml
rename to charts/aimock/templates/deployment.yaml
index 22534ca..61541f6 100644
--- a/charts/llmock/templates/deployment.yaml
+++ b/charts/aimock/templates/deployment.yaml
@@ -1,18 +1,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
- name: {{ include "llmock.fullname" . }}
+ name: {{ include "aimock.fullname" . }}
labels:
- {{- include "llmock.labels" . | nindent 4 }}
+ {{- include "aimock.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
- {{- include "llmock.selectorLabels" . | nindent 6 }}
+ {{- include "aimock.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
- {{- include "llmock.selectorLabels" . | nindent 8 }}
+ {{- include "aimock.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
@@ -27,7 +27,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- - name: llmock
+ - name: aimock
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
diff --git a/charts/llmock/templates/service.yaml b/charts/aimock/templates/service.yaml
similarity index 58%
rename from charts/llmock/templates/service.yaml
rename to charts/aimock/templates/service.yaml
index 894b443..abd3742 100644
--- a/charts/llmock/templates/service.yaml
+++ b/charts/aimock/templates/service.yaml
@@ -1,9 +1,9 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ include "llmock.fullname" . }}
+ name: {{ include "aimock.fullname" . }}
labels:
- {{- include "llmock.labels" . | nindent 4 }}
+ {{- include "aimock.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
@@ -12,4 +12,4 @@ spec:
protocol: TCP
name: http
selector:
- {{- include "llmock.selectorLabels" . | nindent 4 }}
+ {{- include "aimock.selectorLabels" . | nindent 4 }}
diff --git a/charts/llmock/values.yaml b/charts/aimock/values.yaml
similarity index 92%
rename from charts/llmock/values.yaml
rename to charts/aimock/values.yaml
index c33a2ea..52cfc9d 100644
--- a/charts/llmock/values.yaml
+++ b/charts/aimock/values.yaml
@@ -4,7 +4,7 @@ fullnameOverride: ""
replicaCount: 1
image:
- repository: ghcr.io/copilotkit/llmock
+ repository: ghcr.io/copilotkit/aimock
tag: ""
pullPolicy: IfNotPresent
diff --git a/charts/llmock/Chart.yaml b/charts/llmock/Chart.yaml
deleted file mode 100644
index 5603860..0000000
--- a/charts/llmock/Chart.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v2
-name: llmock
-description: Deterministic mock LLM server for testing (OpenAI, Anthropic, Gemini)
-type: application
-version: 0.1.0
-appVersion: "1.6.0"
diff --git a/docs/a2a-mock.html b/docs/a2a-mock.html
new file mode 100644
index 0000000..7104e38
--- /dev/null
+++ b/docs/a2a-mock.html
@@ -0,0 +1,243 @@
+
+
+
+
+
+ A2AMock — aimock
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ A2AMock
+
+ Mock A2A (Agent-to-Agent) protocol server for testing multi-agent systems. Implements the
+ A2A JSON-RPC protocol with agent card discovery, message routing, task management, and SSE
+ streaming.
+
+
+ Quick Start
+
+
+
import { A2AMock } from "@copilotkit/aimock" ;
+
+const a2a = new A2AMock();
+
+a2a.registerAgent({
+ name: "translator" ,
+ description: "Translates text between languages" ,
+ skills: [{ id: "translate" , name: "Translate" }],
+});
+
+a2a.onMessage("translator" , "translate" , [{ text: "Translated text" }]);
+
+const url = await a2a.start();
+// Agent card at: ${url}/.well-known/agent-card.json
+// JSON-RPC at: ${url}/
+
+
+ Mounted Mode
+
+ Mount A2AMock onto an LLMock server to share a single port with LLM mocking and other
+ services:
+
+
+
+
import { LLMock, A2AMock } from "@copilotkit/aimock" ;
+
+const llm = new LLMock({ port: 5555 });
+const a2a = new A2AMock();
+
+a2a.registerAgent({ name: "assistant" });
+a2a.onMessage("assistant" , "hello" , [{ text: "Hi!" }]);
+
+llm.mount("/a2a" , a2a);
+await llm.start();
+// A2A available at http://127.0.0.1:5555/a2a
+
+
+ Subpath Import
+ A2AMock is also available via a dedicated subpath import for tree-shaking:
+
+
+
import { A2AMock } from "@copilotkit/aimock/a2a" ;
+
+
+ Agent Registration
+ Register agents with skills and capabilities:
+
+
+
a2a.registerAgent({
+ name: "researcher" ,
+ description: "Research assistant" ,
+ version: "1.0.0" ,
+ skills: [
+ { id: "search" , name: "Web Search" , tags: ["research" ] },
+ { id: "summarize" , name: "Summarize" },
+ ],
+ capabilities: { streaming: true },
+});
+
+
+ Message Patterns
+ Route messages to responses using string or RegExp patterns:
+
+
+
// String substring match
+a2a.onMessage("agent" , "hello" , [{ text: "Hi there!" }]);
+
+// RegExp match
+a2a.onMessage("agent" , / ^translate\s+(.+)/i , [{ text: "Translation result" }]);
+
+// Task with artifacts
+a2a.onTask("agent" , "compute" , [
+ { parts: [{ text: "42" }], name: "result" },
+]);
+
+
+ Streaming Tasks
+ Simulate streaming responses with SSE events:
+
+
+
a2a.onStreamingTask("agent" , "long-task" , [
+ { type: "status" , state: "TASK_STATE_WORKING" },
+ { type: "artifact" , parts: [{ text: "partial result" }], name: "output" },
+ { type: "artifact" , parts: [{ text: "final result" }], lastChunk: true , name: "output" },
+], 50 ); // 50ms delay between events
+
+
+ Config File
+ A2AMock can be configured via the aimock JSON config file:
+
+
+
{
+ "a2a" : {
+ "path" : "/a2a" ,
+ "agents" : [
+ {
+ "name" : "assistant" ,
+ "description" : "A helpful assistant" ,
+ "skills" : [{ "id" : "chat" , "name" : "Chat" }],
+ "messages" : [
+ { "pattern" : "hello" , "parts" : [{ "text" : "Hi there!" }] }
+ ]
+ }
+ ]
+ }
+}
+
+
+ JSON-RPC Methods
+
+
+
+ Method
+ Description
+
+
+
+
+ SendMessage
+ Send a message, get a synchronous response
+
+
+ SendStreamingMessage
+ Send a message, get an SSE stream of events
+
+
+ GetTask
+ Retrieve a task by ID
+
+
+ ListTasks
+ List tasks, optionally filtered by contextId
+
+
+ CancelTask
+ Cancel a non-terminal task
+
+
+
+
+ Agent Card
+
+ The agent card is served at GET /.well-known/agent-card.json and includes all
+ registered agents' skills and capabilities. The A2A-Version: 1.0 header is
+ included on all responses.
+
+
+ Inspection
+
+
+
a2a.health(); // { status: "ok", agents: 2, tasks: 5 }
+a2a.reset(); // Clears all agents and tasks
+
+
+
+
+
+
+
+
diff --git a/docs/aimock-cli.html b/docs/aimock-cli.html
new file mode 100644
index 0000000..04de8db
--- /dev/null
+++ b/docs/aimock-cli.html
@@ -0,0 +1,323 @@
+
+
+
+
+
+ aimock CLI — aimock
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ aimock CLI
+
+ aimock is the full-stack mock orchestrator. Where llmock serves
+ LLM endpoints only, aimock reads a JSON config file and serves LLM mocks
+ alongside additional mock services (MCP, A2A, vector stores) on a single port.
+
+
+ llmock vs aimock
+
+
+
+ Capability
+ llmock CLI
+ aimock CLI
+
+
+
+
+ LLM mock endpoints
+ Yes
+ Yes
+
+
+ Additional mock services
+ No
+ Yes (via mount)
+
+
+ Config file
+ CLI flags only
+ JSON config file
+
+
+ Single-port routing
+ LLM paths only
+ All services on one port
+
+
+
+
+ Quick Start
+
+
+
+
+
+
$ npx aimock --config aimock.json --port 4010
+
+
+
+
+
+
$ docker run -d -p 4010:4010 \
+ -v ./aimock.json:/config.json \
+ -v ./fixtures:/fixtures \
+ ghcr.io/copilotkit/aimock \
+ npx aimock --config /config.json --port 4010
+
+
+
+
+ Config File Format
+
+ The config file is a JSON object describing which services to run and how to configure
+ them. The llm section configures the core LLMock server. Additional services
+ are mounted at path prefixes.
+
+
+
+
+
{
+ "llm" : {
+ "fixtures" : "./fixtures" ,
+ "latency" : 0 ,
+ "chunkSize" : 20 ,
+ "logLevel" : "info" ,
+ "validateOnLoad" : true ,
+ "metrics" : true ,
+ "strict" : false
+ },
+ "services" : {
+ "/mcp" : {
+ "type" : "mcp" ,
+ "tools" : "./mcp-tools.json"
+ },
+ "/a2a" : {
+ "type" : "a2a" ,
+ "agents" : "./a2a-agents.json"
+ }
+ }
+}
+
+
+ Config Fields
+
+
+
+ Field
+ Type
+ Description
+
+
+
+
+ llm
+ object
+
+ LLMock configuration. Accepts fixtures, latency,
+ chunkSize, logLevel, validateOnLoad,
+ metrics, strict, chaos,
+ streamingProfile.
+
+
+
+ services
+ object
+
+ Map of mount paths to service configs. Each key is a URL path prefix (e.g.
+ /mcp), each value describes the service type and its options.
+
+
+
+
+
+ CLI Flags
+
+
+
+ Option
+ Default
+ Description
+
+
+
+
+ --config
+ aimock.json
+ Path to JSON config file
+
+
+ --port
+ 4010
+ Port to listen on (overrides config)
+
+
+ --host
+ 127.0.0.1
+ Host to bind to (overrides config)
+
+
+ --help
+ —
+ Show help
+
+
+
+
+ Single-Port Routing
+
+ All services share one port. Requests are routed by path prefix. LLM endpoints live at the
+ root, mounted services at their configured prefix:
+
+
+
+
+
+ Path
+ Service
+
+
+
+
+ /v1/chat/completions
+ LLMock (OpenAI Chat Completions)
+
+
+ /v1/messages
+ LLMock (Anthropic Claude)
+
+
+ /v1/embeddings
+ LLMock (Embeddings)
+
+
+ /mcp/*
+ MCP mock service
+
+
+ /a2a/*
+ A2A mock service
+
+
+ /health
+ Unified health check (all services)
+
+
+ /metrics
+ Prometheus metrics (if enabled)
+
+
+
+
+
+ Path stripping is automatic — a request to /mcp/tools/list arrives at
+ the MCP service as /tools/list.
+
+
+ Docker Usage
+
+
+
+
+
+
$ npx aimock --config aimock.json --host 0.0.0.0
+
+
+
+
+
+
# Mount config and fixtures into the container
+$ docker run -p 4010:4010 \
+ -v ./aimock.json:/config.json \
+ -v ./fixtures:/fixtures \
+ ghcr.io/copilotkit/aimock \
+ npx aimock --config /config.json --host 0.0.0.0
+
+
+
+
+ Docker Compose
+
+
+
+
services :
+ aimock :
+ image : ghcr.io/copilotkit/aimock:latest
+ command : aimock --config /app/aimock.json --host 0.0.0.0
+ ports :
+ - "4010:4010"
+ volumes :
+ - ./aimock.json:/app/aimock.json:ro
+ - ./fixtures:/app/fixtures:ro
+
+ app :
+ build : .
+ environment :
+ OPENAI_BASE_URL : http://aimock:4010/v1
+ MCP_SERVER_URL : http://aimock:4010/mcp
+ depends_on :
+ - aimock
+
+
+
+
+
+
+
+
diff --git a/docs/aws-bedrock.html b/docs/aws-bedrock.html
index 09cf238..f64f2f2 100644
--- a/docs/aws-bedrock.html
+++ b/docs/aws-bedrock.html
@@ -3,7 +3,7 @@
- AWS Bedrock — llmock
+ AWS Bedrock — aimock
@@ -24,7 +24,7 @@
>
☰
- $ llmock
+ $ aimock
+
+
+
-
+
AWS Bedrock
- llmock supports the AWS Bedrock Claude invoke and Converse API endpoints — both
- streaming and non-streaming. Point the AWS SDK at your llmock instance and fixtures match
+ aimock supports the AWS Bedrock Claude invoke and Converse API endpoints — both
+ streaming and non-streaming. Point the AWS SDK at your aimock instance and fixtures match
against the Bedrock-format requests, returning responses in the authentic Bedrock format
including AWS Event Stream binary framing for streaming.
@@ -96,13 +67,13 @@ How It Works
model field in the body (the model is in the URL).
- llmock detects the Bedrock URL pattern, extracts the model ID, translates the request to
+ aimock detects the Bedrock URL pattern, extracts the model ID, translates the request to
the internal fixture-matching format, and returns the response in the Anthropic Messages
API format — which is identical to the Bedrock Claude response format. For
streaming, responses use the AWS Event Stream binary framing protocol.
- llmock also supports the Converse API (Converse API (/model/{modelId}/converse
and /model/{modelId}/converse-stream), which uses a different
@@ -205,7 +176,7 @@ Model Resolution
SDK Configuration
- To point the AWS SDK Bedrock Runtime client at llmock, configure the endpoint URL:
+ To point the AWS SDK Bedrock Runtime client at aimock, configure the endpoint URL:
@@ -213,7 +184,7 @@
SDK Configuration
const client =
new BedrockRuntimeClient ({
region :
"us-east-1" ,
-
endpoint :
"http://localhost:4005" ,
// llmock URL
+
endpoint :
"http://localhost:4005" ,
// aimock URL
credentials : {
accessKeyId :
"mock" ,
secretAccessKey :
"mock" },
});
@@ -256,7 +227,7 @@
Fixture Examples
Fixtures are shared across all providers. The same fixture file works for OpenAI, Claude
- Messages, Gemini, Azure, and Bedrock endpoints — llmock translates each provider's
+ Messages, Gemini, Azure, and Bedrock endpoints — aimock translates each provider's
request format to a common internal format before matching.
@@ -264,7 +235,7 @@
Fixture Examples
Streaming (invoke-with-response-stream)
The invoke-with-response-stream endpoint returns responses using the
- AWS Event Stream binary protocol . llmock implements this protocol
+ AWS Event Stream binary protocol . aimock implements this protocol
natively — each response chunk is encoded as a binary frame with CRC32 checksums,
headers, and a JSON payload, exactly as the real Bedrock service sends them.
@@ -322,7 +293,7 @@
AWS Event Stream Binary Format
[message_crc32: 4B CRC32 of entire frame minus last 4 bytes]
- llmock encodes these frames with proper CRC32 checksums, so the AWS SDK can decode them
+ aimock encodes these frames with proper CRC32 checksums, so the AWS SDK can decode them
natively. The :event-type header in each frame carries the event name (e.g.
chunk), and the :content-type header is set to
application/json.
@@ -332,7 +303,7 @@
Converse API
The Converse API is AWS Bedrock's provider-agnostic conversation interface. It uses
camelCase field names and a different request structure than the Claude-native invoke
- endpoints. llmock supports both /model/{modelId}/converse (non-streaming) and
+ endpoints. aimock supports both /model/{modelId}/converse (non-streaming) and
/model/{modelId}/converse-stream (streaming via Event Stream binary).
@@ -369,21 +340,23 @@ Converse API
The Converse API also supports tool calls via toolUse and
toolResult content blocks, and tool definitions via the
- toolConfig field. llmock translates all of these to the unified internal
+ toolConfig field. aimock translates all of these to the unified internal
format for fixture matching.
+
+