Commit e15d680

fix(google): make codeExecutionResult.output optional in response schema
Gemini 3 Flash omits the `output` field in `codeExecutionResult` when code execution produces no text output (e.g., it only saves files via PIL). This caused `AI_APICallError: Invalid JSON response`, because the Zod schema required `output` to be a string.

Changes:

- Make the `output` field `.nullish()` in the `codeExecutionResult` schema within `getContentSchema()` (affects both the response and chunk schemas)
- Default a missing `output` to an empty string in both the doGenerate and doStream code paths
- Add regression tests for both generateText and streamText with a missing `output` field

Closes #12217
1 parent dbdba9b commit e15d680
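
The schema change can be illustrated with a minimal, self-contained TypeScript sketch. The `codeExecutionResultSchema` name and the sample part below are simplified stand-ins, not the provider's actual code (the real schema lives inside `getContentSchema()`): with `output` declared `.nullish()`, a `codeExecutionResult` part that omits `output` now parses, and the `?? ''` fallback normalizes the missing value to an empty string.

import { z } from 'zod';

// Simplified, standalone sketch -- not the provider's actual schema.
const codeExecutionResultSchema = z
  .object({
    outcome: z.string(),
    // Previously z.string(); .nullish() lets a missing or null `output` parse.
    output: z.string().nullish(),
  })
  .nullish();

// A part as Gemini may emit it when the executed code writes files but
// prints nothing: `output` is simply absent.
const parsed = codeExecutionResultSchema.parse({ outcome: 'OUTCOME_OK' });

// The tool-result mapping then defaults the missing output to an empty string.
const toolResult = {
  outcome: parsed?.outcome,
  output: parsed?.output ?? '',
};
console.log(toolResult); // { outcome: 'OUTCOME_OK', output: '' }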

File tree

3 files changed, +153 -3 lines changed
Lines changed: 7 additions & 0 deletions

@@ -0,0 +1,7 @@
+---
+'@ai-sdk/google': patch
+---
+
+fix(google): make `codeExecutionResult.output` optional in response schema
+
+Gemini 3 Flash omits the `output` field in `codeExecutionResult` when code execution produces no text output (e.g., only saves files). The Zod response schema now accepts a missing `output` field and defaults it to an empty string.

packages/google/src/google-generative-ai-language-model.test.ts

Lines changed: 143 additions & 0 deletions
@@ -1519,6 +1519,69 @@ describe('doGenerate', () => {
     `);
   });
 
+  it('should handle code execution result with missing output field', async () => {
+    server.urls[TEST_URL_GEMINI_2_0_PRO].response = {
+      type: 'json-value',
+      body: {
+        candidates: [
+          {
+            content: {
+              parts: [
+                {
+                  executableCode: {
+                    language: 'PYTHON',
+                    code: "import PIL.Image\nimg = PIL.Image.open('input.png')\nimg.save('output.png')\n",
+                  },
+                },
+                {
+                  codeExecutionResult: {
+                    outcome: 'OUTCOME_OK',
+                  },
+                },
+              ],
+              role: 'model',
+            },
+            finishReason: 'STOP',
+          },
+        ],
+      },
+    };
+
+    const model = provider.languageModel('gemini-2.0-pro');
+    const { content } = await model.doGenerate({
+      tools: [
+        {
+          type: 'provider',
+          id: 'google.code_execution',
+          name: 'code_execution',
+          args: {},
+        },
+      ],
+      prompt: TEST_PROMPT,
+    });
+
+    expect(content).toMatchInlineSnapshot(`
+      [
+        {
+          "input": "{"language":"PYTHON","code":"import PIL.Image\\nimg = PIL.Image.open('input.png')\\nimg.save('output.png')\\n"}",
+          "providerExecuted": true,
+          "toolCallId": "test-id",
+          "toolName": "code_execution",
+          "type": "tool-call",
+        },
+        {
+          "result": {
+            "outcome": "OUTCOME_OK",
+            "output": "",
+          },
+          "toolCallId": "test-id",
+          "toolName": "code_execution",
+          "type": "tool-result",
+        },
+      ]
+    `);
+  });
+
   it('should return stop finish reason for code execution (provider-executed tool)', async () => {
     server.urls[TEST_URL_GEMINI_2_0_PRO].response = {
       type: 'json-value',
@@ -3152,6 +3215,86 @@ describe('doStream', () => {
     `);
   });
 
+  it('should stream code execution result with missing output field', async () => {
+    server.urls[TEST_URL_GEMINI_2_0_PRO].response = {
+      type: 'stream-chunks',
+      chunks: [
+        `data: ${JSON.stringify({
+          candidates: [
+            {
+              content: {
+                parts: [
+                  {
+                    executableCode: {
+                      language: 'PYTHON',
+                      code: "img = PIL.Image.open('input.png')\nimg.save('output.png')\n",
+                    },
+                  },
+                ],
+              },
+            },
+          ],
+        })}\n\n`,
+        `data: ${JSON.stringify({
+          candidates: [
+            {
+              content: {
+                parts: [
+                  {
+                    codeExecutionResult: {
+                      outcome: 'OUTCOME_OK',
+                    },
+                  },
+                ],
+              },
+              finishReason: 'STOP',
+            },
+          ],
+        })}\n\n`,
+      ],
+    };
+
+    const model = provider.languageModel('gemini-2.0-pro');
+    const { stream } = await model.doStream({
+      tools: [
+        {
+          type: 'provider',
+          id: 'google.code_execution',
+          name: 'code_execution',
+          args: {},
+        },
+      ],
+      prompt: TEST_PROMPT,
+    });
+
+    const events = await convertReadableStreamToArray(stream);
+
+    const toolEvents = events.filter(
+      e => e.type === 'tool-call' || e.type === 'tool-result',
+    );
+
+    expect(toolEvents).toMatchInlineSnapshot(`
+      [
+        {
+          "input": "{"language":"PYTHON","code":"img = PIL.Image.open('input.png')\\nimg.save('output.png')\\n"}",
+          "providerExecuted": true,
+          "toolCallId": "test-id",
+          "toolName": "code_execution",
+          "type": "tool-call",
+        },
+        {
+          "result": {
+            "outcome": "OUTCOME_OK",
+            "output": "",
+          },
+          "toolCallId": "test-id",
+          "toolName": "code_execution",
+          "type": "tool-result",
+        },
+      ]
+    `);
+  });
+
   it('should return stop finish reason for streamed code execution (provider-executed tool)', async () => {
     server.urls[TEST_URL_GEMINI_2_0_PRO].response = {
       type: 'stream-chunks',

packages/google/src/google-generative-ai-language-model.ts

Lines changed: 3 additions & 3 deletions
@@ -263,7 +263,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
           toolName: 'code_execution',
           result: {
             outcome: part.codeExecutionResult.outcome,
-            output: part.codeExecutionResult.output,
+            output: part.codeExecutionResult.output ?? '',
           },
         });
         // Clear the ID after use to avoid accidental reuse.
@@ -476,7 +476,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
           toolName: 'code_execution',
           result: {
             outcome: part.codeExecutionResult.outcome,
-            output: part.codeExecutionResult.output,
+            output: part.codeExecutionResult.output ?? '',
           },
         });
         // Clear the ID after use.
@@ -895,7 +895,7 @@ const getContentSchema = () =>
       codeExecutionResult: z
         .object({
           outcome: z.string(),
-          output: z.string(),
+          output: z.string().nullish(),
         })
         .nullish(),
       text: z.string().nullish(),
