Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 89 additions & 1 deletion src/api/providers/__tests__/openai.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ vi.mock("axios", () => ({
},
}))

import { OpenAiHandler, getOpenAiModels } from "../openai"
import { OpenAiHandler, getOpenAiModels, createThinkingAwareFetch } from "../openai"
import { ApiHandlerOptions } from "../../../shared/api"
import { Anthropic } from "@anthropic-ai/sdk"
import { openAiModelInfoSaneDefaults } from "@roo-code/types"
Expand Down Expand Up @@ -143,6 +143,7 @@ describe("OpenAiHandler", () => {
expect.objectContaining({
baseURL: "https://api.openai.com/v1",
apiKey: "test-api-key",
fetch: expect.any(Function),
}),
)
})
Expand Down Expand Up @@ -978,3 +979,90 @@ describe("getOpenAiModels", () => {
expect(result).toEqual(["gpt-4", "gpt-3.5-turbo"])
})
})

describe("createThinkingAwareFetch", () => {
	const originalFetch = globalThis.fetch

	afterEach(() => {
		globalThis.fetch = originalFetch
	})

	/**
	 * Replaces globalThis.fetch with a spy that records the outgoing request
	 * body and answers with a canned 200 response. Returns the recorder so
	 * each test can inspect what the wrapper actually sent.
	 */
	function spyOnFetch(responseText = "{}") {
		const recorder: { body?: string } = {}
		globalThis.fetch = vi.fn(async (_input: RequestInfo | URL, init?: RequestInit) => {
			recorder.body = init?.body as string
			return new Response(responseText, { status: 200 })
		}) as any
		return recorder
	}

	const endpoint = "https://example.com/v1/chat/completions"

	it("should inject thinking: { type: 'enabled' } when reasoning_effort is present", async () => {
		const recorder = spyOnFetch()

		await createThinkingAwareFetch()(endpoint, {
			method: "POST",
			body: JSON.stringify({ model: "some-model", reasoning_effort: "high" }),
		})

		const parsed = JSON.parse(recorder.body!)
		expect(parsed.thinking).toEqual({ type: "enabled" })
		expect(parsed.reasoning_effort).toBe("high")
	})

	it("should not inject thinking when reasoning_effort is absent", async () => {
		const recorder = spyOnFetch()

		await createThinkingAwareFetch()(endpoint, {
			method: "POST",
			body: JSON.stringify({ model: "some-model", messages: [] }),
		})

		expect(JSON.parse(recorder.body!).thinking).toBeUndefined()
	})

	it("should not overwrite existing thinking parameter", async () => {
		const recorder = spyOnFetch()

		await createThinkingAwareFetch()(endpoint, {
			method: "POST",
			body: JSON.stringify({
				model: "some-model",
				reasoning_effort: "high",
				thinking: { type: "disabled", budget_tokens: 0 },
			}),
		})

		expect(JSON.parse(recorder.body!).thinking).toEqual({ type: "disabled", budget_tokens: 0 })
	})

	it("should pass through non-JSON bodies unchanged", async () => {
		const recorder = spyOnFetch("ok")

		await createThinkingAwareFetch()(endpoint, { method: "POST", body: "not-json-body" })

		expect(recorder.body).toBe("not-json-body")
	})

	it("should pass through requests with no body", async () => {
		spyOnFetch("ok")

		await createThinkingAwareFetch()("https://example.com/v1/models")

		expect(globalThis.fetch).toHaveBeenCalledWith("https://example.com/v1/models", undefined)
	})
})
29 changes: 29 additions & 0 deletions src/api/providers/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
baseURL,
apiKey,
headers,
fetch: createThinkingAwareFetch(),
})
this.languageModelFactory = (modelId: string) => provider.chat(modelId)
}
Expand Down Expand Up @@ -373,6 +374,34 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
}
}

/**
 * Creates a fetch wrapper that injects `thinking: { type: "enabled" }` into
 * JSON request bodies that already contain `reasoning_effort`. This fixes
 * OpenAI-compatible APIs (e.g. VolcEngine) that default `thinking` to
 * `disabled` when it's not explicitly provided, causing a 400 error when
 * combined with `reasoning_effort`.
 *
 * Standard OpenAI endpoints silently ignore unrecognised body fields,
 * so this is safe for all OpenAI-compatible providers.
 *
 * @internal Exported for testing.
 * @param baseFetch - Optional underlying fetch to delegate to. When omitted,
 *   `globalThis.fetch` is resolved lazily at call time, so later replacements
 *   (e.g. test mocks installed after the wrapper is created) still take effect.
 * @returns A drop-in replacement for `fetch`.
 */
export function createThinkingAwareFetch(baseFetch?: typeof globalThis.fetch): typeof globalThis.fetch {
	return async (input: RequestInfo | URL, init?: RequestInit) => {
		// Only string bodies can carry the JSON payloads we care about;
		// streams, FormData, etc. pass through untouched.
		if (init?.body && typeof init.body === "string") {
			try {
				const json = JSON.parse(init.body)
				// Never clobber an explicitly provided `thinking` parameter.
				if (json.reasoning_effort && !json.thinking) {
					json.thinking = { type: "enabled" }
					// Copy init rather than mutating the caller's object.
					init = { ...init, body: JSON.stringify(json) }
				}
			} catch {
				// Not JSON – pass through untouched.
			}
		}
		return (baseFetch ?? globalThis.fetch)(input, init)
	}
}

export async function getOpenAiModels(baseUrl?: string, apiKey?: string, openAiHeaders?: Record<string, string>) {
try {
if (!baseUrl) {
Expand Down
Loading