fix(think-mode): support GitHub Copilot proxy provider (#336)

* fix(think-mode): support GitHub Copilot proxy provider

### Summary
- Adds `github-copilot` support to think-mode by resolving the underlying provider from the model name (Claude → Anthropic, Gemini → Google, GPT/o* → OpenAI); see the sketch after this list.
- Normalizes model IDs to handle dotted versions defensively (e.g. `claude-opus-4.5` → `claude-opus-4-5`, `gpt-5.2` → `gpt-5-2`) so high-variant upgrades and capability checks work reliably.
- Expands high-variant mappings to cover Gemini preview/flash variants and aligns GPT-5.1/5.2 mappings with normalized IDs.
- Adds OpenAI “thinking mode” config (`reasoning_effort: "high"`) alongside existing provider configs.
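Concretely, this is the lookup behavior the new tests pin down (a sketch distilled from the test assertions; the relative import path is abbreviated):

```ts
import { getHighVariant, getThinkingConfig } from "./switcher"

// Proxy resolution: github-copilot is mapped to the underlying provider.
getThinkingConfig("github-copilot", "claude-opus-4-5")
// → anthropic config: { thinking: { type: "enabled", budgetTokens: 64000 }, ... }
getThinkingConfig("github-copilot", "llama-3-70b") // → null (no known provider)

// Normalization: dotted minor versions are treated as hyphenated.
getHighVariant("gpt-5.2")         // → "gpt-5-2-high"
getHighVariant("claude-opus-4.5") // → "claude-opus-4-5-high"
getHighVariant("gpt-5-2-high")    // → null (already a high variant)
```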

### Tests
- Adds unit coverage for the switcher (`switcher.test.ts`) and integration coverage for the hook (`index.test.ts`), including:
  - GitHub Copilot model routing + thinking config injection (expected message shape sketched after this list)
  - Dots vs hyphens normalization
  - Already-`-high` variants not being re-upgraded
  - Unknown models/providers handled gracefully
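For reference, the Copilot Claude path covered above transforms the message roughly as follows (shapes copied from the test assertions; the hook plumbing itself lives in `./index` and is not part of this diff):

```ts
// Before the chat.params hook runs (prompt contains a think keyword):
const before = {
  model: { providerID: "github-copilot", modelID: "claude-opus-4.5" },
}
// After: model upgraded to the high variant, anthropic config injected.
const after = {
  model: { providerID: "github-copilot", modelID: "claude-opus-4-5-high" },
  thinking: { type: "enabled", budgetTokens: 64000 },
}
```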

* fix: support multiple digits in model minor versions
Author: Marcus R. Brown
Date: 2025-12-30 01:46:16 -07:00
Committed by: GitHub
Parent commit: 0f0f49b823
Commit: 5138c50a6a
3 changed files with 789 additions and 43 deletions

index.test.ts (new file)

@@ -0,0 +1,359 @@
import { describe, expect, it, beforeEach, mock } from "bun:test"
import type { ThinkModeInput } from "./types"

const logMock = mock(() => {})
mock.module("../../shared", () => ({
  log: logMock,
}))

const { createThinkModeHook, clearThinkModeState } = await import("./index")

/**
 * Helper to create a mock ThinkModeInput for testing
 */
function createMockInput(
  providerID: string,
  modelID: string,
  promptText: string
): ThinkModeInput {
  return {
    parts: [{ type: "text", text: promptText }],
    message: {
      model: {
        providerID,
        modelID,
      },
    },
  }
}

/**
 * Type helper for accessing dynamically injected properties on message
 */
type MessageWithInjectedProps = Record<string, unknown>

describe("createThinkModeHook integration", () => {
  const sessionID = "test-session-id"

  beforeEach(() => {
    clearThinkModeState(sessionID)
  })

  describe("GitHub Copilot provider integration", () => {
    describe("Claude models", () => {
      it("should activate thinking mode for github-copilot Claude with think keyword", async () => {
        // #given a github-copilot Claude model and prompt with "think" keyword
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "claude-opus-4-5",
          "Please think deeply about this problem"
        )

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant and inject thinking config
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("claude-opus-4-5-high")
        expect(message.thinking).toBeDefined()
        expect((message.thinking as Record<string, unknown>)?.type).toBe(
          "enabled"
        )
        expect(
          (message.thinking as Record<string, unknown>)?.budgetTokens
        ).toBe(64000)
      })

      it("should handle github-copilot Claude with dots in version", async () => {
        // #given a github-copilot Claude model with dot format (claude-opus-4.5)
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "claude-opus-4.5",
          "ultrathink mode"
        )

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant (hyphen format)
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("claude-opus-4-5-high")
        expect(message.thinking).toBeDefined()
      })

      it("should handle github-copilot Claude Sonnet", async () => {
        // #given a github-copilot Claude Sonnet model
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "claude-sonnet-4-5",
          "think about this"
        )

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("claude-sonnet-4-5-high")
        expect(message.thinking).toBeDefined()
      })
    })

    describe("Gemini models", () => {
      it("should activate thinking mode for github-copilot Gemini Pro", async () => {
        // #given a github-copilot Gemini Pro model
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "gemini-3-pro-preview",
          "think about this"
        )

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant and inject google thinking config
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("gemini-3-pro-preview-high")
        expect(message.providerOptions).toBeDefined()
        const googleOptions = (
          message.providerOptions as Record<string, unknown>
        )?.google as Record<string, unknown>
        expect(googleOptions?.thinkingConfig).toBeDefined()
      })

      it("should activate thinking mode for github-copilot Gemini Flash", async () => {
        // #given a github-copilot Gemini Flash model
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "gemini-3-flash-preview",
          "ultrathink"
        )

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("gemini-3-flash-preview-high")
        expect(message.providerOptions).toBeDefined()
      })
    })

    describe("GPT models", () => {
      it("should activate thinking mode for github-copilot GPT-5.2", async () => {
        // #given a github-copilot GPT-5.2 model
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "gpt-5.2",
          "please think"
        )

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant and inject openai thinking config
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("gpt-5-2-high")
        expect(message.reasoning_effort).toBe("high")
      })

      it("should activate thinking mode for github-copilot GPT-5", async () => {
        // #given a github-copilot GPT-5 model
        const hook = createThinkModeHook()
        const input = createMockInput("github-copilot", "gpt-5", "think deeply")

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should upgrade to high variant
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe("gpt-5-high")
        expect(message.reasoning_effort).toBe("high")
      })
    })

    describe("No think keyword", () => {
      it("should NOT activate for github-copilot without think keyword", async () => {
        // #given a prompt without any think keyword
        const hook = createThinkModeHook()
        const input = createMockInput(
          "github-copilot",
          "claude-opus-4-5",
          "Just do this task"
        )
        const originalModelID = input.message.model?.modelID

        // #when the chat.params hook is called
        await hook["chat.params"](input, sessionID)

        // #then should NOT change model or inject config
        const message = input.message as MessageWithInjectedProps
        expect(input.message.model?.modelID).toBe(originalModelID)
        expect(message.thinking).toBeUndefined()
      })
    })
  })

  describe("Backwards compatibility with direct providers", () => {
    it("should still work for direct anthropic provider", async () => {
      // #given direct anthropic provider
      const hook = createThinkModeHook()
      const input = createMockInput(
        "anthropic",
        "claude-sonnet-4-5",
        "think about this"
      )

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should work as before
      const message = input.message as MessageWithInjectedProps
      expect(input.message.model?.modelID).toBe("claude-sonnet-4-5-high")
      expect(message.thinking).toBeDefined()
    })

    it("should still work for direct google provider", async () => {
      // #given direct google provider
      const hook = createThinkModeHook()
      const input = createMockInput(
        "google",
        "gemini-3-pro",
        "think about this"
      )

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should work as before
      const message = input.message as MessageWithInjectedProps
      expect(input.message.model?.modelID).toBe("gemini-3-pro-high")
      expect(message.providerOptions).toBeDefined()
    })

    it("should still work for direct openai provider", async () => {
      // #given direct openai provider
      const hook = createThinkModeHook()
      const input = createMockInput("openai", "gpt-5", "think about this")

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should work
      const message = input.message as MessageWithInjectedProps
      expect(input.message.model?.modelID).toBe("gpt-5-high")
      expect(message.reasoning_effort).toBe("high")
    })

    it("should still work for amazon-bedrock provider", async () => {
      // #given amazon-bedrock provider
      const hook = createThinkModeHook()
      const input = createMockInput(
        "amazon-bedrock",
        "claude-sonnet-4-5",
        "think"
      )

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should inject bedrock thinking config
      const message = input.message as MessageWithInjectedProps
      expect(input.message.model?.modelID).toBe("claude-sonnet-4-5-high")
      expect(message.reasoningConfig).toBeDefined()
    })
  })

  describe("Already-high variants", () => {
    it("should NOT re-upgrade already-high variants", async () => {
      // #given an already-high variant model
      const hook = createThinkModeHook()
      const input = createMockInput(
        "github-copilot",
        "claude-opus-4-5-high",
        "think deeply"
      )

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should NOT modify the model (already high)
      const message = input.message as MessageWithInjectedProps
      expect(input.message.model?.modelID).toBe("claude-opus-4-5-high")
      // No additional thinking config should be injected
      expect(message.thinking).toBeUndefined()
    })

    it("should NOT re-upgrade already-high GPT variants", async () => {
      // #given an already-high GPT variant
      const hook = createThinkModeHook()
      const input = createMockInput(
        "github-copilot",
        "gpt-5.2-high",
        "ultrathink"
      )

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should NOT modify the model
      const message = input.message as MessageWithInjectedProps
      expect(input.message.model?.modelID).toBe("gpt-5.2-high")
      expect(message.reasoning_effort).toBeUndefined()
    })
  })

  describe("Unknown models", () => {
    it("should not crash for unknown models via github-copilot", async () => {
      // #given an unknown model type
      const hook = createThinkModeHook()
      const input = createMockInput(
        "github-copilot",
        "llama-3-70b",
        "think about this"
      )

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should not crash and model should remain unchanged
      expect(input.message.model?.modelID).toBe("llama-3-70b")
    })
  })

  describe("Edge cases", () => {
    it("should handle missing model gracefully", async () => {
      // #given input without a model
      const hook = createThinkModeHook()
      const input: ThinkModeInput = {
        parts: [{ type: "text", text: "think about this" }],
        message: {},
      }

      // #when the chat.params hook is called
      // #then should not crash
      await expect(
        hook["chat.params"](input, sessionID)
      ).resolves.toBeUndefined()
    })

    it("should handle empty prompt gracefully", async () => {
      // #given empty prompt
      const hook = createThinkModeHook()
      const input = createMockInput("github-copilot", "claude-opus-4-5", "")

      // #when the chat.params hook is called
      await hook["chat.params"](input, sessionID)

      // #then should not upgrade (no think keyword)
      expect(input.message.model?.modelID).toBe("claude-opus-4-5")
    })
  })
})

switcher.test.ts (new file)

@@ -0,0 +1,325 @@
import { describe, expect, it } from "bun:test"
import {
  getHighVariant,
  getThinkingConfig,
  isAlreadyHighVariant,
  THINKING_CONFIGS,
} from "./switcher"

describe("think-mode switcher", () => {
  describe("GitHub Copilot provider support", () => {
    describe("Claude models via github-copilot", () => {
      it("should resolve github-copilot Claude Opus to anthropic config", () => {
        // #given a github-copilot provider with Claude Opus model
        const providerID = "github-copilot"
        const modelID = "claude-opus-4-5"

        // #when getting thinking config
        const config = getThinkingConfig(providerID, modelID)

        // #then should return anthropic thinking config
        expect(config).not.toBeNull()
        expect(config?.thinking).toBeDefined()
        expect((config?.thinking as Record<string, unknown>)?.type).toBe(
          "enabled"
        )
        expect((config?.thinking as Record<string, unknown>)?.budgetTokens).toBe(
          64000
        )
      })

      it("should resolve github-copilot Claude Sonnet to anthropic config", () => {
        // #given a github-copilot provider with Claude Sonnet model
        const config = getThinkingConfig("github-copilot", "claude-sonnet-4-5")

        // #then should return anthropic thinking config
        expect(config).not.toBeNull()
        expect(config?.thinking).toBeDefined()
      })

      it("should handle Claude with dots in version number", () => {
        // #given a model ID with dots (claude-opus-4.5)
        const config = getThinkingConfig("github-copilot", "claude-opus-4.5")

        // #then should still return anthropic thinking config
        expect(config).not.toBeNull()
        expect(config?.thinking).toBeDefined()
      })
    })

    describe("Gemini models via github-copilot", () => {
      it("should resolve github-copilot Gemini Pro to google config", () => {
        // #given a github-copilot provider with Gemini Pro model
        const config = getThinkingConfig("github-copilot", "gemini-3-pro-preview")

        // #then should return google thinking config
        expect(config).not.toBeNull()
        expect(config?.providerOptions).toBeDefined()
        const googleOptions = (
          config?.providerOptions as Record<string, unknown>
        )?.google as Record<string, unknown>
        expect(googleOptions?.thinkingConfig).toBeDefined()
      })

      it("should resolve github-copilot Gemini Flash to google config", () => {
        // #given a github-copilot provider with Gemini Flash model
        const config = getThinkingConfig(
          "github-copilot",
          "gemini-3-flash-preview"
        )

        // #then should return google thinking config
        expect(config).not.toBeNull()
        expect(config?.providerOptions).toBeDefined()
      })
    })

    describe("GPT models via github-copilot", () => {
      it("should resolve github-copilot GPT-5.2 to openai config", () => {
        // #given a github-copilot provider with GPT-5.2 model
        const config = getThinkingConfig("github-copilot", "gpt-5.2")

        // #then should return openai thinking config
        expect(config).not.toBeNull()
        expect(config?.reasoning_effort).toBe("high")
      })

      it("should resolve github-copilot GPT-5 to openai config", () => {
        // #given a github-copilot provider with GPT-5 model
        const config = getThinkingConfig("github-copilot", "gpt-5")

        // #then should return openai thinking config
        expect(config).not.toBeNull()
        expect(config?.reasoning_effort).toBe("high")
      })

      it("should resolve github-copilot o1 to openai config", () => {
        // #given a github-copilot provider with o1 model
        const config = getThinkingConfig("github-copilot", "o1-preview")

        // #then should return openai thinking config
        expect(config).not.toBeNull()
        expect(config?.reasoning_effort).toBe("high")
      })

      it("should resolve github-copilot o3 to openai config", () => {
        // #given a github-copilot provider with o3 model
        const config = getThinkingConfig("github-copilot", "o3-mini")

        // #then should return openai thinking config
        expect(config).not.toBeNull()
        expect(config?.reasoning_effort).toBe("high")
      })
    })

    describe("Unknown models via github-copilot", () => {
      it("should return null for unknown model types", () => {
        // #given a github-copilot provider with unknown model
        const config = getThinkingConfig("github-copilot", "llama-3-70b")

        // #then should return null (no matching provider)
        expect(config).toBeNull()
      })
    })
  })

  describe("Model ID normalization", () => {
    describe("getHighVariant with dots vs hyphens", () => {
      it("should handle dots in Claude version numbers", () => {
        // #given a Claude model ID with dot format
        const variant = getHighVariant("claude-opus-4.5")

        // #then should return high variant with hyphen format
        expect(variant).toBe("claude-opus-4-5-high")
      })

      it("should handle hyphens in Claude version numbers", () => {
        // #given a Claude model ID with hyphen format
        const variant = getHighVariant("claude-opus-4-5")

        // #then should return high variant
        expect(variant).toBe("claude-opus-4-5-high")
      })

      it("should handle dots in GPT version numbers", () => {
        // #given a GPT model ID with dot format (gpt-5.2)
        const variant = getHighVariant("gpt-5.2")

        // #then should return high variant
        expect(variant).toBe("gpt-5-2-high")
      })

      it("should handle dots in GPT-5.1 codex variants", () => {
        // #given a GPT-5.1-codex model ID
        const variant = getHighVariant("gpt-5.1-codex")

        // #then should return high variant
        expect(variant).toBe("gpt-5-1-codex-high")
      })

      it("should handle Gemini preview variants", () => {
        // #given Gemini preview model IDs
        expect(getHighVariant("gemini-3-pro-preview")).toBe(
          "gemini-3-pro-preview-high"
        )
        expect(getHighVariant("gemini-3-flash-preview")).toBe(
          "gemini-3-flash-preview-high"
        )
      })

      it("should return null for already-high variants", () => {
        // #given model IDs that are already high variants
        expect(getHighVariant("claude-opus-4-5-high")).toBeNull()
        expect(getHighVariant("gpt-5-2-high")).toBeNull()
        expect(getHighVariant("gemini-3-pro-high")).toBeNull()
      })

      it("should return null for unknown models", () => {
        // #given unknown model IDs
        expect(getHighVariant("llama-3-70b")).toBeNull()
        expect(getHighVariant("mistral-large")).toBeNull()
      })
    })
  })

  describe("isAlreadyHighVariant", () => {
    it("should detect -high suffix", () => {
      // #given model IDs with -high suffix
      expect(isAlreadyHighVariant("claude-opus-4-5-high")).toBe(true)
      expect(isAlreadyHighVariant("gpt-5-2-high")).toBe(true)
      expect(isAlreadyHighVariant("gemini-3-pro-high")).toBe(true)
    })

    it("should detect -high suffix after normalization", () => {
      // #given model IDs with dots that end in -high
      expect(isAlreadyHighVariant("gpt-5.2-high")).toBe(true)
    })

    it("should return false for base models", () => {
      // #given base model IDs without -high suffix
      expect(isAlreadyHighVariant("claude-opus-4-5")).toBe(false)
      expect(isAlreadyHighVariant("claude-opus-4.5")).toBe(false)
      expect(isAlreadyHighVariant("gpt-5.2")).toBe(false)
      expect(isAlreadyHighVariant("gemini-3-pro")).toBe(false)
    })

    it("should return false for models with 'high' in name but not suffix", () => {
      // #given model IDs that contain 'high' but not as suffix
      expect(isAlreadyHighVariant("high-performance-model")).toBe(false)
    })
  })

  describe("getThinkingConfig", () => {
    describe("Already high variants", () => {
      it("should return null for already-high variants", () => {
        // #given already-high model variants
        expect(
          getThinkingConfig("anthropic", "claude-opus-4-5-high")
        ).toBeNull()
        expect(getThinkingConfig("openai", "gpt-5-2-high")).toBeNull()
        expect(getThinkingConfig("google", "gemini-3-pro-high")).toBeNull()
      })

      it("should return null for already-high variants via github-copilot", () => {
        // #given already-high model variants via github-copilot
        expect(
          getThinkingConfig("github-copilot", "claude-opus-4-5-high")
        ).toBeNull()
        expect(getThinkingConfig("github-copilot", "gpt-5.2-high")).toBeNull()
      })
    })

    describe("Non-thinking-capable models", () => {
      it("should return null for non-thinking-capable models", () => {
        // #given models that don't support thinking mode
        expect(getThinkingConfig("anthropic", "claude-2")).toBeNull()
        expect(getThinkingConfig("openai", "gpt-4")).toBeNull()
        expect(getThinkingConfig("google", "gemini-1")).toBeNull()
      })
    })

    describe("Unknown providers", () => {
      it("should return null for unknown providers", () => {
        // #given unknown provider IDs
        expect(getThinkingConfig("unknown-provider", "some-model")).toBeNull()
        expect(getThinkingConfig("azure", "gpt-5")).toBeNull()
      })
    })
  })

  describe("Direct provider configs (backwards compatibility)", () => {
    it("should still work for direct anthropic provider", () => {
      // #given direct anthropic provider
      const config = getThinkingConfig("anthropic", "claude-opus-4-5")

      // #then should return anthropic thinking config
      expect(config).not.toBeNull()
      expect(config?.thinking).toBeDefined()
      expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
    })

    it("should still work for direct google provider", () => {
      // #given direct google provider
      const config = getThinkingConfig("google", "gemini-3-pro")

      // #then should return google thinking config
      expect(config).not.toBeNull()
      expect(config?.providerOptions).toBeDefined()
    })

    it("should still work for amazon-bedrock provider", () => {
      // #given amazon-bedrock provider with claude model
      const config = getThinkingConfig("amazon-bedrock", "claude-sonnet-4-5")

      // #then should return bedrock thinking config
      expect(config).not.toBeNull()
      expect(config?.reasoningConfig).toBeDefined()
    })

    it("should still work for google-vertex provider", () => {
      // #given google-vertex provider
      const config = getThinkingConfig("google-vertex", "gemini-3-pro")

      // #then should return google-vertex thinking config
      expect(config).not.toBeNull()
      expect(config?.providerOptions).toBeDefined()
      const vertexOptions = (config?.providerOptions as Record<string, unknown>)?.[
        "google-vertex"
      ] as Record<string, unknown>
      expect(vertexOptions?.thinkingConfig).toBeDefined()
    })

    it("should work for direct openai provider", () => {
      // #given direct openai provider
      const config = getThinkingConfig("openai", "gpt-5")

      // #then should return openai thinking config
      expect(config).not.toBeNull()
      expect(config?.reasoning_effort).toBe("high")
    })
  })

  describe("THINKING_CONFIGS structure", () => {
    it("should have correct structure for anthropic", () => {
      const config = THINKING_CONFIGS.anthropic
      expect(config.thinking).toBeDefined()
      expect(config.maxTokens).toBe(128000)
    })

    it("should have correct structure for google", () => {
      const config = THINKING_CONFIGS.google
      expect(config.providerOptions).toBeDefined()
    })

    it("should have correct structure for openai", () => {
      const config = THINKING_CONFIGS.openai
      expect(config.reasoning_effort).toBe("high")
    })

    it("should have correct structure for amazon-bedrock", () => {
      const config = THINKING_CONFIGS["amazon-bedrock"]
      expect(config.reasoningConfig).toBeDefined()
      expect(config.maxTokens).toBe(64000)
    })
  })
})

switcher.ts

@@ -1,3 +1,67 @@
+/**
+ * Think Mode Switcher
+ *
+ * This module handles "thinking mode" activation for reasoning-capable models.
+ * When a user includes "think" keywords in their prompt, models are upgraded to
+ * their high-reasoning variants with extended thinking budgets.
+ *
+ * PROVIDER ALIASING:
+ * GitHub Copilot acts as a proxy provider that routes to underlying providers
+ * (Anthropic, Google, OpenAI). We resolve the proxy to the actual provider
+ * based on model name patterns, allowing GitHub Copilot to inherit thinking
+ * configurations without duplication.
+ *
+ * NORMALIZATION:
+ * Model IDs are normalized (dots → hyphens in version numbers) to handle API
+ * inconsistencies defensively while maintaining backwards compatibility.
+ */
+
+/**
+ * Normalizes model IDs to use consistent hyphen formatting.
+ * GitHub Copilot may use dots (claude-opus-4.5) but our maps use hyphens (claude-opus-4-5).
+ * This ensures lookups work regardless of format.
+ *
+ * @example
+ * normalizeModelID("claude-opus-4.5") // "claude-opus-4-5"
+ * normalizeModelID("gemini-3.5-pro") // "gemini-3-5-pro"
+ * normalizeModelID("gpt-5.2") // "gpt-5-2"
+ */
+function normalizeModelID(modelID: string): string {
+  // Replace dots with hyphens when followed by one or more digits
+  // This handles version numbers like 4.5 → 4-5, 5.2 → 5-2, 5.12 → 5-12
+  return modelID.replace(/\.(\d+)/g, "-$1")
+}
+
+/**
+ * Resolves proxy providers (like github-copilot) to their underlying provider.
+ * This allows GitHub Copilot to inherit thinking configurations from the actual
+ * model provider (Anthropic, Google, OpenAI).
+ *
+ * @example
+ * resolveProvider("github-copilot", "claude-opus-4-5") // "anthropic"
+ * resolveProvider("github-copilot", "gemini-3-pro") // "google"
+ * resolveProvider("github-copilot", "gpt-5.2") // "openai"
+ * resolveProvider("anthropic", "claude-opus-4-5") // "anthropic" (unchanged)
+ */
+function resolveProvider(providerID: string, modelID: string): string {
+  // GitHub Copilot is a proxy - infer actual provider from model name
+  if (providerID === "github-copilot") {
+    const modelLower = modelID.toLowerCase()
+    if (modelLower.includes("claude")) return "anthropic"
+    if (modelLower.includes("gemini")) return "google"
+    if (
+      modelLower.includes("gpt") ||
+      modelLower.includes("o1") ||
+      modelLower.includes("o3")
+    ) {
+      return "openai"
+    }
+  }
+  // Direct providers or unknown - return as-is
+  return providerID
+}
+
 // Maps model IDs to their "high reasoning" variant (internal convention)
 // For OpenAI models, this signals that reasoning_effort should be set to "high"
 const HIGH_VARIANT_MAP: Record<string, string> = {
@@ -7,6 +71,9 @@ const HIGH_VARIANT_MAP: Record<string, string> = {
   // Gemini
   "gemini-3-pro": "gemini-3-pro-high",
   "gemini-3-pro-low": "gemini-3-pro-high",
+  "gemini-3-pro-preview": "gemini-3-pro-preview-high",
+  "gemini-3-flash": "gemini-3-flash-high",
+  "gemini-3-flash-preview": "gemini-3-flash-preview-high",
   // GPT-5
   "gpt-5": "gpt-5-high",
   "gpt-5-mini": "gpt-5-mini-high",
@@ -14,42 +81,20 @@ const HIGH_VARIANT_MAP: Record<string, string> = {
"gpt-5-pro": "gpt-5-pro-high",
"gpt-5-chat-latest": "gpt-5-chat-latest-high",
// GPT-5.1
"gpt-5.1": "gpt-5.1-high",
"gpt-5.1-chat-latest": "gpt-5.1-chat-latest-high",
"gpt-5.1-codex": "gpt-5.1-codex-high",
"gpt-5.1-codex-mini": "gpt-5.1-codex-mini-high",
"gpt-5.1-codex-max": "gpt-5.1-codex-max-high",
"gpt-5-1": "gpt-5-1-high",
"gpt-5-1-chat-latest": "gpt-5-1-chat-latest-high",
"gpt-5-1-codex": "gpt-5-1-codex-high",
"gpt-5-1-codex-mini": "gpt-5-1-codex-mini-high",
"gpt-5-1-codex-max": "gpt-5-1-codex-max-high",
// GPT-5.2
"gpt-5.2": "gpt-5.2-high",
"gpt-5.2-chat-latest": "gpt-5.2-chat-latest-high",
"gpt-5.2-pro": "gpt-5.2-pro-high",
"gpt-5-2": "gpt-5-2-high",
"gpt-5-2-chat-latest": "gpt-5-2-chat-latest-high",
"gpt-5-2-pro": "gpt-5-2-pro-high",
}
const ALREADY_HIGH: Set<string> = new Set([
// Claude
"claude-sonnet-4-5-high",
"claude-opus-4-5-high",
// Gemini
"gemini-3-pro-high",
// GPT-5
"gpt-5-high",
"gpt-5-mini-high",
"gpt-5-nano-high",
"gpt-5-pro-high",
"gpt-5-chat-latest-high",
// GPT-5.1
"gpt-5.1-high",
"gpt-5.1-chat-latest-high",
"gpt-5.1-codex-high",
"gpt-5.1-codex-mini-high",
"gpt-5.1-codex-max-high",
// GPT-5.2
"gpt-5.2-high",
"gpt-5.2-chat-latest-high",
"gpt-5.2-pro-high",
])
const ALREADY_HIGH: Set<string> = new Set(Object.values(HIGH_VARIANT_MAP))
export const THINKING_CONFIGS: Record<string, Record<string, unknown>> = {
export const THINKING_CONFIGS = {
anthropic: {
thinking: {
type: "enabled",
@@ -82,42 +127,59 @@ export const THINKING_CONFIGS: Record<string, Record<string, unknown>> = {
       },
     },
   },
-}
+  openai: {
+    reasoning_effort: "high",
+  },
+} as const satisfies Record<string, Record<string, unknown>>

-const THINKING_CAPABLE_MODELS: Record<string, string[]> = {
+const THINKING_CAPABLE_MODELS = {
   anthropic: ["claude-sonnet-4", "claude-opus-4", "claude-3"],
   "amazon-bedrock": ["claude", "anthropic"],
   google: ["gemini-2", "gemini-3"],
   "google-vertex": ["gemini-2", "gemini-3"],
-}
+  openai: ["gpt-5", "o1", "o3"],
+} as const satisfies Record<string, readonly string[]>

 export function getHighVariant(modelID: string): string | null {
-  if (ALREADY_HIGH.has(modelID)) {
+  const normalized = normalizeModelID(modelID)
+  if (ALREADY_HIGH.has(normalized)) {
     return null
   }
-  return HIGH_VARIANT_MAP[modelID] ?? null
+  return HIGH_VARIANT_MAP[normalized] ?? null
 }

 export function isAlreadyHighVariant(modelID: string): boolean {
-  return ALREADY_HIGH.has(modelID) || modelID.endsWith("-high")
+  const normalized = normalizeModelID(modelID)
+  return ALREADY_HIGH.has(normalized) || normalized.endsWith("-high")
 }

+type ThinkingProvider = keyof typeof THINKING_CONFIGS
+
+function isThinkingProvider(provider: string): provider is ThinkingProvider {
+  return provider in THINKING_CONFIGS
+}
+
 export function getThinkingConfig(
   providerID: string,
   modelID: string
 ): Record<string, unknown> | null {
-  if (isAlreadyHighVariant(modelID)) {
+  const normalized = normalizeModelID(modelID)
+  if (isAlreadyHighVariant(normalized)) {
     return null
   }
-  const config = THINKING_CONFIGS[providerID]
-  const capablePatterns = THINKING_CAPABLE_MODELS[providerID]
+  const resolvedProvider = resolveProvider(providerID, modelID)
-  if (!config || !capablePatterns) {
+  if (!isThinkingProvider(resolvedProvider)) {
     return null
   }
-  const modelLower = modelID.toLowerCase()
+  const config = THINKING_CONFIGS[resolvedProvider]
+  const capablePatterns = THINKING_CAPABLE_MODELS[resolvedProvider]
+  const modelLower = normalized.toLowerCase()
   const isCapable = capablePatterns.some((pattern) =>
     modelLower.includes(pattern.toLowerCase())
   )
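For orientation, here is one plausible way the hook in `./index` (not touched by this commit) composes the exported helpers. This is a hedged sketch, not the actual implementation: `applyThinkMode` is a made-up name, and the real hook also gates on think keywords and per-session state.

```ts
import { getHighVariant, getThinkingConfig } from "./switcher"
import type { ThinkModeInput } from "./types"

// Hedged sketch of the composition, not the shipped hook code.
function applyThinkMode(input: ThinkModeInput): void {
  const model = input.message.model
  if (!model) return // a missing model is tolerated (see the edge-case tests)

  // Look up the config before upgrading the ID: getThinkingConfig returns
  // null once a model is already a "-high" variant.
  const config = getThinkingConfig(model.providerID, model.modelID)
  if (config) Object.assign(input.message, config)

  const upgraded = getHighVariant(model.modelID)
  if (upgraded) model.modelID = upgraded // e.g. "gpt-5.2" → "gpt-5-2-high"
}
```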