fix(hooks): use last assistant message tokens instead of cumulative sum

Previously, token calculation accumulated tokens across ALL assistant messages,
which inflated the usage display (e.g., 524.9%) after compaction.
The hooks now use only the last assistant message's input and cache-read tokens,
which reflect the actual current context window usage.

🤖 GENERATED WITH ASSISTANCE OF [OhMyOpenCode](https://github.com/code-yeongyu/oh-my-opencode)
YeonGyu-Kim
2025-12-13 01:02:21 +09:00
parent 6a565ee126
commit 3d273ff853
2 changed files with 9 additions and 10 deletions
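
For context, a minimal standalone sketch of the old versus new calculation (the message shape, token counts, and ANTHROPIC_ACTUAL_LIMIT value below are illustrative assumptions, not the plugin's real types or data):

type TokenInfo = { input?: number; cache?: { read?: number } }
type AssistantMessage = { providerID: string; tokens?: TokenInfo }

const ANTHROPIC_ACTUAL_LIMIT = 200_000 // assumed limit for illustration

// Three turns in one session: each assistant reply reports the full prompt it saw,
// so the per-message inputs overlap and summing them double-counts the context.
const assistantMessages: AssistantMessage[] = [
  { providerID: "anthropic", tokens: { input: 120_000, cache: { read: 30_000 } } },
  { providerID: "anthropic", tokens: { input: 150_000, cache: { read: 40_000 } } },
  { providerID: "anthropic", tokens: { input: 60_000, cache: { read: 10_000 } } }, // post-compaction
]

// Old behaviour: cumulative sum across every assistant message.
const summed = assistantMessages.reduce((sum, m) => {
  return sum + (m.tokens?.input ?? 0) + (m.tokens?.cache?.read ?? 0)
}, 0)
console.log((summed / ANTHROPIC_ACTUAL_LIMIT) * 100) // 205 -- keeps growing with every turn

// New behaviour: only the last assistant message, which already reflects the compacted context.
const last = assistantMessages[assistantMessages.length - 1].tokens
const current = (last?.input ?? 0) + (last?.cache?.read ?? 0)
console.log((current / ANTHROPIC_ACTUAL_LIMIT) * 100) // 35

The point of the fix: the cumulative sum can exceed 100% long before the real context is full, while the last message alone tracks the post-compaction window size.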


@@ -52,11 +52,10 @@ export function createContextWindowMonitorHook(ctx: PluginInput) {
   const lastAssistant = assistantMessages[assistantMessages.length - 1]
   if (lastAssistant.providerID !== "anthropic") return
-  const totalInputTokens = assistantMessages.reduce((sum, m) => {
-    const inputTokens = m.tokens?.input ?? 0
-    const cacheReadTokens = m.tokens?.cache?.read ?? 0
-    return sum + inputTokens + cacheReadTokens
-  }, 0)
+  // Use only the last assistant message's input tokens
+  // This reflects the ACTUAL current context window usage (post-compaction)
+  const lastTokens = lastAssistant.tokens
+  const totalInputTokens = (lastTokens?.input ?? 0) + (lastTokens?.cache?.read ?? 0)
   const actualUsagePercentage = totalInputTokens / ANTHROPIC_ACTUAL_LIMIT


@@ -98,11 +98,11 @@ export function createGrepOutputTruncatorHook(ctx: PluginInput) {
   if (assistantMessages.length === 0) return
-  const totalInputTokens = assistantMessages.reduce((sum, m) => {
-    const inputTokens = m.tokens?.input ?? 0
-    const cacheReadTokens = m.tokens?.cache?.read ?? 0
-    return sum + inputTokens + cacheReadTokens
-  }, 0)
+  // Use only the last assistant message's input tokens
+  // This reflects the ACTUAL current context window usage (post-compaction)
+  const lastAssistant = assistantMessages[assistantMessages.length - 1]
+  const lastTokens = lastAssistant.tokens
+  const totalInputTokens = (lastTokens?.input ?? 0) + (lastTokens?.cache?.read ?? 0)
   const remainingTokens = ANTHROPIC_ACTUAL_LIMIT - totalInputTokens