feat: implement request logging functionality
zhangxinyu9 committed Apr 10, 2026
commit 6b297c3316d6b62abc46c16582c27d92ad783b9d
76 changes: 76 additions & 0 deletions src/lib/request-logger.ts
@@ -0,0 +1,76 @@
import type { Context, Next } from "hono"

const requestModelKey = "requestModel"
const resolvedModelKey = "resolvedModel"
const responseModelKey = "responseModel"

export function setRequestModel(c: Context, model: string | null | undefined) {
  if (model) {
    c.set(requestModelKey, model)
  }
}

export function setResolvedModel(c: Context, model: string | null | undefined) {
  if (model) {
    c.set(resolvedModelKey, model)
  }
}

export function setResponseModel(c: Context, model: string | null | undefined) {
  if (model) {
    c.set(responseModelKey, model)
  }
}

export async function requestLogger(c: Context, next: Next) {
  const start = Date.now()
  const requestTarget = getRequestTarget(c)

  console.log(`<-- ${c.req.method} ${requestTarget}`)

  await next()

  const duration = formatDuration(Date.now() - start)
  const modelSuffix = formatModelSuffix(c)

  console.log(
    `--> ${c.req.method} ${requestTarget} ${c.res.status} ${duration}${modelSuffix}`,
  )
}

function getRequestTarget(c: Context): string {
  const url = new URL(c.req.url)
  return `${url.pathname}${url.search}`
}

function formatDuration(durationMs: number): string {
  if (durationMs >= 1000) {
    return `${Math.round(durationMs / 1000)}s`
  }

  return `${durationMs}ms`
}

function formatModelSuffix(c: Context): string {
  const requestModel = c.get(requestModelKey) as string | undefined
  const resolvedModel = c.get(resolvedModelKey) as string | undefined
  const responseModel = c.get(responseModelKey) as string | undefined

  if (responseModel) {
    return ` model=${responseModel}`
  }

  if (resolvedModel && requestModel && resolvedModel !== requestModel) {
    return ` requested_model=${requestModel} resolved_model=${resolvedModel}`
  }

  if (resolvedModel) {
    return ` model=${resolvedModel}`
  }

  if (requestModel) {
    return ` model=${requestModel}`
  }

  return ""
}
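For context (not part of this commit): a minimal usage sketch of how the middleware and the model setters are meant to compose in a Hono app. The route path, model ids, and response body below are hypothetical.

import { Hono } from "hono"
import {
  requestLogger,
  setRequestModel,
  setResolvedModel,
  setResponseModel,
} from "./lib/request-logger"

const app = new Hono()
app.use(requestLogger)

app.post("/v1/chat/completions", async (c) => {
  const body = await c.req.json<{ model?: string }>()
  setRequestModel(c, body.model) // model as sent by the client
  setResolvedModel(c, body.model) // normally the model the proxy selects
  setResponseModel(c, "gpt-4o-2024-11-20") // hypothetical id taken from the upstream response
  return c.json({ ok: true })
})

// With a response model recorded, the middleware would log something like:
//   <-- POST /v1/chat/completions
//   --> POST /v1/chat/completions 200 12ms model=gpt-4o-2024-11-20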
8 changes: 8 additions & 0 deletions src/routes/chat-completions/handler.ts
@@ -4,6 +4,11 @@ import consola from "consola"

import { awaitApproval } from "~/lib/approval"
import { checkRateLimit } from "~/lib/rate-limit"
import {
  setRequestModel,
  setResolvedModel,
  setResponseModel,
} from "~/lib/request-logger"
import { state } from "~/lib/state"
import { getTokenCount } from "~/lib/tokenizer"
import { isNullish } from "~/lib/utils"
@@ -18,6 +23,8 @@ export async function handleCompletion(c: Context) {
  await checkRateLimit(state)

  let payload = await c.req.json<ChatCompletionsPayload>()
  setRequestModel(c, payload.model)
  setResolvedModel(c, payload.model)
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400))

  // Find the selected model
@@ -62,6 +69,7 @@ const response = await createChatCompletions(payload)
  const response = await createChatCompletions(payload)

  if (isNonStreaming(response)) {
    setResponseModel(c, response.model)
    consola.debug("Non-streaming response:", JSON.stringify(response))
    return c.json(response)
  }
8 changes: 8 additions & 0 deletions src/routes/messages/handler.ts
@@ -6,6 +6,11 @@ import { streamSSE } from "hono/streaming"

import { awaitApproval } from "~/lib/approval"
import { checkRateLimit } from "~/lib/rate-limit"
import {
  setRequestModel,
  setResolvedModel,
  setResponseModel,
} from "~/lib/request-logger"
import { state } from "~/lib/state"
import {
  createChatCompletions,
@@ -27,9 +32,11 @@ export async function handleCompletion(c: Context) {
  await checkRateLimit(state)

  const anthropicPayload = await c.req.json<AnthropicMessagesPayload>()
  setRequestModel(c, anthropicPayload.model)
  consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload))

  const openAIPayload = translateToOpenAI(anthropicPayload)
  setResolvedModel(c, openAIPayload.model)
  consola.debug(
    "Translated OpenAI request payload:",
    JSON.stringify(openAIPayload),
Expand All @@ -42,6 +49,7 @@ export async function handleCompletion(c: Context) {
  const response = await createChatCompletions(openAIPayload)

  if (isNonStreaming(response)) {
    setResponseModel(c, response.model)
    consola.debug(
      "Non-streaming response from Copilot:",
      JSON.stringify(response).slice(-400),
4 changes: 2 additions & 2 deletions src/server.ts
@@ -1,7 +1,7 @@
import { Hono } from "hono"
import { cors } from "hono/cors"
import { logger } from "hono/logger"

import { requestLogger } from "./lib/request-logger"
import { completionRoutes } from "./routes/chat-completions/route"
import { embeddingRoutes } from "./routes/embeddings/route"
import { messageRoutes } from "./routes/messages/route"
@@ -11,7 +11,7 @@ import { usageRoute } from "./routes/usage/route"

export const server = new Hono()

server.use(logger())
server.use(requestLogger)
server.use(cors())

server.get("/", (c) => c.text("Server running"))
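Net effect of the server.ts change (an observation, not part of the diff): the stock hono/logger middleware is swapped for the custom requestLogger, keeping essentially the same arrow-style request/response lines while appending model information. When a handler records a requested model and a different resolved model and no response model is set, the suffix switches to the two-field form, e.g. with hypothetical model names:

  <-- POST /v1/messages
  --> POST /v1/messages 200 2s requested_model=claude-sonnet-4 resolved_model=gpt-4o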