forked from ericc-ch/copilot-api
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy patherror.test.ts
More file actions
204 lines (181 loc) · 6.5 KB
/
error.test.ts
File metadata and controls
204 lines (181 loc) · 6.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import type { Context } from "hono"
import { describe, test, expect, mock } from "bun:test"
import {
HTTPError,
buildAnthropicContextWindowErrorResponse,
forwardError,
isContextWindowError,
formatAnthropicContextWindowError,
sendAnthropicInvalidRequestError,
} from "~/lib/error"
/**
 * Builds a minimal Hono `Context` stand-in exposing only the `json` and
 * `header` members the error helpers interact with.
 */
function makeContext(jsonFn = mock(), headerFn = mock()) {
  const stub = { json: jsonFn, header: headerFn }
  return stub as unknown as Context
}
/**
 * Wraps a raw response body and status code in an `HTTPError`,
 * mimicking a failed upstream call.
 */
function makeHTTPError(body: string, status: number): HTTPError {
  return new HTTPError("Failed", new Response(body, { status }))
}
describe("forwardError — HTTPError with JSON body", () => {
  test("converts context-window Copilot error to Anthropic format with token numbers", async () => {
    const jsonSpy = mock()
    const headerSpy = mock()
    const ctx = makeContext(jsonSpy, headerSpy)
    // Simulated Copilot upstream payload carrying the token-limit error code.
    const upstreamBody = JSON.stringify({
      error: {
        message: "prompt token count of 55059 exceeds the limit of 12288",
        code: "model_max_prompt_tokens_exceeded",
      },
    })
    await forwardError(ctx, makeHTTPError(upstreamBody, 400))
    expect(jsonSpy).toHaveBeenCalledTimes(1)
    const [payload, status] = jsonSpy.mock.calls[0] as [unknown, number]
    const typed = payload as {
      type: string
      request_id: string
      error: { type: string; message: string }
    }
    expect(status).toBe(400)
    expect(typed.type).toBe("error")
    expect(typed.request_id).toMatch(/^req_/)
    // The generated request id must be echoed in the response header.
    expect(headerSpy).toHaveBeenCalledWith("request-id", typed.request_id)
    expect(typed.error.type).toBe("invalid_request_error")
    expect(typed.error.message).toBe(
      "prompt is too long: 55059 tokens > 12288 maximum",
    )
  })
  test("wraps plain-text error in envelope", async () => {
    const jsonSpy = mock()
    const ctx = makeContext(jsonSpy)
    await forwardError(ctx, makeHTTPError("Bad Gateway", 502))
    const [payload, status] = jsonSpy.mock.calls[0] as [unknown, number]
    const envelope = payload as { error: { message: string; type: string } }
    expect(status).toBe(502)
    expect(envelope.error.type).toBe("error")
    expect(envelope.error.message).toBe("Bad Gateway")
  })
  test("preserves status code from upstream", async () => {
    const jsonSpy = mock()
    const ctx = makeContext(jsonSpy)
    const notFoundBody = JSON.stringify({ error: { message: "not found" } })
    await forwardError(ctx, makeHTTPError(notFoundBody, 404))
    const [, status] = jsonSpy.mock.calls[0] as [unknown, number]
    expect(status).toBe(404)
  })
})
describe("forwardError — non-HTTPError", () => {
  test("returns 500 with error message", async () => {
    const jsonSpy = mock()
    const ctx = makeContext(jsonSpy)
    // Plain Error instances (no upstream Response) should map to a 500.
    await forwardError(ctx, new Error("something broke"))
    const [payload, status] = jsonSpy.mock.calls[0] as [unknown, number]
    const envelope = payload as { error: { message: string } }
    expect(status).toBe(500)
    expect(envelope.error.message).toBe("something broke")
  })
})
describe("isContextWindowError", () => {
  test("detects Copilot 'exceeds the limit' format", () => {
    const message = "prompt token count of 622303 exceeds the limit of 168000"
    expect(isContextWindowError(message)).toBe(true)
  })
  test("detects model_max_prompt_tokens_exceeded code", () => {
    const message =
      '{"error":{"message":"prompt too big","code":"model_max_prompt_tokens_exceeded"}}'
    expect(isContextWindowError(message)).toBe(true)
  })
  test("detects 'exceeds the context window'", () => {
    const message = "This request exceeds the context window"
    expect(isContextWindowError(message)).toBe(true)
  })
  test("detects context_length_exceeded", () => {
    expect(isContextWindowError("context_length_exceeded")).toBe(true)
  })
  test("detects 'maximum context length'", () => {
    const message = "This model's maximum context length is 128000 tokens"
    expect(isContextWindowError(message)).toBe(true)
  })
  test("detects 'input exceeds'", () => {
    expect(isContextWindowError("input exceeds model limit")).toBe(true)
  })
  test("returns false for unrelated errors", () => {
    // None of these mention the context window and must not match.
    const unrelated = [
      "rate limit exceeded",
      "internal server error",
      "model not found",
    ]
    for (const message of unrelated) {
      expect(isContextWindowError(message)).toBe(false)
    }
  })
})
describe("formatAnthropicContextWindowError", () => {
  test("extracts token numbers from Copilot error format", () => {
    const input = "prompt token count of 622303 exceeds the limit of 168000"
    expect(formatAnthropicContextWindowError(input)).toBe(
      "prompt is too long: 622303 tokens > 168000 maximum",
    )
  })
  test("handles comma-separated numbers", () => {
    const input = "prompt token count of 622,303 exceeds the limit of 168,000"
    expect(formatAnthropicContextWindowError(input)).toBe(
      "prompt is too long: 622303 tokens > 168000 maximum",
    )
  })
  test("falls back to defaults when no numbers found", () => {
    const result = formatAnthropicContextWindowError("context_length_exceeded")
    // Exact default numbers are not pinned, only the message shape.
    expect(result).toMatch(/^prompt is too long: \d+ tokens > \d+ maximum$/)
  })
  test("falls back to defaults for empty string", () => {
    const result = formatAnthropicContextWindowError("")
    expect(result).toMatch(/^prompt is too long: \d+ tokens > \d+ maximum$/)
  })
})
describe("buildAnthropicContextWindowErrorResponse", () => {
  test("uses the same request id in the header payload and body", () => {
    const input = "prompt token count of 622303 exceeds the limit of 168000"
    const { requestId, body } = buildAnthropicContextWindowErrorResponse(input)
    expect(requestId).toMatch(/^req_/)
    // Header id and body id must always agree.
    expect(body.request_id).toBe(requestId)
    expect(body.error.message).toBe(
      "prompt is too long: 622303 tokens > 168000 maximum",
    )
  })
})
describe("sendAnthropicInvalidRequestError", () => {
  test("returns Anthropic-shaped invalid_request_error with request-id header", () => {
    const jsonSpy = mock()
    const headerSpy = mock()
    const ctx = makeContext(jsonSpy, headerSpy)
    sendAnthropicInvalidRequestError(ctx, "Embedded image is too small.")
    const [payload, status] = jsonSpy.mock.calls[0] as [unknown, number]
    const typed = payload as {
      type: string
      request_id: string
      error: { type: string; message: string }
    }
    expect(status).toBe(400)
    expect(typed.type).toBe("error")
    expect(typed.request_id).toMatch(/^req_/)
    expect(typed.error.type).toBe("invalid_request_error")
    expect(typed.error.message).toBe("Embedded image is too small.")
    // The same id must be surfaced via the request-id response header.
    expect(headerSpy).toHaveBeenCalledWith("request-id", typed.request_id)
  })
})