Commit aff7de2

Use tiktoken for faster token count
1 parent 3e4d4ee commit aff7de2

1 file changed: 12 additions & 7 deletions

Tool/Sources/OpenAIService/Memory/AutoManagedChatGPTMemoryStrategy/AutoManagedChatGPTMemoryGoogleAIStrategy.swift

@@ -7,13 +7,18 @@ extension AutoManagedChatGPTMemory {
         let configuration: ChatGPTConfiguration
 
         func countToken(_ message: ChatMessage) async -> Int {
-            guard let model = configuration.model else {
-                return 0
-            }
-            let aiModel = GenerativeModel(name: model.info.modelName, apiKey: configuration.apiKey)
-            if message.isEmpty { return 0 }
-            let modelMessage = ModelContent(message)
-            return (try? await aiModel.countTokens([modelMessage]).totalTokens) ?? 0
+            (await OpenAIStrategy().countToken(message)) + 10
+            // Using local tiktoken instead until I find a faster solution.
+            // The official solution requires sending a lot of requests when adjusting the prompt.
+            // Adding 10 just in case.
+
+            // guard let model = configuration.model else {
+            //     return 0
+            // }
+            // let aiModel = GenerativeModel(name: model.info.modelName, apiKey: configuration.apiKey)
+            // if message.isEmpty { return 0 }
+            // let modelMessage = ModelContent(message)
+            // return (try? await aiModel.countTokens([modelMessage]).totalTokens) ?? 0
         }
 
         func countToken<F>(_: F) async -> Int where F: ChatGPTFunction {
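
The commented-out code is the part being replaced: counting tokens through Google's GenerativeModel.countTokens API takes a network round trip, and trimming a chat history to fit the context window can require one count per adjustment. Counting locally with the tiktoken-based OpenAIStrategy is only approximate for Gemini models, hence the "+ 10" safety margin. Below is a minimal sketch of the trade-off; the names are hypothetical and not from this repository:

    // Hypothetical stand-in for a local tokenizer. A real tiktoken BPE count
    // is more accurate; this heuristic assumes roughly 4 UTF-8 bytes per
    // token and, like the commit above, pads the result with a small fixed
    // margin in case the local count undershoots the model's own count.
    struct LocalTokenEstimator {
        let safetyMargin = 10

        func countToken(_ text: String) -> Int {
            guard !text.isEmpty else { return 0 }
            let estimate = max(1, text.utf8.count / 4)
            return estimate + safetyMargin
        }
    }

    // No await and no request, so prompt trimming can re-count in a tight loop.
    let estimator = LocalTokenEstimator()
    print(estimator.countToken("Hello, Gemini. How many tokens is this?"))

Trading exactness for zero latency is reasonable here because the count only decides how much history fits into the prompt; a slight overestimate just trims a little more than strictly necessary.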
