Skip to content

Commit 9052362

Browse files
committed
Print token usage in DEBUG mode
1 parent 07630c4 commit 9052362

2 files changed

Lines changed: 40 additions & 6 deletions

File tree

Tool/Sources/OpenAIService/EmbeddingService.swift

Lines changed: 24 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import Foundation
2+
import Logger
23

34
public struct EmbeddingResponse: Decodable {
45
public struct Object: Decodable {
@@ -74,9 +75,19 @@ public struct EmbeddingService {
7475
.otherError(String(data: result, encoding: .utf8) ?? "Unknown Error")
7576
}
7677

77-
return try JSONDecoder().decode(EmbeddingResponse.self, from: result)
78+
let embeddingResponse = try JSONDecoder().decode(EmbeddingResponse.self, from: result)
79+
#if DEBUG
80+
Logger.service.info("""
81+
Embedding usage
82+
- number of strings: \(text.count)
83+
- prompt tokens: \(embeddingResponse.usage.prompt_tokens)
84+
- total tokens: \(embeddingResponse.usage.total_tokens)
85+
86+
""")
87+
#endif
88+
return embeddingResponse
7889
}
79-
90+
8091
public func embed(tokens: [[Int]]) async throws -> EmbeddingResponse {
8192
guard let url = URL(string: configuration.endpoint) else {
8293
throw ChatGPTServiceError.endpointIncorrect
@@ -112,7 +123,17 @@ public struct EmbeddingService {
112123
.otherError(String(data: result, encoding: .utf8) ?? "Unknown Error")
113124
}
114125

115-
return try JSONDecoder().decode(EmbeddingResponse.self, from: result)
126+
let embeddingResponse = try JSONDecoder().decode(EmbeddingResponse.self, from: result)
127+
#if DEBUG
128+
Logger.service.info("""
129+
Embedding usage
130+
- number of strings: \(tokens.count)
131+
- prompt tokens: \(embeddingResponse.usage.prompt_tokens)
132+
- total tokens: \(embeddingResponse.usage.total_tokens)
133+
134+
""")
135+
#endif
136+
return embeddingResponse
116137
}
117138
}
118139

Tool/Sources/OpenAIService/Memory/AutoManagedChatGPTMemory.swift

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
import Foundation
2+
import Logger
23
import Preferences
34
import TokenEncoder
45

56
/// A memory that automatically manages the history according to max tokens and max message count.
67
public actor AutoManagedChatGPTMemory: ChatGPTMemory {
78
public private(set) var messages: [ChatMessage] = []
8-
public private(set) var remainingTokens: Int? = nil
9+
public private(set) var remainingTokens: Int?
910

1011
public var systemPrompt: ChatMessage
1112
public var history: [ChatMessage] = [] {
@@ -44,7 +45,7 @@ public actor AutoManagedChatGPTMemory: ChatGPTMemory {
4445
await setOnHistoryChangeBlock(onChange)
4546
}
4647
}
47-
48+
4849
public func refresh() async {
4950
messages = generateSendingHistory()
5051
remainingTokens = generateRemainingTokens()
@@ -74,7 +75,8 @@ public actor AutoManagedChatGPTMemory: ChatGPTMemory {
7475
}
7576
partial += count
7677
}
77-
var allTokensCount = functionTokenCount + 3 // every reply is primed with <|start|>assistant<|message|>
78+
var allTokensCount = functionTokenCount +
79+
3 // every reply is primed with <|start|>assistant<|message|>
7880
allTokensCount += systemPrompt.isEmpty ? 0 : systemMessageTokenCount
7981

8082
for (index, message) in history.enumerated().reversed() {
@@ -93,6 +95,17 @@ public actor AutoManagedChatGPTMemory: ChatGPTMemory {
9395
if !systemPrompt.isEmpty {
9496
all.append(systemPrompt)
9597
}
98+
99+
#if DEBUG
100+
Logger.service.info("""
101+
Sending tokens count
102+
- system prompt: \(systemMessageTokenCount)
103+
- functions: \(functionTokenCount)
104+
- total: \(allTokensCount)
105+
106+
""")
107+
#endif
108+
96109
return all.reversed()
97110
}
98111

0 commit comments

Comments (0)