@@ -1,5 +1,6 @@
 import AsyncAlgorithms
 import Foundation
+import GPTEncoder
 import Preferences
 
 public protocol ChatGPTServiceType: ObservableObject {
@@ -54,7 +55,12 @@ public struct ChatGPTError: Error, Codable, LocalizedError {
 
 public actor ChatGPTService: ChatGPTServiceType {
     public var systemPrompt: String
-    public var temperature: Double
+
+    public var defaultTemperature: Double {
+        min(max(0, UserDefaults.shared.value(for: \.chatGPTTemperature)), 2)
+    }
+
+    var temperature: Double?
 
     public var model: String {
         let value = UserDefaults.shared.value(for: \.chatGPTModel)
@@ -92,7 +98,7 @@ public actor ChatGPTService: ChatGPTServiceType {
 
     public init(
         systemPrompt: String = "",
-        temperature: Double = 0.7
+        temperature: Double? = nil
     ) {
         self.systemPrompt = systemPrompt
        self.temperature = temperature
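
With this change an explicit `temperature` is only an override: when it is left `nil`, requests fall back to `defaultTemperature`, which clamps the stored preference into the 0...2 range the OpenAI chat API accepts. A rough sketch of the resulting behaviour (the prompt strings and the 3.5 preference value are illustrative, not part of the diff):

// Falls back to the preference; e.g. a stored chatGPTTemperature of 3.5 is clamped to 2.0.
let defaultService = ChatGPTService(systemPrompt: "You are a helpful assistant")

// Explicit override: every request is sent with temperature 0, ignoring the preference.
let deterministicService = ChatGPTService(systemPrompt: "You are a helpful assistant", temperature: 0)
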
@@ -112,12 +118,14 @@ public actor ChatGPTService: ChatGPTServiceType {
         )
         history.append(newMessage)
 
+        let (messages, remainingTokens) = combineHistoryWithSystemPrompt()
+
         let requestBody = CompletionRequestBody(
             model: model,
-            messages: combineHistoryWithSystemPrompt(),
-            temperature: temperature,
+            messages: messages,
+            temperature: temperature ?? defaultTemperature,
             stream: true,
-            max_tokens: maxToken
+            max_tokens: remainingTokens
         )
 
         isReceivingMessage = true
@@ -190,12 +198,14 @@ public actor ChatGPTService: ChatGPTServiceType {
         )
         history.append(newMessage)
 
+        let (messages, remainingTokens) = combineHistoryWithSystemPrompt()
+
         let requestBody = CompletionRequestBody(
             model: model,
-            messages: combineHistoryWithSystemPrompt(),
-            temperature: temperature,
+            messages: messages,
+            temperature: temperature ?? defaultTemperature,
             stream: true,
-            max_tokens: maxToken
+            max_tokens: remainingTokens
         )
 
         isReceivingMessage = true
@@ -210,10 +220,10 @@ public actor ChatGPTService: ChatGPTServiceType {
                 role: choice.message.role,
                 content: choice.message.content
             ))
-
+
             return choice.message.content
         }
-
+
         return nil
     }
 
@@ -250,17 +260,34 @@ extension ChatGPTService {
         uuidGenerator = generator
     }
 
-    func combineHistoryWithSystemPrompt() -> [CompletionRequestBody.Message] {
+    func combineHistoryWithSystemPrompt(
+        minimumReplyTokens: Int = 200,
+        maxNumberOfMessages: Int = UserDefaults.shared.value(for: \.chatGPTMaxMessageCount),
+        maxTokens: Int = UserDefaults.shared.value(for: \.chatGPTMaxToken),
+        encoder: TokenEncoder = GPTEncoder()
+    )
+        -> (messages: [CompletionRequestBody.Message], remainingTokens: Int)
+    {
         var all: [CompletionRequestBody.Message] = []
-        var count = 0
+        var allTokensCount = encoder.encode(text: systemPrompt).count
        for message in history.reversed() {
-            if count >= 5 { break }
+            if maxNumberOfMessages > 0, all.count >= maxNumberOfMessages { break }
             if message.content.isEmpty { continue }
+            let tokensCount = encoder.encode(text: message.content).count
+            if tokensCount + allTokensCount > maxTokens - minimumReplyTokens {
+                break
+            }
+            allTokensCount += tokensCount
             all.append(.init(role: message.role, content: message.content))
-            count += 1
         }
 
         all.append(.init(role: .system, content: systemPrompt))
-        return all.reversed()
+        return (all.reversed(), max(minimumReplyTokens, maxTokens - allTokensCount))
     }
 }
+
+protocol TokenEncoder {
+    func encode(text: String) -> [Int]
+}
+
+extension GPTEncoder: TokenEncoder {}
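
Routing token counting through the small `TokenEncoder` protocol keeps the budgeting logic testable: `combineHistoryWithSystemPrompt` can be driven with a fake encoder so the message cut-off and `remainingTokens` math do not depend on GPTEncoder's real tokenization. A rough sketch of such a test helper (the fake encoder, the values, and test-target access to the internal method are assumptions, not part of this change):

// Fake encoder for tests: one "token" per space-separated word.
struct FakeTokenEncoder: TokenEncoder {
    func encode(text: String) -> [Int] {
        text.split(separator: " ").map { _ in 0 }
    }
}

func exampleTokenBudget() async {
    // With an empty history only the system prompt (5 fake tokens) is counted.
    let service = ChatGPTService(systemPrompt: "You are a helpful assistant")
    let (messages, remainingTokens) = await service.combineHistoryWithSystemPrompt(
        minimumReplyTokens: 200,
        maxNumberOfMessages: 10,
        maxTokens: 4096,
        encoder: FakeTokenEncoder()
    )
    // messages holds just the system message; remainingTokens == 4091,
    // which is what the request then sends as max_tokens.
    _ = (messages, remainingTokens)
}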