@@ -5,9 +5,20 @@ import TokenEncoder
 
 /// A memory that automatically manages the history according to max tokens and max message count.
 public actor AutoManagedChatGPTMemory: ChatGPTMemory {
+    public struct ComposableMessages {
+        public var systemPromptMessage: ChatMessage
+        public var historyMessage: [ChatMessage]
+        public var retrievedContentMessage: ChatMessage
+        public var contextSystemPromptMessage: ChatMessage
+        public var newMessage: ChatMessage
+    }
+
+    public typealias HistoryComposer = (ComposableMessages) -> [ChatMessage]
+
     public private(set) var history: [ChatMessage] = [] {
         didSet { onHistoryChange() }
     }
+
     public private(set) var remainingTokens: Int?
 
     public var systemPrompt: String
@@ -19,16 +30,36 @@ public actor AutoManagedChatGPTMemory: ChatGPTMemory {
     static let encoder: TokenEncoder = TiktokenCl100kBaseTokenEncoder()
 
     var onHistoryChange: () -> Void = {}
+
+    let composeHistory: HistoryComposer
 
     public init(
         systemPrompt: String,
         configuration: ChatGPTConfiguration,
-        functionProvider: ChatGPTFunctionProvider
+        functionProvider: ChatGPTFunctionProvider,
+        composeHistory: @escaping HistoryComposer = {
+            /// Default Format:
+            /// ```
+            /// [System Prompt] priority: high
+            /// [Functions] priority: high
+            /// [Retrieved Content] priority: low
+            ///     [Retrieved Content A]
+            ///     <separator>
+            ///     [Retrieved Content B]
+            /// [Message History] priority: medium
+            /// [Context System Prompt] priority: high
+            /// [Latest Message] priority: high
+            /// ```
+            [$0.systemPromptMessage] +
+                $0.historyMessage +
+                [$0.retrievedContentMessage, $0.contextSystemPromptMessage, $0.newMessage]
+        }
     ) {
         self.systemPrompt = systemPrompt
         contextSystemPrompt = ""
         self.configuration = configuration
         self.functionProvider = functionProvider
+        self.composeHistory = composeHistory
         _ = Self.encoder // force pre-initialize
     }
 
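For illustration, here is how a caller might override the default composer — a minimal sketch, assuming `config` and `provider` are pre-existing `ChatGPTConfiguration` and `ChatGPTFunctionProvider` values (hypothetical names, not part of this diff):

```swift
// Hypothetical call site: `config` and `provider` stand in for values
// created elsewhere. This composer moves the retrieved content ahead of
// the message history instead of after it.
let memory = AutoManagedChatGPTMemory(
    systemPrompt: "You are a helpful assistant.",
    configuration: config,
    functionProvider: provider,
    composeHistory: {
        [$0.systemPromptMessage, $0.retrievedContentMessage] +
            $0.historyMessage +
            [$0.contextSystemPromptMessage, $0.newMessage]
    }
)
```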
@@ -60,19 +91,6 @@ public actor AutoManagedChatGPTMemory: ChatGPTMemory {
     }
 
     /// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-    ///
-    /// Format:
-    /// ```
-    /// [System Prompt] priority: high
-    /// [Functions] priority: high
-    /// [Retrieved Content] priority: low
-    ///     [Retrieved Content A]
-    ///     <separator>
-    ///     [Retrieved Content B]
-    /// [Message History] priority: medium
-    /// [Context System Prompt] priority: high
-    /// [Latest Message] priority: high
-    /// ```
     func generateSendingHistory(
         maxNumberOfMessages: Int = UserDefaults.shared.value(for: \.chatGPTMaxMessageCount),
         encoder: TokenEncoder = AutoManagedChatGPTMemory.encoder
@@ -105,11 +123,13 @@ public actor AutoManagedChatGPTMemory: ChatGPTMemory {
             encoder: encoder
         )
 
-        let allMessages: [ChatMessage] = (
-            [systemPromptMessage] +
-                historyMessage +
-                [retrievedContentMessage, contextSystemPromptMessage, newMessage]
-        ).filter {
+        let allMessages = composeHistory(.init(
+            systemPromptMessage: systemPromptMessage,
+            historyMessage: historyMessage,
+            retrievedContentMessage: retrievedContentMessage,
+            contextSystemPromptMessage: contextSystemPromptMessage,
+            newMessage: newMessage
+        )).filter {
             !($0.content?.isEmpty ?? false)
         }
 
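One subtlety of the filter above: messages whose content is `nil` pass through (presumably so messages that carry no text, such as function calls, survive), while empty strings are dropped, because `nil?.isEmpty ?? false` evaluates to `false`. A standalone sketch of just that predicate:

```swift
// The same predicate applied to plain optional strings:
// empty strings are dropped, nil survives.
let contents: [String?] = ["Hello", "", nil]
let kept = contents.filter { !($0?.isEmpty ?? false) }
// kept == ["Hello", nil]
```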
@@ -269,7 +289,7 @@ extension AutoManagedChatGPTMemory {
     }
 }
 
-extension TokenEncoder {
+public extension TokenEncoder {
     /// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
     func countToken(message: ChatMessage) -> Int {
         var total = 3
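With the extension now `public`, token counting is available to clients of the module — a minimal sketch, assuming `ChatMessage` offers a `role:content:` initializer (its full signature is not shown in this diff):

```swift
// Hypothetical usage of the now-public token counter.
let encoder: TokenEncoder = TiktokenCl100kBaseTokenEncoder()
let message = ChatMessage(role: .user, content: "Hello!")
let count = encoder.countToken(message: message)
```

The starting value of 3 matches the fixed per-message token overhead described in the linked cookbook notebook.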