Skip to content

Commit fd7f92d

Browse files
committed
Rename types
1 parent 1b213af commit fd7f92d

10 files changed

Lines changed: 239 additions & 54 deletions

Pro

Submodule Pro updated from 908dd29 to 5f1f1dd

Tool/Sources/OpenAIService/APIs/ChatCompletionsAPIDefinition.swift

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ import Foundation
33
import Preferences
44

55
/// https://platform.openai.com/docs/api-reference/chat/create
6-
struct CompletionRequestBody: Codable, Equatable {
6+
struct ChatCompletionsRequestBody: Codable, Equatable {
77
struct Message: Codable, Equatable {
88
/// The role of the message.
99
var role: ChatMessage.Role
@@ -21,7 +21,7 @@ struct CompletionRequestBody: Codable, Equatable {
2121
/// "arguments": "{ \"location\": \"earth\" }"
2222
/// }
2323
/// ```
24-
var function_call: CompletionRequestBody.MessageFunctionCall?
24+
var function_call: ChatCompletionsRequestBody.MessageFunctionCall?
2525
}
2626

2727
struct MessageFunctionCall: Codable, Equatable {
@@ -119,21 +119,21 @@ public enum FunctionCallStrategy: Codable, Equatable {
119119

120120
// MARK: - Stream API
121121

122-
typealias CompletionStreamAPIBuilder = (
122+
typealias ChatCompletionsStreamAPIBuilder = (
123123
String,
124124
ChatModel,
125125
URL,
126-
CompletionRequestBody,
126+
ChatCompletionsRequestBody,
127127
ChatGPTPrompt
128-
) -> any CompletionStreamAPI
128+
) -> any ChatCompletionsStreamAPI
129129

130-
protocol CompletionStreamAPI {
130+
protocol ChatCompletionsStreamAPI {
131131
associatedtype CompletionSequence: AsyncSequence
132-
where CompletionSequence.Element == CompletionStreamDataChunk
132+
where CompletionSequence.Element == ChatCompletionsStreamDataChunk
133133
func callAsFunction() async throws -> CompletionSequence
134134
}
135135

136-
struct CompletionStreamDataChunk: Codable {
136+
struct ChatCompletionsStreamDataChunk: Codable {
137137
var id: String?
138138
var object: String?
139139
var model: String?
@@ -159,15 +159,15 @@ struct CompletionStreamDataChunk: Codable {
159159

160160
// MARK: - Non Stream API
161161

162-
typealias CompletionAPIBuilder = (String, ChatModel, URL, CompletionRequestBody, ChatGPTPrompt)
163-
-> any CompletionAPI
162+
typealias ChatCompletionsAPIBuilder = (String, ChatModel, URL, ChatCompletionsRequestBody, ChatGPTPrompt)
163+
-> any ChatCompletionsAPI
164164

165-
protocol CompletionAPI {
166-
func callAsFunction() async throws -> CompletionResponseBody
165+
protocol ChatCompletionsAPI {
166+
func callAsFunction() async throws -> ChatCompletionResponseBody
167167
}
168168

169169
/// https://platform.openai.com/docs/api-reference/chat/create
170-
struct CompletionResponseBody: Codable, Equatable {
170+
struct ChatCompletionResponseBody: Codable, Equatable {
171171
struct Message: Codable, Equatable {
172172
/// The role of the message.
173173
var role: ChatMessage.Role
@@ -185,7 +185,7 @@ struct CompletionResponseBody: Codable, Equatable {
185185
/// "arguments": "{ \"location\": \"earth\" }"
186186
/// }
187187
/// ```
188-
var function_call: CompletionRequestBody.MessageFunctionCall?
188+
var function_call: ChatCompletionsRequestBody.MessageFunctionCall?
189189
}
190190

191191
struct Choice: Codable, Equatable {

Tool/Sources/OpenAIService/APIs/GoogleAICompletionAPI.swift

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,13 @@ import Foundation
33
import GoogleGenerativeAI
44
import Preferences
55

6-
struct GoogleCompletionAPI: CompletionAPI {
6+
struct GoogleCompletionAPI: ChatCompletionsAPI {
77
let apiKey: String
88
let model: ChatModel
9-
var requestBody: CompletionRequestBody
9+
var requestBody: ChatCompletionsRequestBody
1010
let prompt: ChatGPTPrompt
1111

12-
func callAsFunction() async throws -> CompletionResponseBody {
12+
func callAsFunction() async throws -> ChatCompletionResponseBody {
1313
let aiModel = GenerativeModel(
1414
name: model.info.modelName,
1515
apiKey: apiKey,

Tool/Sources/OpenAIService/APIs/GoogleAICompletionStreamAPI.swift

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,13 @@ import Foundation
33
import GoogleGenerativeAI
44
import Preferences
55

6-
struct GoogleCompletionStreamAPI: CompletionStreamAPI {
6+
struct GoogleCompletionStreamAPI: ChatCompletionsStreamAPI {
77
let apiKey: String
88
let model: ChatModel
9-
var requestBody: CompletionRequestBody
9+
var requestBody: ChatCompletionsRequestBody
1010
let prompt: ChatGPTPrompt
1111

12-
func callAsFunction() async throws -> AsyncThrowingStream<CompletionStreamDataChunk, Error> {
12+
func callAsFunction() async throws -> AsyncThrowingStream<ChatCompletionsStreamDataChunk, Error> {
1313
let aiModel = GenerativeModel(
1414
name: model.info.modelName,
1515
apiKey: apiKey,
@@ -31,13 +31,13 @@ struct GoogleCompletionStreamAPI: CompletionStreamAPI {
3131
)
3232
}
3333

34-
let stream = AsyncThrowingStream<CompletionStreamDataChunk, Error> { continuation in
34+
let stream = AsyncThrowingStream<ChatCompletionsStreamDataChunk, Error> { continuation in
3535
let stream = aiModel.generateContentStream(history)
3636
let task = Task {
3737
do {
3838
for try await response in stream {
3939
if Task.isCancelled { break }
40-
let chunk = CompletionStreamDataChunk(
40+
let chunk = ChatCompletionsStreamDataChunk(
4141
object: "",
4242
model: model.info.modelName,
4343
choices: response.candidates.map { candidate in
Lines changed: 185 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,185 @@
1+
import AIModel
2+
import Foundation
3+
import Preferences
4+
5+
/// A chat-completions client for a locally hosted Ollama server.
///
/// Holds the translated request state; the actual API conformances
/// (`ChatCompletionsAPI`, `ChatCompletionsStreamAPI`) live in extensions.
public actor OllamaService {
    /// Kept for interface parity with the other services.
    /// NOTE(review): never attached to outgoing requests — confirm whether
    /// Ollama deployments here ever need auth.
    var apiKey: String
    /// URL of the Ollama chat endpoint requests are POSTed to.
    var endpoint: URL
    /// The OpenAI-style request that gets translated into Ollama's schema.
    var requestBody: ChatCompletionsRequestBody
    /// Chat model whose `info.modelName` is sent to the server.
    var model: ChatModel

    /// Output format accepted by Ollama's `format` field.
    public enum ResponseFormat: String {
        case none = ""
        case json
    }

    init(
        apiKey: String,
        model: ChatModel,
        endpoint: URL,
        requestBody: ChatCompletionsRequestBody
    ) {
        self.model = model
        self.requestBody = requestBody
        self.endpoint = endpoint
        self.apiKey = apiKey
    }
}
28+
29+
// MARK: - ChatCompletionsAPI (non-streaming)

extension OllamaService: ChatCompletionsAPI {
    /// Non-streaming chat completion.
    ///
    /// Not implemented yet — only the streaming path is supported. A bare
    /// `fatalError()` gave crash logs nothing to attribute; include a
    /// diagnostic message so the failure is identifiable.
    func callAsFunction() async throws -> ChatCompletionResponseBody {
        fatalError("OllamaService does not implement the non-streaming chat completions API yet; use the streaming API.")
    }
}
34+
35+
// MARK: - ChatCompletionsStreamAPI (streaming)

extension OllamaService: ChatCompletionsStreamAPI {
    typealias CompletionSequence = AsyncMapSequence<ResponseStream<OllamaService.ChatCompletionResponseChunk>, ChatCompletionsStreamDataChunk>

    /// Streams a chat completion from the Ollama server.
    ///
    /// Translates the stored OpenAI-style `requestBody` into Ollama's chat
    /// request schema, POSTs it to `endpoint`, and maps each streamed
    /// response chunk back into an OpenAI-style stream chunk.
    ///
    /// - Returns: An async sequence of `ChatCompletionsStreamDataChunk`.
    /// - Throws: `Error.otherError` when the response is not HTTP or the
    ///   server replies with a non-200 status (the response body text is
    ///   included in the error).
    func callAsFunction() async throws -> CompletionSequence {
        // Named distinctly to avoid shadowing the actor's stored `requestBody`.
        let ollamaRequestBody = ChatCompletionRequestBody(
            model: model.info.modelName,
            messages: requestBody.messages.map { message in
                // Ollama has no "function" role, so function messages are
                // folded into the user role.
                let role: Message.Role
                switch message.role {
                case .assistant:
                    role = .assistant
                case .user:
                    role = .user
                case .system:
                    role = .system
                case .function:
                    role = .user
                }
                return .init(role: role, content: message.content)
            },
            stream: true,
            options: .init(
                temperature: requestBody.temperature,
                stop: requestBody.stop,
                num_predict: requestBody.max_tokens
            ),
            keep_alive: nil,
            format: nil
        )

        var request = URLRequest(url: endpoint)
        request.httpMethod = "POST"
        request.httpBody = try JSONEncoder().encode(ollamaRequestBody)
        request.setValue("application/json", forHTTPHeaderField: "Content-Type")

        let (result, response) = try await URLSession.shared.bytes(for: request)

        guard let response = response as? HTTPURLResponse else {
            // Was `throw CancellationError()`, which misreported a malformed
            // response as a task cancellation; surface it as a service error.
            throw Error.otherError("Response is not an HTTP response.")
        }

        guard response.statusCode == 200 else {
            // Collect the error body so the thrown error carries the
            // server's explanation.
            let text = try await result.lines.reduce(into: "") { partialResult, current in
                partialResult += current
            }
            throw Error.otherError(text)
        }

        // Each streamed element is decoded independently; `done` marks the
        // terminal chunk.
        let stream = ResponseStream(result: result) {
            let chunk = try JSONDecoder().decode(
                ChatCompletionResponseChunk.self,
                from: $0.data(using: .utf8) ?? Data()
            )
            return .init(chunk: chunk, done: chunk.done)
        }

        // Map Ollama chunks back into the OpenAI-style stream shape.
        let sequence = stream.map { chunk in
            ChatCompletionsStreamDataChunk(
                id: UUID().uuidString,
                object: chunk.model,
                model: chunk.model,
                choices: [
                    .init(
                        delta: .init(
                            role: {
                                switch chunk.message?.role {
                                case .none:
                                    return nil
                                case .assistant:
                                    return .assistant
                                case .user:
                                    return .user
                                case .system:
                                    return .system
                                }
                            }(),
                            content: chunk.message?.content
                        )
                    ),
                ]
            )
        }

        return sequence
    }
}
121+
122+
// MARK: - Wire Types & Errors

extension OllamaService {
    /// A single chat message in Ollama's wire format.
    struct Message: Codable, Equatable {
        public enum Role: String, Codable {
            case user
            case assistant
            case system
        }

        /// Who authored the message.
        public var role: Role
        /// The message text.
        public var content: String
    }

    /// Failures surfaced by the Ollama client.
    enum Error: Swift.Error, LocalizedError {
        case decodeError(Swift.Error)
        case otherError(String)

        public var errorDescription: String? {
            switch self {
            case .decodeError(let underlying):
                return underlying.localizedDescription
            case .otherError(let description):
                return description
            }
        }
    }
}
150+
151+
// MARK: - Chat Completion API
152+
153+
/// https://github.com/ollama/ollama/blob/main/docs/api.md#chat-request-streaming
154+
extension OllamaService {
    /// Request body for Ollama's streaming chat endpoint.
    ///
    /// Field names match Ollama's snake_case wire format exactly, so no
    /// `CodingKeys` mapping is needed — do not rename them.
    struct ChatCompletionRequestBody: Codable {
        /// Model runtime options nested under `options` in the request.
        struct Options: Codable {
            var temperature: Double?
            var stop: [String]?
            // Ollama's analogue of max_tokens; populated from the
            // OpenAI-style request's `max_tokens` by the stream API.
            var num_predict: Int?
            var top_k: Int?
            var top_p: Double?
        }

        var model: String
        var messages: [Message]
        // true requests a streamed (chunked) response.
        var stream: Bool
        var options: Options
        // presumably how long the model stays loaded after the request
        // (e.g. "5m") — verify against the Ollama API docs; always nil here.
        var keep_alive: String?
        // presumably "json" forces JSON output — verify against the Ollama
        // API docs; always nil here (see also ResponseFormat).
        var format: String?
    }

    /// One streamed chunk of Ollama's chat response. Decode-only — these
    /// are never sent back to the server.
    struct ChatCompletionResponseChunk: Decodable {
        var model: String
        // Present on content chunks; mapped into the OpenAI-style delta
        // by the stream API.
        var message: Message?
        var response: String?
        // true on the terminal chunk; used to finish the response stream.
        var done: Bool
        // Server-reported timing/eval statistics; currently unread by
        // this client.
        var total_duration: Int64?
        var load_duration: Int64?
        var prompt_eval_count: Int?
        var prompt_eval_duration: Int64?
        var eval_count: Int?
        var eval_duration: Int64?
    }
}
185+

Tool/Sources/OpenAIService/APIs/OpenAICompletionAPI.swift

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,17 +15,17 @@ struct CompletionAPIError: Error, Codable, LocalizedError {
1515
var errorDescription: String? { error.message }
1616
}
1717

18-
struct OpenAICompletionAPI: CompletionAPI {
18+
struct OpenAICompletionAPI: ChatCompletionsAPI {
1919
var apiKey: String
2020
var endpoint: URL
21-
var requestBody: CompletionRequestBody
21+
var requestBody: ChatCompletionsRequestBody
2222
var model: ChatModel
2323

2424
init(
2525
apiKey: String,
2626
model: ChatModel,
2727
endpoint: URL,
28-
requestBody: CompletionRequestBody
28+
requestBody: ChatCompletionsRequestBody
2929
) {
3030
self.apiKey = apiKey
3131
self.endpoint = endpoint
@@ -34,7 +34,7 @@ struct OpenAICompletionAPI: CompletionAPI {
3434
self.model = model
3535
}
3636

37-
func callAsFunction() async throws -> CompletionResponseBody {
37+
func callAsFunction() async throws -> ChatCompletionResponseBody {
3838
var request = URLRequest(url: endpoint)
3939
request.httpMethod = "POST"
4040
let encoder = JSONEncoder()
@@ -63,7 +63,7 @@ struct OpenAICompletionAPI: CompletionAPI {
6363
}
6464

6565
do {
66-
return try JSONDecoder().decode(CompletionResponseBody.self, from: result)
66+
return try JSONDecoder().decode(ChatCompletionResponseBody.self, from: result)
6767
} catch {
6868
dump(error)
6969
throw error

0 commit comments

Comments
 (0)