├── .gitignore ├── .spi.yml ├── LICENSE ├── Package.swift ├── README.md ├── Sources └── AIProxy │ ├── AIProxy.swift │ ├── AIProxyCertificatePinning.swift │ ├── AIProxyDeviceCheck.swift │ ├── AIProxyError.swift │ ├── AIProxyHTTPVerb.swift │ ├── AIProxyIdentifier.swift │ ├── AIProxyJSONValue.swift │ ├── AIProxyLogger.swift │ ├── AIProxyURLRequest.swift │ ├── AIProxyURLSession.swift │ ├── AIProxyUtils.swift │ ├── AnonymousAccount │ ├── AIProxyKeychain.swift │ ├── AIProxyStorage.swift │ ├── AnonymousAccount.swift │ └── AnonymousAccountStorage.swift │ ├── Anthropic │ ├── AnthropicAsyncChunks.swift │ ├── AnthropicDirectService.swift │ ├── AnthropicMessageRequestBody.swift │ ├── AnthropicMessageResponseBody.swift │ ├── AnthropicMessageStreamingChunk.swift │ ├── AnthropicMessageStreamingContentBlockStart.swift │ ├── AnthropicMessageStreamingDeltaBlock.swift │ ├── AnthropicProxiedService.swift │ └── AnthropicService.swift │ ├── AudioController.swift │ ├── AudioPCMPlayer.swift │ ├── AudioPCMPlayerError.swift │ ├── BackgroundNetworker.swift │ ├── Brave │ ├── BraveDirectService.swift │ ├── BraveProxiedService.swift │ ├── BraveService.swift │ └── BraveWebSearchResponseBody.swift │ ├── ClientLibErrorLogger.swift │ ├── DeepL │ ├── DeepLAccountType.swift │ ├── DeepLDirectService.swift │ ├── DeepLProxiedService.swift │ ├── DeepLService.swift │ ├── DeepLTranslateRequestBody.swift │ └── DeepLTranslateResponseBody.swift │ ├── DeepSeek │ ├── DeepSeekChatCompletionRequestBody.swift │ ├── DeepSeekChatCompletionResponseBody.swift │ ├── DeepSeekChatCompletionStreamingChunk.swift │ ├── DeepSeekDirectService.swift │ ├── DeepSeekProxiedService.swift │ ├── DeepSeekService.swift │ └── DeepSeekUsage.swift │ ├── Deserializable.swift │ ├── DirectService.swift │ ├── EachAI │ ├── EachAIDirectService.swift │ ├── EachAIError.swift │ ├── EachAIProxiedService.swift │ ├── EachAIService.swift │ ├── EachAITriggerWorkflowRequestBody.swift │ ├── EachAITriggerWorkflowResponseBody.swift │ └── EachAIWorkflowExecutionResponseBody.swift │ ├── ElevenLabs │ ├── ElevenLabsDirectService.swift │ ├── ElevenLabsProxiedService.swift │ ├── ElevenLabsService.swift │ ├── ElevenLabsSpeechToSpeechRequestBody.swift │ ├── ElevenLabsSpeechToTextRequestBody.swift │ ├── ElevenLabsSpeechToTextResponseBody.swift │ └── ElevenLabsTTSRequestBody.swift │ ├── Fal │ ├── FalDirectService.swift │ ├── FalError.swift │ ├── FalFastSDXLInputSchema.swift │ ├── FalFastSDXLOutputSchema.swift │ ├── FalFluxLoRAFastTrainingInputSchema.swift │ ├── FalFluxLoRAFastTrainingOutputSchema.swift │ ├── FalFluxLoRAInputSchema.swift │ ├── FalFluxLoRAOutputSchema.swift │ ├── FalFluxSchnellInputSchema.swift │ ├── FalFluxSchnellOutputSchema.swift │ ├── FalInitiateUploadRequestBody.swift │ ├── FalInitiateUploadResponseBody.swift │ ├── FalOutputImage.swift │ ├── FalProxiedService.swift │ ├── FalQueueResponseBody.swift │ ├── FalService+Convenience.swift │ ├── FalService.swift │ ├── FalTimings.swift │ ├── FalTryonInputSchema.swift │ └── FalTryonOutputSchema.swift │ ├── FireworksAI │ ├── FireworksAIDirectService.swift │ ├── FireworksAIProxiedService.swift │ └── FireworksAIService.swift │ ├── Gemini │ ├── GeminiDirectService.swift │ ├── GeminiError.swift │ ├── GeminiFile.swift │ ├── GeminiFileUploadRequestBody.swift │ ├── GeminiFileUploadResponseBody.swift │ ├── GeminiGenerateContentRequestBody.swift │ ├── GeminiGenerateContentResponseBody.swift │ ├── GeminiImagenRequestBody.swift │ ├── GeminiImagenResponseBody.swift │ ├── GeminiProxiedService.swift │ └── GeminiService.swift │ ├── Groq │ ├── 
GrogChatCompletionResponseBody.swift │ ├── GroqChatCompletionRequestBody.swift │ ├── GroqChatCompletionStreamingChunk.swift │ ├── GroqDirectService.swift │ ├── GroqProxiedService.swift │ ├── GroqService.swift │ ├── GroqTranscriptionRequestBody.swift │ └── GroqTranscriptionResponseBody.swift │ ├── MicrophonePCMSampleVendor.swift │ ├── MicrophonePCMSampleVendorAE.swift │ ├── MicrophonePCMSampleVendorAT.swift │ ├── MicrophonePCMSampleVendorCommon.swift │ ├── MicrophonePCMSampleVendorError.swift │ ├── MicrophoneSampleVendor.swift │ ├── Mistral │ ├── MistralChatCompletionRequestBody.swift │ ├── MistralChatCompletionResponseBody.swift │ ├── MistralChatCompletionStreamingChunk.swift │ ├── MistralChatUsage.swift │ ├── MistralDirectService.swift │ ├── MistralOCRRequestBody.swift │ ├── MistralOCRResponseBody.swift │ ├── MistralProxiedService.swift │ └── MistralService.swift │ ├── MultipartFormEncodable.swift │ ├── NetworkActor.swift │ ├── OpenAI │ ├── OpenAIChatCompletionRequestBody.swift │ ├── OpenAIChatCompletionResponseBody.swift │ ├── OpenAIChatCompletionStreamingChunk.swift │ ├── OpenAIChatUsage.swift │ ├── OpenAICreateImageEditRequestBody.swift │ ├── OpenAICreateImageRequestBody.swift │ ├── OpenAICreateImageResponseBody.swift │ ├── OpenAICreateResponseRequestBody.swift │ ├── OpenAICreateTranscriptionRequestBody.swift │ ├── OpenAICreateTranscriptionResponseBody.swift │ ├── OpenAIDirectService.swift │ ├── OpenAIEmbeddingRequestBody.swift │ ├── OpenAIEmbeddingResponseBody.swift │ ├── OpenAIFileUploadRequestBody.swift │ ├── OpenAIFileUploadResponseBody.swift │ ├── OpenAIModerationRequestBody.swift │ ├── OpenAIModerationResponseBody.swift │ ├── OpenAIProxiedService.swift │ ├── OpenAIRealtimeConversationItemCreate.swift │ ├── OpenAIRealtimeInputAudioBufferAppend.swift │ ├── OpenAIRealtimeInputAudioBufferSpeechStarted.swift │ ├── OpenAIRealtimeMessage.swift │ ├── OpenAIRealtimeResponseCreate.swift │ ├── OpenAIRealtimeResponseFunctionCallArgumentsDone.swift │ ├── OpenAIRealtimeSession.swift │ ├── OpenAIRealtimeSessionConfiguration.swift │ ├── OpenAIRealtimeSessionUpdate.swift │ ├── OpenAIRequestFormat.swift │ ├── OpenAIResponse.swift │ ├── OpenAIService.swift │ └── OpenAITextToSpeechRequestBody.swift │ ├── OpenRouter │ ├── OpenRouterChatCompletionChunk.swift │ ├── OpenRouterChatCompletionRequestBody.swift │ ├── OpenRouterChatCompletionResponseBody.swift │ ├── OpenRouterDirectService.swift │ ├── OpenRouterProxiedService.swift │ └── OpenRouterService.swift │ ├── Perplexity │ ├── PerplexityChatCompletionRequestBody.swift │ ├── PerplexityChatCompletionResponseBody.swift │ ├── PerplexityDirectService.swift │ ├── PerplexityProxiedService.swift │ ├── PerplexityRole.swift │ └── PerplexityService.swift │ ├── ProtectedPropertyQueue.swift │ ├── ProxiedService.swift │ ├── RealtimeActor.swift │ ├── ReceiptValidation │ ├── ReceiptValidationRequestBody.swift │ ├── ReceiptValidationResponseBody.swift │ └── ReceiptValidationService.swift │ ├── RemoteLogger │ └── RemoteLoggerService.swift │ ├── Replicate │ ├── ReplicateCreateModelRequestBody.swift │ ├── ReplicateDeepSeekVL7BInputSchema.swift │ ├── ReplicateDirectService.swift │ ├── ReplicateError.swift │ ├── ReplicateFileUploadRequestBody.swift │ ├── ReplicateFileUploadResponseBody.swift │ ├── ReplicateFluxDevControlNetInputSchema.swift │ ├── ReplicateFluxDevInputSchema.swift │ ├── ReplicateFluxDevOutputSchema.swift │ ├── ReplicateFluxFineTuneInputSchema.swift │ ├── ReplicateFluxKontextInputSchema.swift │ ├── ReplicateFluxProInputSchema.swift │ ├── 
ReplicateFluxProInputSchema_v1_1.swift │ ├── ReplicateFluxProOutputSchema.swift │ ├── ReplicateFluxProUltraInputSchema_v1_1.swift │ ├── ReplicateFluxPulidInputSchema.swift │ ├── ReplicateFluxSchnellInputSchema.swift │ ├── ReplicateFluxSchnellOutputSchema.swift │ ├── ReplicateFluxTrainingInput.swift │ ├── ReplicateModelResponseBody.swift │ ├── ReplicateModelVisibility.swift │ ├── ReplicatePredictionRequestBody.swift │ ├── ReplicatePredictionResponseBody.swift │ ├── ReplicateProxiedService.swift │ ├── ReplicateSDXLFreshInkInputSchema.swift │ ├── ReplicateSDXLInputSchema.swift │ ├── ReplicateSDXLOutputSchema.swift │ ├── ReplicateService+Convenience.swift │ ├── ReplicateService.swift │ ├── ReplicateSynchronousAPIOutput.swift │ ├── ReplicateTrainingRequestBody.swift │ └── ReplicateTrainingResponseBody.swift │ ├── Resources │ └── PrivacyInfo.xcprivacy │ ├── RuntimeInfo.swift │ ├── Serializable.swift │ ├── ServiceMixin.swift │ ├── SingleOrPartsEncodable.swift │ ├── StabilityAI │ ├── StabilityAIDirectService.swift │ ├── StabilityAIImageResponse.swift │ ├── StabilityAIProxiedService.swift │ ├── StabilityAIService.swift │ ├── StabilityAIStableDiffusionRequestBody.swift │ └── StabilityAIUltraRequestBody.swift │ └── TogetherAI │ ├── TogetherAIChatCompletionRequestBody.swift │ ├── TogetherAIChatCompletionResponseBody.swift │ ├── TogetherAIChatCompletionStreamingChunk.swift │ ├── TogetherAIDirectService.swift │ ├── TogetherAIProxiedService.swift │ └── TogetherAIService.swift └── Tests └── AIProxyTests ├── AIProxyJSONValueTests.swift ├── AnthropicMessageRequestTests.swift ├── AnthropicMessageResponseTests.swift ├── AnthropicMessageStreamingChunkTests.swift ├── BraveWebSearchResponseTests.swift ├── DeepLTranslationRequestTests.swift ├── DeepLTranslationResponseTests.swift ├── EachAIWorkflowTests.swift ├── ElevenLabsSpeechToTextResponseBodyTests.swift ├── FalFastSDXLResponseTests.swift ├── FalFluxLoRAFastTrainingOutputSchemaTests.swift ├── FalFluxLoRAInputSchemaTests.swift ├── FalFluxLoRAOutputSchemaTests.swift ├── FalFluxSchnellResponseTests.swift ├── FalQueueResponseTests.swift ├── FalUploadResponseTests.swift ├── GeminiFileUploadResponseBodyTests.swift ├── GeminiGenerateContentRequestTests.swift ├── GeminiGenerateContentResponseTests.swift ├── GeminiGenerateImageResponseTests.swift ├── GeminiStructuredOutputsRequestTests.swift ├── OpenAIAudioCodablesTests.swift ├── OpenAIChatCompletionRequestTests.swift ├── OpenAIChatCompletionResponseTests.swift ├── OpenAIChatCompletionStreamingChunkTests.swift ├── OpenAICreateImageResponseTests.swift ├── OpenAICreateResponseRequestTests.swift ├── OpenAIEndToEndTests.swift ├── OpenAIResponseObjectTests.swift ├── OpenRouterChatCompletionStreamingChunkTests.swift ├── OpenRouterToolCallResponseBodyTests.swift ├── PerplexityChatCompletionResponseBodyTests.swift ├── ReplicateFileResponseBodyTests.swift ├── ReplicateFluxShnellSchemaTests.swift ├── ReplicateModelResponseBodyTests.swift ├── ReplicatePredictionRequestBodyTests.swift ├── ReplicatePredictionResponseBodyTests.swift ├── ReplicateSyncAPIResponseBodyTests.swift ├── ReplicateTrainingResponseBodyTests.swift ├── StabilityAIRequestTests.swift ├── TestHelpers.swift ├── TogetherAIChatCompletionRequestTests.swift ├── TogetherAIChatCompletionResponseTests.swift └── TogetherAIChatCompletionStreamingChunkTests.swift /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | /.build 3 | /Packages 4 | xcuserdata/ 5 | DerivedData/ 6 | .swiftpm/configuration/registries.json 7 | 
.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata 8 | .netrc 9 | -------------------------------------------------------------------------------- /.spi.yml: -------------------------------------------------------------------------------- 1 | version: 1 2 | builder: 3 | configs: 4 | - documentation_targets: [AIProxy] 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) AIProxy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Package.swift: -------------------------------------------------------------------------------- 1 | // swift-tools-version: 5.9 2 | // The swift-tools-version declares the minimum version of Swift required to build this package. 3 | 4 | import PackageDescription 5 | 6 | let package = Package( 7 | name: "AIProxy", 8 | platforms: [ 9 | .iOS(.v15), 10 | .macOS(.v13), 11 | .visionOS(.v1), 12 | .watchOS(.v9) 13 | ], 14 | products: [ 15 | // Products define the executables and libraries a package produces, making them visible to other packages. 16 | .library( 17 | name: "AIProxy", 18 | targets: ["AIProxy"]), 19 | ], 20 | targets: [ 21 | // Targets are the basic building blocks of a package, defining a module or a test suite. 22 | // Targets can depend on other targets in this package and products from dependencies. 23 | .target( 24 | name: "AIProxy", 25 | resources: [ 26 | .process("Resources/PrivacyInfo.xcprivacy") 27 | ] 28 | ), 29 | .testTarget( 30 | name: "AIProxyTests", 31 | dependencies: ["AIProxy"] 32 | ), 33 | ] 34 | ) 35 | -------------------------------------------------------------------------------- /Sources/AIProxy/AIProxyDeviceCheck.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AIProxyDeviceCheck.swift 3 | // 4 | // 5 | // Created by Lou Zell on 6/23/24. 6 | // 7 | 8 | import Foundation 9 | import DeviceCheck 10 | import OSLog 11 | 12 | 13 | private let deviceCheckWarning = """ 14 | AIProxy warning: DeviceCheck is not available on this device. 15 | 16 | To use AIProxy on an iOS simulator, set an AIPROXY_DEVICE_CHECK_BYPASS environment variable. 17 | 18 | See the README at https://github.com/lzell/AIProxySwift for instructions. 
19 | """ 20 | 21 | 22 | enum AIProxyDeviceCheck { 23 | 24 | /// Gets a base64 encoded DeviceCheck token for this device, if possible. 25 | /// On iOS simulators, the return value will be nil and a log message will be written to console. 26 | /// 27 | /// If you are testing AIProxy on a simulator, please see the README.md file for instructions 28 | /// on adding a DeviceCheck bypass token to your Xcode env variables. 29 | /// 30 | /// It is important that you don't let a DeviceCheck bypass token slip into your production codebase. 31 | /// If you do, an attacker can easily use it themselves to bypass DeviceCheck. Your bypass token is intended 32 | /// to only be used by developers of your app, and is intended to only be included as a an environment variable. 33 | /// 34 | /// - Returns: A base 64 encoded DeviceCheck token, if possible 35 | @MainActor 36 | internal static func getToken(forClient clientID: String?) async -> String? { 37 | // We have seen `EXC_BAD_ACCESS` on accessing `DCDevice.current.isSupported` in the wild. 38 | // My theory is that the `DCDevice.h` header uses `NS_ASSUME_NONNULL_BEGIN` when it should not. 39 | // This juggling is an attempt at preventing the bad access crashes. 40 | let _dcDevice: DCDevice? = DCDevice.current 41 | guard let dcDevice = _dcDevice else { 42 | logIf(.error)?.error("DCDevice singleton is not available. Please contact Lou if you can reproduce this!") 43 | ClientLibErrorLogger.logDeviceCheckSingletonIsNil(clientID: clientID) 44 | return nil 45 | } 46 | 47 | guard dcDevice.isSupported else { 48 | if ProcessInfo.processInfo.environment["AIPROXY_DEVICE_CHECK_BYPASS"] == nil { 49 | logIf(.warning)?.warning("\(deviceCheckWarning, privacy: .public)") 50 | } 51 | #if !targetEnvironment(simulator) && !DEBUG 52 | ClientLibErrorLogger.logDeviceCheckNotSupported(clientID: clientID) 53 | #endif 54 | return nil 55 | } 56 | 57 | do { 58 | let data = try await dcDevice.generateToken() 59 | return data.base64EncodedString() 60 | } catch { 61 | logIf(.error)?.error("Could not create DeviceCheck token. Are you using an explicit bundle identifier?") 62 | #if !targetEnvironment(simulator) && !DEBUG 63 | ClientLibErrorLogger.logDeviceCheckCouldNotGenerateToken(error.localizedDescription, clientID: clientID) 64 | #endif 65 | return nil 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /Sources/AIProxy/AIProxyHTTPVerb.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AIProxyHTTPVerb.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/6/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// The HTTP verb to associate with a request. 11 | /// If you select 'automatic', a request with a body will default to 'POST' while a request without a body will default to 'GET' 12 | public enum AIProxyHTTPVerb: String { 13 | case automatic 14 | case get = "GET" 15 | case post = "POST" 16 | case put = "PUT" 17 | case delete = "DELETE" 18 | case patch = "PATCH" 19 | 20 | func toString(hasBody: Bool) -> String { 21 | if self != .automatic { 22 | return self.rawValue 23 | } 24 | return hasBody ? 
AIProxyHTTPVerb.post.rawValue : AIProxyHTTPVerb.get.rawValue 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /Sources/AIProxy/AIProxyLogger.swift: -------------------------------------------------------------------------------- 1 | import OSLog 2 | 3 | public enum AIProxyLogLevel: Int { 4 | case debug 5 | case info 6 | case warning 7 | case error 8 | case critical 9 | 10 | func isAtOrAboveThresholdLevel(_ threshold: AIProxyLogLevel) -> Bool { 11 | return self.rawValue >= threshold.rawValue 12 | } 13 | } 14 | 15 | internal var aiproxyCallerDesiredLogLevel = AIProxyLogLevel.warning 16 | internal let aiproxyLogger = Logger( 17 | subsystem: Bundle.main.bundleIdentifier ?? "UnknownApp", 18 | category: "AIProxy" 19 | ) 20 | 21 | // Why not create a wrapper around OSLog instead of forcing log callsites to include an `logIf()` check? 22 | // Because I like the Xcode log feature that links to the source location of the log. 23 | // If you create a wrapper, even one that is inlined, the Xcode source feature always links to the wrapper location. 24 | // 25 | // H/T Quinn the Eskimo! 26 | // https://developer.apple.com/forums/thread/774931 27 | @inline(__always) 28 | internal func logIf(_ logLevel: AIProxyLogLevel) -> Logger? { 29 | return logLevel.isAtOrAboveThresholdLevel(aiproxyCallerDesiredLogLevel) ? aiproxyLogger : nil 30 | } 31 | -------------------------------------------------------------------------------- /Sources/AIProxy/AIProxyURLSession.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AIProxyURLSession.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/5/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public enum AIProxyURLSession { 11 | public static var delegate = AIProxyCertificatePinningDelegate() 12 | 13 | /// Creates a URLSession that is configured for communication with aiproxy.pro 14 | static func create() -> URLSession { 15 | return URLSession( 16 | configuration: .ephemeral, 17 | delegate: self.delegate, 18 | delegateQueue: nil 19 | ) 20 | } 21 | 22 | } 23 | -------------------------------------------------------------------------------- /Sources/AIProxy/AnonymousAccount/AnonymousAccount.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnonymousAccount.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/2/25. 6 | // 7 | 8 | /// A best-effort anonymous ID that is stable across multiple devices of an iCloud account 9 | struct AnonymousAccount: Codable, Equatable { 10 | /// UUID of the anonymous account 11 | let uuid: String 12 | 13 | /// Unix time that the UUID was created 14 | let timestamp: Double 15 | } 16 | -------------------------------------------------------------------------------- /Sources/AIProxy/Anthropic/AnthropicMessageResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnthropicMessageResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 7/28/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// All docstrings in this file are from: https://docs.anthropic.com/en/api/messages 11 | public struct AnthropicMessageResponseBody: Decodable { 12 | public var content: [AnthropicMessageResponseContent] 13 | public let id: String 14 | public let model: String 15 | public let role: String 16 | public let stopReason: String? 17 | public let stopSequence: String? 
18 | public let type: String 19 | public let usage: AnthropicMessageUsage 20 | 21 | public init(content: [AnthropicMessageResponseContent], id: String, model: String, role: String, stopReason: String?, stopSequence: String?, type: String, usage: AnthropicMessageUsage) { 22 | self.content = content 23 | self.id = id 24 | self.model = model 25 | self.role = role 26 | self.stopReason = stopReason 27 | self.stopSequence = stopSequence 28 | self.type = type 29 | self.usage = usage 30 | } 31 | 32 | private enum CodingKeys: String, CodingKey { 33 | case content 34 | case id 35 | case model 36 | case role 37 | case stopReason = "stop_reason" 38 | case stopSequence = "stop_sequence" 39 | case type 40 | case usage 41 | } 42 | } 43 | 44 | 45 | public enum AnthropicMessageResponseContent: Decodable { 46 | case text(String) 47 | case toolUse(id: String, name: String, input: [String: AIProxyJSONValue]) 48 | 49 | private enum CodingKeys: String, CodingKey { 50 | case type 51 | case text 52 | case id 53 | case name 54 | case input 55 | } 56 | 57 | private enum ContentType: String, Decodable { 58 | case text 59 | case toolUse = "tool_use" 60 | } 61 | 62 | public init(from decoder: Decoder) throws { 63 | let container = try decoder.container(keyedBy: CodingKeys.self) 64 | let type = try container.decode(ContentType.self, forKey: .type) 65 | switch type { 66 | case .text: 67 | let value = try container.decode(String.self, forKey: .text) 68 | self = .text(value) 69 | case .toolUse: 70 | let id = try container.decode(String.self, forKey: .id) 71 | let name = try container.decode(String.self, forKey: .name) 72 | let input = try container.decode([String: AIProxyJSONValue].self, forKey: .input) 73 | self = .toolUse(id: id, name: name, input: input) 74 | } 75 | } 76 | } 77 | 78 | 79 | public struct AnthropicMessageUsage: Decodable { 80 | public let inputTokens: Int 81 | public let outputTokens: Int 82 | 83 | enum CodingKeys: String, CodingKey { 84 | case inputTokens = "input_tokens" 85 | case outputTokens = "output_tokens" 86 | } 87 | 88 | public init(inputTokens: Int, outputTokens: Int) { 89 | self.inputTokens = inputTokens 90 | self.outputTokens = outputTokens 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /Sources/AIProxy/Anthropic/AnthropicMessageStreamingChunk.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnthropicMessageStreamingChunk.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/7/24. 6 | // 7 | 8 | import Foundation 9 | 10 | 11 | public enum AnthropicMessageStreamingChunk { 12 | /// The `String` argument is the chat completion response text "delta", meaning the new bit 13 | /// of text that just became available. It is not the full message. 14 | case text(String) 15 | 16 | /// The name of the tool that Claude wants to call, and a buffered input to the function. 17 | /// The input argument is not a "delta". Internally to this lib, we accumulate the tool 18 | /// call deltas and map them to `[String: Any]` once all tool call deltas have been 19 | /// received. 20 | case toolUse(name: String, input: [String: Any]) 21 | } 22 | -------------------------------------------------------------------------------- /Sources/AIProxy/Anthropic/AnthropicMessageStreamingContentBlockStart.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnthropicMessageStreamingContentBlockStart.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/7/24. 
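//
// Usage sketch for the AnthropicMessageStreamingChunk enum defined above (a minimal,
// hypothetical example: it assumes `anthropicService` is any AnthropicService and that
// AnthropicAsyncChunks, returned by streamingMessageRequest, can be iterated with
// `for try await` to vend these chunks; neither assumption is shown in this section):
//
//     let chunks = try await anthropicService.streamingMessageRequest(body: requestBody)
//     for try await chunk in chunks {
//         switch chunk {
//         case .text(let delta):
//             // `delta` is only the newly available bit of text, not the full message.
//             print(delta, terminator: "")
//         case .toolUse(let name, let input):
//             // `input` is the fully buffered argument dictionary, not a delta.
//             print("Claude requested tool \(name) with input \(input)")
//         }
//     }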
6 | // 7 | 8 | import Foundation 9 | 10 | internal struct AnthropicMessageStreamingContentBlockStart: Decodable { 11 | let contentBlock: ContentBlock 12 | 13 | static func from(line: String) -> Self? { 14 | guard line.hasPrefix(#"data: {"type":"content_block_start""#) else { 15 | return nil 16 | } 17 | guard let chunkJSON = line.dropFirst(6).data(using: .utf8), 18 | let chunk = try? JSONDecoder().decode(Self.self, from: chunkJSON) else 19 | { 20 | logIf(.warning)?.warning("Received unexpected JSON from Anthropic: \(line)") 21 | return nil 22 | } 23 | return chunk 24 | } 25 | 26 | private enum CodingKeys: String, CodingKey { 27 | case contentBlock = "content_block" 28 | } 29 | } 30 | 31 | extension AnthropicMessageStreamingContentBlockStart { 32 | enum ContentBlock: Decodable { 33 | case text(String) 34 | case toolUse(name: String) 35 | 36 | private enum CodingKeys: String, CodingKey { 37 | case name 38 | case text 39 | case type 40 | } 41 | 42 | private enum PossibleTypes: String, Decodable { 43 | case text = "text" 44 | case toolUse = "tool_use" 45 | } 46 | 47 | public init(from decoder: any Decoder) throws { 48 | let container = try decoder.container(keyedBy: CodingKeys.self) 49 | let type = try container.decode(PossibleTypes.self, forKey: .type) 50 | switch type { 51 | case PossibleTypes.text: 52 | self = .text(try container.decode(String.self, forKey: CodingKeys.text)) 53 | case PossibleTypes.toolUse: 54 | self = .toolUse(name: try container.decode(String.self, forKey: CodingKeys.name)) 55 | } 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /Sources/AIProxy/Anthropic/AnthropicMessageStreamingDeltaBlock.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnthropicMessageStreamingDeltaBlock.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/7/24. 6 | // 7 | 8 | import Foundation 9 | 10 | internal struct AnthropicMessageStreamingDeltaBlock: Decodable { 11 | /// We do not vend this type. See AnthropicMessageStreamingChunk for the final product that 12 | /// we vend to the client. 13 | let delta: Delta 14 | 15 | static func from(line: String) -> Self? { 16 | guard line.hasPrefix(#"data: {"type":"content_block_delta""#) else { 17 | return nil 18 | } 19 | guard let chunkJSON = line.dropFirst(6).data(using: .utf8), 20 | let chunk = try? 
JSONDecoder().decode(Self.self, from: chunkJSON) else 21 | { 22 | logIf(.warning)?.warning("Received unexpected JSON from Anthropic: \(line)") 23 | return nil 24 | } 25 | return chunk 26 | } 27 | } 28 | 29 | extension AnthropicMessageStreamingDeltaBlock { 30 | enum Delta: Decodable { 31 | case text(String) 32 | case toolUse(String) 33 | 34 | private enum CodingKeys: String, CodingKey { 35 | case partialJSON = "partial_json" 36 | case text 37 | case type 38 | } 39 | 40 | private enum PossibleTypes: String, Decodable { 41 | case textDelta = "text_delta" 42 | case inputJSONDelta = "input_json_delta" 43 | } 44 | 45 | public init(from decoder: any Decoder) throws { 46 | let container = try decoder.container(keyedBy: CodingKeys.self) 47 | let type = try container.decode(PossibleTypes.self, forKey: .type) 48 | switch type { 49 | case .textDelta: 50 | self = .text(try container.decode(String.self, forKey: .text)) 51 | case .inputJSONDelta: 52 | self = .toolUse(try container.decode(String.self, forKey: .partialJSON)) 53 | } 54 | } 55 | } 56 | } 57 | 58 | 59 | -------------------------------------------------------------------------------- /Sources/AIProxy/Anthropic/AnthropicService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnthropicService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/13/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol AnthropicService { 11 | 12 | /// Initiates a non-streaming request to /v1/messages. 13 | /// 14 | /// - Parameters: 15 | /// - body: The message request body. See this reference: 16 | /// https://docs.anthropic.com/en/api/messages 17 | /// - Returns: The message response body, See this reference: 18 | /// https://platform.openai.com/docs/api-reference/chat/object 19 | func messageRequest( 20 | body: AnthropicMessageRequestBody 21 | ) async throws -> AnthropicMessageResponseBody 22 | 23 | 24 | /// Initiates a streaming request to /v1/messages. 25 | /// 26 | /// - Parameters: 27 | /// - body: The message request body. See this reference: 28 | /// https://docs.anthropic.com/en/api/messages 29 | /// - Returns: The message response body, See this reference: 30 | /// https://platform.openai.com/docs/api-reference/chat/object 31 | func streamingMessageRequest( 32 | body: AnthropicMessageRequestBody 33 | ) async throws -> AnthropicAsyncChunks 34 | } 35 | 36 | -------------------------------------------------------------------------------- /Sources/AIProxy/AudioPCMPlayerError.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AudioPCMPlayerError.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/20/25. 6 | // 7 | 8 | import Foundation 9 | 10 | public enum AudioPCMPlayerError: LocalizedError { 11 | case couldNotConfigureAudioEngine(String) 12 | 13 | public var errorDescription: String? { 14 | switch self { 15 | case .couldNotConfigureAudioEngine(let message): 16 | return message 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/BackgroundNetworker.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Networker.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/24/24. 
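//
// Usage sketch for the helper defined below (a minimal, hypothetical call site;
// `urlSession` and `urlRequest` are placeholders supplied by the caller):
//
//     let (data, httpResponse) = try await BackgroundNetworker.makeRequestAndWaitForData(
//         urlSession,
//         urlRequest
//     )
//     // Status codes above 299 have already been converted into
//     // AIProxyError.unsuccessfulRequest, so `data` can be deserialized directly.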
6 | 7 | import Foundation 8 | 9 | struct BackgroundNetworker { 10 | 11 | /// Throws AIProxyError.unsuccessfulRequest if the returned status code is non-200 12 | @NetworkActor 13 | static func makeRequestAndWaitForData( 14 | _ session: URLSession, 15 | _ request: URLRequest 16 | ) async throws -> (Data, HTTPURLResponse) { 17 | let (data, res) = try await session.data(for: request) 18 | guard let httpResponse = res as? HTTPURLResponse else { 19 | throw AIProxyError.assertion("Network response is not an http response") 20 | } 21 | if httpResponse.statusCode > 299 { 22 | logIf(.error)?.error("Receieved a non-200 status code: \(httpResponse.statusCode)") 23 | throw AIProxyError.unsuccessfulRequest( 24 | statusCode: httpResponse.statusCode, 25 | responseBody: String(data: data, encoding: .utf8) ?? "" 26 | ) 27 | } 28 | return (data, httpResponse) 29 | } 30 | 31 | /// Throws AIProxyError.unsuccessfulRequest if the returned status code is non-200 32 | @NetworkActor 33 | static func makeRequestAndWaitForAsyncBytes( 34 | _ session: URLSession, 35 | _ request: URLRequest 36 | ) async throws -> (URLSession.AsyncBytes, HTTPURLResponse) { 37 | let (asyncBytes, res) = try await session.bytes(for: request) 38 | 39 | guard let httpResponse = res as? HTTPURLResponse else { 40 | throw AIProxyError.assertion("Network response is not an http response") 41 | } 42 | 43 | if (httpResponse.statusCode > 299) { 44 | let responseBody = try await asyncBytes.lines.reduce(into: "") { $0 += $1 } 45 | throw AIProxyError.unsuccessfulRequest( 46 | statusCode: httpResponse.statusCode, 47 | responseBody: responseBody 48 | ) 49 | } 50 | return (asyncBytes, httpResponse) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /Sources/AIProxy/Brave/BraveDirectService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // BraveDirectService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/7/25. 6 | // 7 | 8 | import Foundation 9 | 10 | open class BraveDirectService: BraveService, DirectService { 11 | private let unprotectedAPIKey: String 12 | 13 | /// This initializer is not public on purpose. 14 | /// Customers are expected to use the factory `AIProxy.braveDirectService` defined in AIProxy.swift 15 | internal init(unprotectedAPIKey: String) { 16 | self.unprotectedAPIKey = unprotectedAPIKey 17 | } 18 | 19 | /// Makes a web search through Brave. See this reference: 20 | /// https://api-dashboard.search.brave.com/app/documentation/web-search/get-started 21 | /// 22 | /// - Parameters: 23 | /// - query: The query to send to Brave 24 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 25 | /// - Returns: The search result. There are many properties on this result, so take some time with 26 | /// BraveWebSearchResponseBody to understand how to get the information you want out of it. 
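    ///
    /// Example (a minimal sketch; the factory call below is an assumption based on the note
    /// above that customers create this service via `AIProxy.braveDirectService`, whose exact
    /// parameters are not shown in this file):
    ///
    /// ```swift
    /// let braveService = AIProxy.braveDirectService(unprotectedAPIKey: "your-api-key") // assumed factory shape
    /// let searchResult = try await braveService.webSearchRequest(
    ///     query: "best pizza in nyc",
    ///     secondsToWait: 30
    /// )
    /// ```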
27 | public func webSearchRequest( 28 | query: String, 29 | secondsToWait: UInt 30 | ) async throws -> BraveWebSearchResponseBody { 31 | guard let encodedQuery = query.addingPercentEncoding(withAllowedCharacters: .urlQueryAllowed) else { 32 | throw AIProxyError.assertion("Could not create an encoded version of query params for brave search") 33 | } 34 | let request = try AIProxyURLRequest.createDirect( 35 | baseURL: "https://api.search.brave.com", 36 | path: "/res/v1/web/search?q=" + encodedQuery, 37 | body: nil, 38 | verb: .get, 39 | secondsToWait: secondsToWait, 40 | contentType: "application/json", 41 | additionalHeaders: [ 42 | "X-Subscription-Token": self.unprotectedAPIKey 43 | ] 44 | ) 45 | return try await self.makeRequestAndDeserializeResponse(request) 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /Sources/AIProxy/Brave/BraveProxiedService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // BraveProxiedService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/7/25. 6 | // 7 | 8 | import Foundation 9 | 10 | open class BraveProxiedService: BraveService, ProxiedService { 11 | 12 | private let partialKey: String 13 | private let serviceURL: String 14 | private let clientID: String? 15 | 16 | /// This initializer is not public on purpose. 17 | /// Customers are expected to use the factory `AIProxy.braveService` defined in AIProxy.swift 18 | internal init( 19 | partialKey: String, 20 | serviceURL: String, 21 | clientID: String? 22 | ) { 23 | self.partialKey = partialKey 24 | self.serviceURL = serviceURL 25 | self.clientID = clientID 26 | } 27 | 28 | /// Makes a web search through Brave. See this reference: 29 | /// https://api-dashboard.search.brave.com/app/documentation/web-search/get-started 30 | /// 31 | /// - Parameters: 32 | /// - query: The query to send to Brave 33 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 34 | /// - Returns: The search result. There are many properties on this result, so take some time with 35 | /// BraveWebSearchResponseBody to understand how to get the information you want out of it. 36 | public func webSearchRequest( 37 | query: String, 38 | secondsToWait: UInt 39 | ) async throws -> BraveWebSearchResponseBody { 40 | guard let encodedQuery = query.addingPercentEncoding(withAllowedCharacters: .urlQueryAllowed) else { 41 | throw AIProxyError.assertion("Could not create an encoded version of query params for brave search") 42 | } 43 | let request = try await AIProxyURLRequest.create( 44 | partialKey: self.partialKey, 45 | serviceURL: self.serviceURL, 46 | clientID: self.clientID, 47 | proxyPath: "/res/v1/web/search?q=" + encodedQuery, 48 | body: nil, 49 | verb: .get, 50 | secondsToWait: secondsToWait, 51 | contentType: "application/json" 52 | ) 53 | return try await self.makeRequestAndDeserializeResponse(request) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /Sources/AIProxy/Brave/BraveService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // BraveService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/7/25. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol BraveService { 11 | 12 | /// Makes a web search through Brave. 
See this reference: 13 | /// https://api-dashboard.search.brave.com/app/documentation/web-search/get-started 14 | /// 15 | /// - Parameters: 16 | /// - query: The query to send to Brave 17 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 18 | /// - Returns: The search result. There are many properties on this result, so take some time with 19 | /// BraveWebSearchResponseBody to understand how to get the information you want out of it. 20 | func webSearchRequest( 21 | query: String, 22 | secondsToWait: UInt 23 | ) async throws -> BraveWebSearchResponseBody 24 | } 25 | 26 | extension BraveService { 27 | public func webSearchRequest(query: String) async throws -> BraveWebSearchResponseBody { 28 | return try await self.webSearchRequest(query: query, secondsToWait: 60) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepL/DeepLAccountType.swift: -------------------------------------------------------------------------------- 1 | // 2 | // File.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/15/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public enum DeepLAccountType { 11 | case free 12 | case paid 13 | } 14 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepL/DeepLDirectService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepLDirectService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/15/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class DeepLDirectService: DeepLService, DirectService { 11 | private let unprotectedAPIKey: String 12 | private let accountType: DeepLAccountType 13 | 14 | /// This initializer is not public on purpose. 15 | /// Customers are expected to use the factory `AIProxy.directDeepLService` defined in AIProxy.swift 16 | internal init(unprotectedAPIKey: String, accountType: DeepLAccountType) { 17 | self.unprotectedAPIKey = unprotectedAPIKey 18 | self.accountType = accountType 19 | } 20 | 21 | /// Initiates a request to /v2/translate 22 | /// 23 | /// - Parameters: 24 | /// - body: The translation request. See this reference: 25 | /// https://developers.deepl.com/docs/api-reference/translate/openapi-spec-for-text-translation 26 | /// - Returns: The deserialized response body 27 | public func translateRequest( 28 | body: DeepLTranslateRequestBody 29 | ) async throws -> DeepLTranslateResponseBody { 30 | let request = try AIProxyURLRequest.createDirect( 31 | baseURL: self.accountType == .free ? "https://api-free.deepl.com" : "https://api.deepl.com", 32 | path: "/v2/translate", 33 | body: try body.serialize(), 34 | verb: .post, 35 | secondsToWait: 60, 36 | contentType: "application/json", 37 | additionalHeaders: [ 38 | "Authorization": "DeepL-Auth-Key \(self.unprotectedAPIKey)" 39 | ] 40 | ) 41 | return try await self.makeRequestAndDeserializeResponse(request) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepL/DeepLProxiedService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepLProxiedService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/3/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class DeepLProxiedService: DeepLService, ProxiedService { 11 | private let partialKey: String 12 | private let serviceURL: String 13 | private let clientID: String? 14 | 15 | /// This initializer is not public on purpose. 
16 | /// Customers are expected to use the factory `AIProxy.deepLService` defined in AIProxy.swift 17 | internal init(partialKey: String, serviceURL: String, clientID: String?) { 18 | self.partialKey = partialKey 19 | self.serviceURL = serviceURL 20 | self.clientID = clientID 21 | } 22 | 23 | /// Initiates a request to /v2/translate 24 | /// 25 | /// - Parameters: 26 | /// - body: The translation request. See this reference: 27 | /// https://developers.deepl.com/docs/api-reference/translate/openapi-spec-for-text-translation 28 | /// - Returns: The deserialized response body 29 | public func translateRequest( 30 | body: DeepLTranslateRequestBody 31 | ) async throws -> DeepLTranslateResponseBody { 32 | let request = try await AIProxyURLRequest.create( 33 | partialKey: self.partialKey, 34 | serviceURL: self.serviceURL, 35 | clientID: self.clientID, 36 | proxyPath: "/v2/translate", 37 | body: try body.serialize(), 38 | verb: .post, 39 | secondsToWait: 60, 40 | contentType: "application/json" 41 | ) 42 | return try await self.makeRequestAndDeserializeResponse(request) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepL/DeepLService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepLService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/15/24. 6 | // 7 | import Foundation 8 | 9 | public protocol DeepLService { 10 | 11 | /// Initiates a request to /v2/translate 12 | /// 13 | /// - Parameters: 14 | /// - body: The translation request. See this reference: 15 | /// https://developers.deepl.com/docs/api-reference/translate/openapi-spec-for-text-translation 16 | /// - Returns: The deserialized response body 17 | func translateRequest( 18 | body: DeepLTranslateRequestBody 19 | ) async throws -> DeepLTranslateResponseBody 20 | } 21 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepL/DeepLTranslateResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepLTranslateResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/3/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// All docstrings on this model are from: 11 | /// https://developers.deepl.com/docs/api-reference/translate/openapi-spec-for-text-translation 12 | public struct DeepLTranslateResponseBody: Decodable { 13 | public let translations: [DeepLTranslation] 14 | 15 | public init(translations: [DeepLTranslation]) { 16 | self.translations = translations 17 | } 18 | } 19 | 20 | public struct DeepLTranslation: Decodable { 21 | /// The language detected in the source text. It reflects the value of the `source_lang` parameter, when specified. 22 | /// Example: "EN" 23 | public let detectedSourceLanguage: String 24 | 25 | /// The translated text. 26 | /// Example: "Hallo, Welt!" 
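    ///
    /// A minimal usage sketch (hypothetical variable name): given the `DeepLTranslateResponseBody`
    /// returned by `translateRequest`, this value is typically read as
    /// `response.translations.first?.text`.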
27 | public let text: String 28 | 29 | public init(detectedSourceLanguage: String, text: String) { 30 | self.detectedSourceLanguage = detectedSourceLanguage 31 | self.text = text 32 | } 33 | 34 | private enum CodingKeys: String, CodingKey { 35 | case detectedSourceLanguage = "detected_source_language" 36 | case text 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepSeek/DeepSeekService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepSeekService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/27/25. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol DeepSeekService { 11 | 12 | /// Initiates a non-streaming chat completion request to /chat/completions. 13 | /// 14 | /// - Parameters: 15 | /// - body: The request body to send to DeepSeek. See this reference: 16 | /// https://api-docs.deepseek.com/api/create-chat-completion 17 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 18 | /// - Returns: The chat response. See this reference: 19 | /// https://api-docs.deepseek.com/api/create-chat-completion#responses 20 | func chatCompletionRequest( 21 | body: DeepSeekChatCompletionRequestBody, 22 | secondsToWait: UInt 23 | ) async throws -> DeepSeekChatCompletionResponseBody 24 | 25 | /// Initiates a streaming chat completion request to /chat/completions. 26 | /// 27 | /// - Parameters: 28 | /// - body: The request body to send to DeepSeek. See this reference: 29 | /// https://api-docs.deepseek.com/api/create-chat-completion 30 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 31 | /// - Returns: An async sequence of completion chunks. See the 'Streaming' tab here: 32 | /// https://api-docs.deepseek.com/api/create-chat-completion#responses 33 | func streamingChatCompletionRequest( 34 | body: DeepSeekChatCompletionRequestBody, 35 | secondsToWait: UInt 36 | ) async throws -> AsyncCompactMapSequence, DeepSeekChatCompletionChunk> 37 | } 38 | 39 | extension DeepSeekService { 40 | public func chatCompletionRequest( 41 | body: DeepSeekChatCompletionRequestBody 42 | ) async throws -> DeepSeekChatCompletionResponseBody { 43 | return try await self.chatCompletionRequest(body: body, secondsToWait: 60) 44 | } 45 | 46 | public func streamingChatCompletionRequest( 47 | body: DeepSeekChatCompletionRequestBody 48 | ) async throws -> AsyncCompactMapSequence, DeepSeekChatCompletionChunk> { 49 | return try await self.streamingChatCompletionRequest(body: body, secondsToWait: 60) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /Sources/AIProxy/DeepSeek/DeepSeekUsage.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepSeekUsage.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/27/25. 6 | // 7 | 8 | public struct DeepSeekUsage: Decodable { 9 | /// Number of tokens in the generated completion. 10 | public let completionTokens: Int? 11 | 12 | /// Breakdown of tokens used in a completion. 13 | public let completionTokensDetails: CompletionTokenDetails? 14 | 15 | /// Number of tokens in the prompt that hits the context cache. 16 | public let promptCacheHitTokens: Int? 17 | 18 | /// Number of tokens in the prompt that misses the context cache. 19 | public let promptCacheMissTokens: Int? 20 | 21 | /// Number of tokens in the prompt. It equals `prompt_cache_hit_tokens` + `prompt_cache_miss_tokens`. 
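    ///
    /// For example (illustrative numbers only): a prompt that reuses 1024 cached tokens and adds
    /// 256 uncached ones is reported as `prompt_cache_hit_tokens = 1024`,
    /// `prompt_cache_miss_tokens = 256`, and `prompt_tokens = 1280`.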
22 | public let promptTokens: Int? 23 | 24 | /// Total number of tokens used in the request (prompt + completion). 25 | public let totalTokens: Int? 26 | 27 | private enum CodingKeys: String, CodingKey { 28 | case completionTokens = "completion_tokens" 29 | case completionTokensDetails = "completion_tokens_details" 30 | case promptCacheHitTokens = "prompt_cache_hit_tokens" 31 | case promptCacheMissTokens = "prompt_cache_miss_tokens" 32 | case promptTokens = "prompt_tokens" 33 | case totalTokens = "total_tokens" 34 | } 35 | } 36 | 37 | 38 | extension DeepSeekUsage { 39 | public struct CompletionTokenDetails: Decodable { 40 | /// Tokens generated by the model for reasoning. 41 | public let reasoningTokens: Int? 42 | 43 | private enum CodingKeys: String, CodingKey { 44 | case reasoningTokens = "reasoning_tokens" 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /Sources/AIProxy/Deserializable.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Deserializable.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/14/24. 6 | // 7 | 8 | import Foundation 9 | 10 | extension Decodable { 11 | static func deserialize(from data: Data) throws -> Self { 12 | let decoder = JSONDecoder() 13 | return try decoder.decode(Self.self, from: data) 14 | } 15 | 16 | static func deserialize(from str: String) throws -> Self { 17 | guard let data = str.data(using: .utf8) else { 18 | throw AIProxyError.assertion("Could not create utf8 data from string") 19 | } 20 | return try self.deserialize(from: data) 21 | } 22 | 23 | static func deserialize(fromLine line: String) -> Self? { 24 | guard line.hasPrefix("data: ") else { 25 | // Special case to ignore OpenRouter and DeepSeek SSE comments 26 | if line != ": OPENROUTER PROCESSING" && line != ": keep-alive" { 27 | logIf(.warning)?.warning("Received unexpected line from aiproxy: \(line)") 28 | } 29 | return nil 30 | } 31 | 32 | guard line != "data: [DONE]" else { 33 | logIf(.debug)?.debug("Streaming response has finished") 34 | return nil 35 | } 36 | 37 | guard let chunkJSON = line.dropFirst(6).data(using: .utf8), 38 | let chunk = try? JSONDecoder().decode(Self.self, from: chunkJSON) else 39 | { 40 | logIf(.warning)?.warning("Received unexpected JSON from aiproxy: \(line)") 41 | return nil 42 | } 43 | 44 | // if ll(.debug) { aiproxyLogger.debug("Received a chunk: \(line)") } 45 | return chunk 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /Sources/AIProxy/DirectService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DirectService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/16/24. 6 | // 7 | 8 | import Foundation 9 | 10 | protocol DirectService: ServiceMixin {} 11 | extension DirectService { 12 | var urlSession: URLSession { 13 | return AIProxyUtils.directURLSession() 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /Sources/AIProxy/EachAI/EachAIDirectService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // EachAIDirectService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/07/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class EachAIDirectService: EachAIService, DirectService { 11 | private let unprotectedAPIKey: String 12 | 13 | /// This initializer is not public on purpose. 
14 | /// Customers are expected to use the factory `AIProxy.eachAIDirectService` defined in AIProxy.swift 15 | internal init( 16 | unprotectedAPIKey: String 17 | ) { 18 | self.unprotectedAPIKey = unprotectedAPIKey 19 | } 20 | 21 | /// Runs a workflow on EachAI 22 | /// 23 | /// - Parameters: 24 | /// - workflowID: The workflow ID to trigger. You can find your ID in the EachAI dashboard. 25 | /// It will be included in the URL of the workflow that you are viewing, e.g. 26 | /// https://console.eachlabs.ai/flow/ 27 | /// 28 | /// - body: The workflow request body. See this reference: 29 | /// https://docs.eachlabs.ai/api-reference/flows/trigger-ai-workflow 30 | /// 31 | /// - Returns: A trigger workflow response, which contains a triggerID that you can use to 32 | /// poll for the result. 33 | public func triggerWorkflow( 34 | workflowID: String, 35 | body: EachAITriggerWorkflowRequestBody 36 | ) async throws -> EachAITriggerWorkflowResponseBody { 37 | let request = try AIProxyURLRequest.createDirect( 38 | baseURL: "https://flows.eachlabs.ai", 39 | path: "/api/v1/\(workflowID)/trigger", 40 | body: try body.serialize(), 41 | verb: .post, 42 | secondsToWait: 60, 43 | contentType: "application/json", 44 | additionalHeaders: [ 45 | "X-API-Key": self.unprotectedAPIKey 46 | ] 47 | ) 48 | return try await self.makeRequestAndDeserializeResponse(request) 49 | } 50 | 51 | /// You probably want to use `pollForWorkflowExecutionComplete`, defined as a protocol extension in EachAIService.swift. 52 | /// This method gets the workflow execution response a single time, which may still be in the processing state. 53 | /// https://docs.eachlabs.ai/api-reference/execution/get-flow-execution 54 | public func getWorkflowExecution( 55 | workflowID: String, 56 | triggerID: String 57 | ) async throws -> EachAIWorkflowExecutionResponseBody { 58 | let request = try AIProxyURLRequest.createDirect( 59 | baseURL: "https://flows.eachlabs.ai", 60 | path: "/api/v1/\(workflowID)/executions/\(triggerID)", 61 | body: nil, 62 | verb: .get, 63 | secondsToWait: 60, 64 | additionalHeaders: [ 65 | "X-API-Key": self.unprotectedAPIKey 66 | ] 67 | ) 68 | return try await self.makeRequestAndDeserializeResponse(request) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /Sources/AIProxy/EachAI/EachAIError.swift: -------------------------------------------------------------------------------- 1 | // 2 | // EachAIError.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/8/24. 6 | // 7 | 8 | import Foundation 9 | 10 | enum EachAIError: LocalizedError { 11 | case reachedRetryLimit 12 | 13 | var errorDescription: String? { 14 | switch self { 15 | case .reachedRetryLimit: 16 | return "Reached EachAI polling retry limit" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/EachAI/EachAIProxiedService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // EachAIProxiedService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/07/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class EachAIProxiedService: EachAIService, ProxiedService { 11 | private let partialKey: String 12 | private let serviceURL: String 13 | private let clientID: String? 14 | 15 | /// This initializer is not public on purpose. 
16 | /// Customers are expected to use the factory `AIProxy.eachAIService` defined in AIProxy.swift 17 | internal init( 18 | partialKey: String, 19 | serviceURL: String, 20 | clientID: String? 21 | ) { 22 | self.partialKey = partialKey 23 | self.serviceURL = serviceURL 24 | self.clientID = clientID 25 | } 26 | 27 | /// Runs a workflow on EachAI 28 | /// 29 | /// - Parameters: 30 | /// - workflowID: The workflow ID to trigger. You can find your ID in the EachAI dashboard. 31 | /// It will be included in the URL of the workflow that you are viewing, e.g. 32 | /// https://console.eachlabs.ai/flow/ 33 | /// 34 | /// - body: The workflow request body. See this reference: 35 | /// https://docs.eachlabs.ai/api-reference/flows/trigger-ai-workflow 36 | /// 37 | /// - Returns: A trigger workflow response, which contains a triggerID that you can use to 38 | /// poll for the result. 39 | public func triggerWorkflow( 40 | workflowID: String, 41 | body: EachAITriggerWorkflowRequestBody 42 | ) async throws -> EachAITriggerWorkflowResponseBody { 43 | let request = try await AIProxyURLRequest.create( 44 | partialKey: self.partialKey, 45 | serviceURL: self.serviceURL, 46 | clientID: self.clientID, 47 | proxyPath: "/api/v1/\(workflowID)/trigger", 48 | body: try body.serialize(), 49 | verb: .post, 50 | secondsToWait: 60, 51 | contentType: "application/json" 52 | ) 53 | return try await self.makeRequestAndDeserializeResponse(request) 54 | } 55 | 56 | /// You probably want to use `pollForWorkflowExecutionComplete`, defined as a protocol extension in EachAIService.swift. 57 | /// This method gets the workflow execution response a single time, which may still be in the processing state. 58 | /// https://docs.eachlabs.ai/api-reference/execution/get-flow-execution 59 | public func getWorkflowExecution( 60 | workflowID: String, 61 | triggerID: String 62 | ) async throws -> EachAIWorkflowExecutionResponseBody { 63 | let request = try await AIProxyURLRequest.create( 64 | partialKey: self.partialKey, 65 | serviceURL: self.serviceURL, 66 | clientID: self.clientID, 67 | proxyPath: "/api/v1/\(workflowID)/executions/\(triggerID)", 68 | body: nil, 69 | verb: .get, 70 | secondsToWait: 60 71 | ) 72 | return try await self.makeRequestAndDeserializeResponse(request) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /Sources/AIProxy/EachAI/EachAITriggerWorkflowRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // EachAITriggerWorkflowRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/7/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// See this reference: https://docs.eachlabs.ai/api-reference/flows/trigger-ai-workflow 11 | /// Note that the workflowID is not contained in the body. Rather, it is supplied as part of the path. 12 | public struct EachAITriggerWorkflowRequestBody: Encodable { 13 | // Required 14 | let parameters: [String: AIProxyJSONValue] 15 | 16 | public init(parameters: [String : AIProxyJSONValue]) { 17 | self.parameters = parameters 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/EachAI/EachAITriggerWorkflowResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // EachAITriggerWorkflowResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/7/24. 
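//
// Usage sketch for the trigger-then-fetch flow described in the EachAI services above
// (a minimal, hypothetical example: `eachAIService` is one of the EachAI services shown above,
// and the workflow ID and parameters are placeholders; the comments above recommend the
// protocol extension `pollForWorkflowExecutionComplete` for polling, but its signature is not
// shown in this section):
//
//     let parameters: [String: AIProxyJSONValue] = [:]  // fill with your workflow's inputs
//     let trigger = try await eachAIService.triggerWorkflow(
//         workflowID: "your-workflow-id",
//         body: EachAITriggerWorkflowRequestBody(parameters: parameters)
//     )
//     let execution = try await eachAIService.getWorkflowExecution(
//         workflowID: "your-workflow-id",
//         triggerID: trigger.triggerID
//     )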
6 | // 7 | 8 | import Foundation 9 | 10 | public struct EachAITriggerWorkflowResponseBody: Decodable { 11 | public let triggerID: String 12 | public let message: String? 13 | public let status: String? 14 | 15 | public init(triggerID: String, message: String?, status: String?) { 16 | self.triggerID = triggerID 17 | self.message = message 18 | self.status = status 19 | } 20 | 21 | private enum CodingKeys: String, CodingKey { 22 | case triggerID = "trigger_id" 23 | case message 24 | case status 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /Sources/AIProxy/ElevenLabs/ElevenLabsService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ElevenLabsService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/18/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol ElevenLabsService { 11 | /// Converts text to speech with a request to `/v1/text-to-speech/` 12 | /// 13 | /// - Parameters: 14 | /// 15 | /// - voiceID: The Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the 16 | /// available voices. 17 | /// 18 | /// - body: The request body to send to ElevenLabs. See this reference: 19 | /// https://elevenlabs.io/docs/api-reference/text-to-speech/convert#request 20 | /// 21 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 22 | /// 23 | /// - Returns: Returns audio/mpeg data 24 | func ttsRequest( 25 | voiceID: String, 26 | body: ElevenLabsTTSRequestBody, 27 | secondsToWait: UInt 28 | ) async throws -> Data 29 | 30 | /// Converts speech to speech with a request to `/v1/speech-to-speech/` 31 | /// 32 | /// - Parameters: 33 | /// 34 | /// - voiceID: The Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the 35 | /// available voices. 36 | /// 37 | /// - body: The request body to send to ElevenLabs. See this reference: 38 | /// https://elevenlabs.io/docs/api-reference/speech-to-speech/convert#request 39 | /// 40 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 41 | /// 42 | /// - Returns: Returns audio/mpeg data 43 | func speechToSpeechRequest( 44 | voiceID: String, 45 | body: ElevenLabsSpeechToSpeechRequestBody, 46 | secondsToWait: UInt 47 | ) async throws -> Data 48 | 49 | /// Converts text to speech with a request to `/v1/speech-to-text` 50 | /// 51 | /// - Parameters: 52 | /// 53 | /// - body: The request body to send to ElevenLabs. See this reference: 54 | /// https://elevenlabs.io/docs/api-reference/speech-to-text/convert#request 55 | /// 56 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 57 | /// 58 | /// - Returns: The speech to text response body 59 | func speechToTextRequest( 60 | body: ElevenLabsSpeechToTextRequestBody, 61 | secondsToWait: UInt 62 | ) async throws -> ElevenLabsSpeechToTextResponseBody 63 | } 64 | -------------------------------------------------------------------------------- /Sources/AIProxy/ElevenLabs/ElevenLabsSpeechToSpeechRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ElevenLabsSTSRequestBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/6/25. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Request body for a text-to-speech request to ElevenLabs. 11 | /// https://elevenlabs.io/docs/api-reference/speech-to-speech/convert 12 | /// Note that `voiceID` is set on the path, not in the request body. 
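///
/// A minimal usage sketch (hypothetical values: `recordedAudio` is the caller's audio `Data`
/// and `elevenLabsService` is an ElevenLabsService; the method signature is the one declared
/// in ElevenLabsService above):
///
/// ```swift
/// let body = ElevenLabsSpeechToSpeechRequestBody(audio: recordedAudio, removeBackgroundNoise: true)
/// let mpegData = try await elevenLabsService.speechToSpeechRequest(
///     voiceID: "your-voice-id",
///     body: body,
///     secondsToWait: 120
/// )
/// ```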
13 | public struct ElevenLabsSpeechToSpeechRequestBody: MultipartFormEncodable { 14 | 15 | // Required 16 | 17 | /// The audio file data to convert 18 | public let audio: Data 19 | 20 | // Optional 21 | 22 | /// Identifier of the model that will be used, you can query them using GET /v1/models. 23 | /// The model needs to have support for speech to speech, you can check this using the `can_do_voice_conversion` property. 24 | /// Defaults to `eleven_english_sts_v2` 25 | public let modelID: String? 26 | 27 | /// If set will remove the background noise from your audio input using our audio isolation 28 | /// model. Only applies to Voice Changer. 29 | public let removeBackgroundNoise: Bool? 30 | 31 | /// If specified, our system will make a best effort to sample deterministically, such that 32 | /// repeated requests with the same seed and parameters should return the same result. 33 | /// Determinism is not guaranteed. Must be integer between 0 and 4294967295. 34 | public let seed: Int? 35 | 36 | public var formFields: [FormField] { 37 | return [ 38 | .fileField(name: "audio", content: self.audio, contentType: "audio/mpeg", filename: "aiproxy.m4a"), 39 | self.modelID.flatMap { .textField(name: "model_id", content: $0) }, 40 | self.removeBackgroundNoise.flatMap { .textField(name: "remove_background_noise", content: String($0)) }, 41 | self.seed.flatMap { .textField(name: "seed", content: String($0)) } 42 | ].compactMap { $0 } 43 | } 44 | 45 | // This memberwise initializer is autogenerated. 46 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 47 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 48 | public init(audio: Data, modelID: String? = nil, removeBackgroundNoise: Bool? = nil, seed: Int? = nil) { 49 | self.audio = audio 50 | self.modelID = modelID 51 | self.removeBackgroundNoise = removeBackgroundNoise 52 | self.seed = seed 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalError.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalError.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/14/24. 6 | // 7 | 8 | import Foundation 9 | 10 | enum FalError: LocalizedError { 11 | case missingResultURL 12 | case missingStatusURL 13 | case reachedRetryLimit 14 | 15 | var errorDescription: String? { 16 | switch self { 17 | case .missingResultURL: 18 | return "Fal finished inference, but did not contain a URL for us to fetch the result with" 19 | case .missingStatusURL: 20 | return "Fal request was queued, but the response did not contain a status URL" 21 | case .reachedRetryLimit: 22 | return "Reached Fal polling retry limit" 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalFastSDXLOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFastSDXLOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/14/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct FalFastSDXLOutputSchema: Decodable { 11 | public let hasNSFWConcepts: [Bool]? 12 | public let images: [FalOutputImage]? 13 | public let prompt: String? 14 | public let seed: UInt64? 15 | public let timings: FalTimings? 16 | 17 | public init(hasNSFWConcepts: [Bool]?, images: [FalOutputImage]?, prompt: String?, seed: UInt64?, timings: FalTimings?) 
{ 18 | self.hasNSFWConcepts = hasNSFWConcepts 19 | self.images = images 20 | self.prompt = prompt 21 | self.seed = seed 22 | self.timings = timings 23 | } 24 | 25 | private enum CodingKeys: String, CodingKey { 26 | case hasNSFWConcepts = "has_nsfw_concepts" 27 | case images 28 | case prompt 29 | case seed 30 | case timings 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalFluxLoRAFastTrainingOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxLoRAFastTrainingOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/3/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from https://fal.ai/models/fal-ai/flux-lora-fast-training/api#schema-output 11 | public struct FalFluxLoRAFastTrainingOutputSchema: Decodable { 12 | 13 | /// Remote training configuration file. 14 | public let configFile: File? 15 | 16 | /// Remote file holding the trained diffusers lora weights. 17 | public let diffusersLoraFile: File? 18 | 19 | public init(configFile: File?, diffusersLoraFile: File?) { 20 | self.configFile = configFile 21 | self.diffusersLoraFile = diffusersLoraFile 22 | } 23 | 24 | private enum CodingKeys: String, CodingKey { 25 | case configFile = "config_file" 26 | case diffusersLoraFile = "diffusers_lora_file" 27 | } 28 | } 29 | 30 | // MARK: - OutputSchema.File 31 | extension FalFluxLoRAFastTrainingOutputSchema { 32 | public struct File: Decodable { 33 | /// The mime type of the file. 34 | public let contentType: String? 35 | 36 | /// The name of the file. It will be auto-generated if not provided. 37 | public let fileName: String? 38 | 39 | /// The size of the file in bytes. 40 | public let fileSize: Int? 41 | 42 | /// The URL where the file can be downloaded from. 43 | public let url: URL? 44 | 45 | public init(contentType: String?, fileName: String?, fileSize: Int?, url: URL?) { 46 | self.contentType = contentType 47 | self.fileName = fileName 48 | self.fileSize = fileSize 49 | self.url = url 50 | } 51 | 52 | private enum CodingKeys: String, CodingKey { 53 | case contentType = "content_type" 54 | case fileName = "file_name" 55 | case fileSize = "file_size" 56 | case url 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalFluxLoRAOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxLoRAOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/14/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct FalFluxLoRAOutputSchema: Decodable { 11 | public let hasNSFWConcepts: [Bool]? 12 | public let images: [FalOutputImage]? 13 | public let prompt: String? 14 | public let seed: UInt64? 15 | public let timings: FalTimings? 16 | 17 | public init(hasNSFWConcepts: [Bool]?, images: [FalOutputImage]?, prompt: String?, seed: UInt64?, timings: FalTimings?) 
{ 18 | self.hasNSFWConcepts = hasNSFWConcepts 19 | self.images = images 20 | self.prompt = prompt 21 | self.seed = seed 22 | self.timings = timings 23 | } 24 | 25 | private enum CodingKeys: String, CodingKey { 26 | case hasNSFWConcepts = "has_nsfw_concepts" 27 | case images 28 | case prompt 29 | case seed 30 | case timings 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalFluxSchnellInputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxSchnellInputSchema.swift 3 | // 4 | // 5 | // Created by Hunor Zoltáni on 01.03.2025. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings taken from the tooltips here: https://fal.ai/models/fal-ai/flux/schnell 11 | public struct FalFluxSchnellInputSchema: Encodable { 12 | // Required 13 | 14 | /// The prompt to generate an image from. 15 | public let prompt: String 16 | 17 | // Optional 18 | 19 | /// If set to true, the safety checker will be enabled. Default value: true 20 | public let enableSafetyChecker: Bool? 21 | 22 | /// The size of the generated image. 23 | /// Default value: `.landscape4x3` 24 | public let imageSize: ImageSize? 25 | 26 | /// The number of images to generate. 27 | /// Default value: `1` 28 | public let numImages: Int? 29 | 30 | /// The number of inference steps to perform. 31 | /// Default value: `4` 32 | public let numInferenceSteps: Int? 33 | 34 | /// The same seed and the same prompt given to the same version of the model will output the same image every time. 35 | public let seed: Int? 36 | 37 | /// If set to true, the function will wait for the image to be generated and uploaded 38 | /// before returning the response. This will increase the latency of the function but it 39 | /// allows you to get the image directly in the response without going through the CDN. 40 | public let syncMode: Bool? 41 | 42 | private enum CodingKeys: String, CodingKey { 43 | case enableSafetyChecker = "enable_safety_checker" 44 | case imageSize = "image_size" 45 | case numImages = "num_images" 46 | case numInferenceSteps = "num_inference_steps" 47 | case prompt 48 | case seed 49 | case syncMode = "sync_mode" 50 | } 51 | 52 | // This memberwise initializer is autogenerated. 53 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 54 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 55 | public init( 56 | prompt: String, 57 | enableSafetyChecker: Bool? = nil, 58 | imageSize: FalFluxSchnellInputSchema.ImageSize? = nil, 59 | numImages: Int? = nil, 60 | numInferenceSteps: Int? = nil, 61 | seed: Int? = nil, 62 | syncMode: Bool? 
= nil 63 | ) { 64 | self.prompt = prompt 65 | self.enableSafetyChecker = enableSafetyChecker 66 | self.imageSize = imageSize 67 | self.numImages = numImages 68 | self.numInferenceSteps = numInferenceSteps 69 | self.seed = seed 70 | self.syncMode = syncMode 71 | } 72 | 73 | } 74 | 75 | // MARK: - InputSchema.ImageSize 76 | extension FalFluxSchnellInputSchema { 77 | public enum ImageSize: String, Encodable { 78 | case landscape16x9 = "landscape_16_9" 79 | case landscape4x3 = "landscape_4_3" 80 | case portrait16x9 = "portrait_16_9" 81 | case portrait4x3 = "portrait_4_3" 82 | case square 83 | case squareHD = "square_hd" 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalFluxSchnellOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxSchnellOutputSchema.swift 3 | // 4 | // 5 | // Created by Hunor Zoltáni on 01.03.2025. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct FalFluxSchnellOutputSchema: Decodable { 11 | public let hasNSFWConcepts: [Bool]? 12 | public let images: [FalOutputImage]? 13 | public let prompt: String? 14 | public let seed: UInt64? 15 | public let timings: FalTimings? 16 | 17 | public init(hasNSFWConcepts: [Bool]?, images: [FalOutputImage]?, prompt: String?, seed: UInt64?, timings: FalTimings?) { 18 | self.hasNSFWConcepts = hasNSFWConcepts 19 | self.images = images 20 | self.prompt = prompt 21 | self.seed = seed 22 | self.timings = timings 23 | } 24 | 25 | private enum CodingKeys: String, CodingKey { 26 | case hasNSFWConcepts = "has_nsfw_concepts" 27 | case images 28 | case prompt 29 | case seed 30 | case timings 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalInitiateUploadRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalInitiateUploadRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/3/24. 6 | // 7 | 8 | import Foundation 9 | 10 | struct FalInitiateUploadRequestBody: Encodable { 11 | let contentType: String 12 | let fileName: String 13 | 14 | private enum CodingKeys: String, CodingKey { 15 | case contentType = "content_type" 16 | case fileName = "file_name" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalInitiateUploadResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalInitiateUploadResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/3/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct FalInitiateUploadResponseBody: Decodable { 11 | let fileURL: URL 12 | let uploadURL: URL 13 | 14 | public init(fileURL: URL, uploadURL: URL) { 15 | self.fileURL = fileURL 16 | self.uploadURL = uploadURL 17 | } 18 | 19 | private enum CodingKeys: String, CodingKey { 20 | case fileURL = "file_url" 21 | case uploadURL = "upload_url" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalOutputImage.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalOutputImage.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/4/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct FalOutputImage: Decodable { 11 | public let contentType: String? 12 | public let height: Int? 13 | public let url: URL? 
14 | public let width: Int? 15 | 16 | private enum CodingKeys: String, CodingKey { 17 | case contentType = "content_type" 18 | case height 19 | case url 20 | case width 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalQueueResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFastQueueResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/13/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// https://fal.ai/docs/model-endpoints/queue 11 | public struct FalQueueResponseBody: Decodable { 12 | public let cancelURL: URL? 13 | public let logs: String? 14 | public let metrics: Metrics? 15 | public let responseURL: URL? 16 | public let requestID: String? 17 | public let status: Status? 18 | public let statusURL: URL? 19 | public let queuePosition: Int? 20 | 21 | public init(cancelURL: URL?, logs: String?, metrics: Metrics?, responseURL: URL?, requestID: String?, status: Status?, statusURL: URL?, queuePosition: Int?) { 22 | self.cancelURL = cancelURL 23 | self.logs = logs 24 | self.metrics = metrics 25 | self.responseURL = responseURL 26 | self.requestID = requestID 27 | self.status = status 28 | self.statusURL = statusURL 29 | self.queuePosition = queuePosition 30 | } 31 | 32 | private enum CodingKeys: String, CodingKey { 33 | case cancelURL = "cancel_url" 34 | case logs 35 | case metrics 36 | case responseURL = "response_url" 37 | case requestID = "request_id" 38 | case status 39 | case statusURL = "status_url" 40 | case queuePosition = "queue_position" 41 | } 42 | } 43 | 44 | extension FalQueueResponseBody { 45 | public enum Status: String, Decodable { 46 | case inQueue = "IN_QUEUE" 47 | case inProgress = "IN_PROGRESS" 48 | case completed = "COMPLETED" 49 | } 50 | } 51 | 52 | extension FalQueueResponseBody { 53 | public struct Metrics: Decodable { 54 | let inferenceTime: Double? 55 | 56 | public init(inferenceTime: Double?) { 57 | self.inferenceTime = inferenceTime 58 | } 59 | 60 | private enum CodingKeys: String, CodingKey { 61 | case inferenceTime = "inference_time" 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalTimings.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalTimings.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/4/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct FalTimings: Decodable { 11 | public let inference: Double? 12 | 13 | public init(inference: Double?) { 14 | self.inference = inference 15 | } 16 | 17 | private enum CodingKeys: String, CodingKey { 18 | case inference 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /Sources/AIProxy/Fal/FalTryonOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalTryonOutputSchema.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 12/31/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from https://fal.ai/models/fashn/tryon/api#schema-output 11 | public struct FalTryonOutputSchema: Decodable { 12 | public let images: [TryonImage] 13 | 14 | public init(images: [TryonImage]) { 15 | self.images = images 16 | } 17 | } 18 | 19 | // MARK: - OutputSchema.TryonImage 20 | extension FalTryonOutputSchema { 21 | public struct TryonImage: Decodable { 22 | /// The mime type of the file. 23 | public let contentType: String? 
24 | 25 | /// File data. 26 | public let fileData: String? 27 | 28 | /// The name of the file. It will be auto-generated if not provided. 29 | public let fileName: String? 30 | 31 | /// The size of the file in bytes. 32 | public let fileSize: Int? 33 | 34 | /// The height of the image in pixels. 35 | public let height: Int? 36 | 37 | /// The URL where the file can be downloaded from. 38 | public let url: URL 39 | 40 | /// The width of the image in pixels. 41 | public let width: Int? 42 | 43 | public init(contentType: String?, fileData: String?, fileName: String?, fileSize: Int?, height: Int?, url: URL, width: Int?) { 44 | self.contentType = contentType 45 | self.fileData = fileData 46 | self.fileName = fileName 47 | self.fileSize = fileSize 48 | self.height = height 49 | self.url = url 50 | self.width = width 51 | } 52 | 53 | private enum CodingKeys: String, CodingKey { 54 | case contentType = "content_type" 55 | case fileData = "file_data" 56 | case fileName = "file_name" 57 | case fileSize = "file_size" 58 | case height 59 | case url 60 | case width 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /Sources/AIProxy/FireworksAI/FireworksAIService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FireworksAIService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/29/25. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol FireworksAIService { 11 | 12 | /// Initiates a non-streaming chat completion request to DeepSeek R1 at api.fireworks.ai/inference/v1/chat/completions 13 | /// 14 | /// - Parameters: 15 | /// - body: The request body to send to FireworksAI. See these references: 16 | /// https://fireworks.ai/models/fireworks/deepseek-r1 17 | /// https://api-docs.deepseek.com/api/create-chat-completion 18 | /// - secondsToWait: The number of seconds to wait before timing out 19 | /// - Returns: The chat response. See this reference: 20 | /// https://api-docs.deepseek.com/api/create-chat-completion#responses 21 | func deepSeekR1Request( 22 | body: DeepSeekChatCompletionRequestBody, 23 | secondsToWait: UInt 24 | ) async throws -> DeepSeekChatCompletionResponseBody 25 | 26 | /// Initiates a streaming chat completion request to DeepSeek R1 at api.fireworks.ai/inference/v1/chat/completions 27 | /// 28 | /// - Parameters: 29 | /// - body: The request body to send to FireworksAI. See these references: 30 | /// https://fireworks.ai/models/fireworks/deepseek-r1 31 | /// https://api-docs.deepseek.com/api/create-chat-completion 32 | /// - secondsToWait: The number of seconds to wait before timing out 33 | /// - Returns: An async sequence of completion chunks. See the 'Streaming' tab here: 34 | /// https://api-docs.deepseek.com/api/create-chat-completion#responses 35 | func streamingDeepSeekR1Request( 36 | body: DeepSeekChatCompletionRequestBody, 37 | secondsToWait: UInt 38 | ) async throws -> AsyncCompactMapSequence, DeepSeekChatCompletionChunk> 39 | } 40 | -------------------------------------------------------------------------------- /Sources/AIProxy/Gemini/GeminiError.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiError.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/24/24. 6 | // 7 | 8 | import Foundation 9 | 10 | enum GeminiError: LocalizedError { 11 | case reachedRetryLimit 12 | 13 | var errorDescription: String? 
{ 14 | switch self { 15 | case .reachedRetryLimit: 16 | return "Reached Gemini polling retry limit" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/Gemini/GeminiFile.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiFile.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/24/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct GeminiFile: Decodable { 11 | public let createTime: String? 12 | public let expirationTime: String? 13 | public let mimeType: String? 14 | public let name: String? 15 | public let sha256Hash: String? 16 | public let sizeBytes: String? 17 | public let state: State 18 | public let updateTime: String? 19 | public let uri: URL 20 | public let videoMetadata: VideoMetadata? 21 | 22 | public init(createTime: String?, expirationTime: String?, mimeType: String?, name: String?, sha256Hash: String?, sizeBytes: String?, state: State, updateTime: String?, uri: URL, videoMetadata: VideoMetadata?) { 23 | self.createTime = createTime 24 | self.expirationTime = expirationTime 25 | self.mimeType = mimeType 26 | self.name = name 27 | self.sha256Hash = sha256Hash 28 | self.sizeBytes = sizeBytes 29 | self.state = state 30 | self.updateTime = updateTime 31 | self.uri = uri 32 | self.videoMetadata = videoMetadata 33 | } 34 | } 35 | 36 | // MARK: - GeminiFile.State 37 | extension GeminiFile { 38 | public enum State: String, Decodable { 39 | case processing = "PROCESSING" 40 | case active = "ACTIVE" 41 | } 42 | } 43 | 44 | // MARK: - GeminiFile.VideoMetadata 45 | extension GeminiFile { 46 | public struct VideoMetadata: Decodable { 47 | public let videoDuration: String 48 | 49 | public init(videoDuration: String) { 50 | self.videoDuration = videoDuration 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /Sources/AIProxy/Gemini/GeminiFileUploadRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiFileUploadRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/23/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct GeminiFileUploadRequestBody { 11 | 12 | let fileData: Data 13 | let mimeType: String 14 | 15 | func serialize(withBoundary boundary: String) -> Data { 16 | var encoded = Data() 17 | let u: (String) -> Data = { $0.data(using: .utf8)! } 18 | encoded += u("--\(boundary)\r\n") 19 | encoded += u("Content-Type: application/json; charset=utf-8\r\n\r\n") 20 | encoded += u("{\"file\":{\"mimeType\":\"\(self.mimeType)\"}}\r\n") 21 | encoded += u("--\(boundary)\r\n") 22 | encoded += u("Content-Type: \(self.mimeType)\r\n\r\n") 23 | encoded += self.fileData 24 | encoded += u("\r\n") 25 | encoded += u("--\(boundary)--") 26 | return encoded 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /Sources/AIProxy/Gemini/GeminiFileUploadResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiFileUploadResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/24/24. 
6 | // 7 | 8 | import Foundation 9 | 10 | public struct GeminiFileUploadResponseBody: Decodable { 11 | public let file: GeminiFile 12 | 13 | public init(file: GeminiFile) { 14 | self.file = file 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /Sources/AIProxy/Gemini/GeminiImagenResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiImagenResponseBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/18/25. 6 | // 7 | 8 | public struct GeminiImagenResponseBody: Decodable { 9 | public let predictions: [Prediction] 10 | public init(predictions: [Prediction]) { 11 | self.predictions = predictions 12 | } 13 | } 14 | 15 | extension GeminiImagenResponseBody { 16 | public struct Prediction: Decodable { 17 | public let mimeType: String? 18 | public let bytesBase64Encoded: String 19 | 20 | public init(mimeType: String?, bytesBase64Encoded: String) { 21 | self.mimeType = mimeType 22 | self.bytesBase64Encoded = bytesBase64Encoded 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Sources/AIProxy/Groq/GroqChatCompletionStreamingChunk.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GroqChatCompletionStreamingChunk.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/1/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from: https://platform.openai.com/docs/api-reference/chat/streaming 11 | public struct GroqChatCompletionStreamingChunk: Decodable { 12 | /// A list of chat completion choices. Can contain more than one elements if 13 | /// OpenAIChatCompletionRequestBody's `n` property is greater than 1. Can also be empty for 14 | /// the last chunk, which contains usage information only. 15 | public let choices: [Choice] 16 | 17 | public init(choices: [Choice]) { 18 | self.choices = choices 19 | } 20 | } 21 | 22 | // MARK: - Chunk.Choice 23 | extension GroqChatCompletionStreamingChunk { 24 | public struct Choice: Codable { 25 | public let delta: Delta 26 | public let finishReason: String? 27 | 28 | public init(delta: Delta, finishReason: String?) { 29 | self.delta = delta 30 | self.finishReason = finishReason 31 | } 32 | 33 | private enum CodingKeys: String, CodingKey { 34 | case delta 35 | case finishReason = "finish_reason" 36 | } 37 | } 38 | } 39 | 40 | // MARK: - Chunk.Choice.Delta 41 | extension GroqChatCompletionStreamingChunk.Choice { 42 | public struct Delta: Codable { 43 | public let role: String? 44 | public let content: String? 45 | 46 | public init(role: String?, content: String?) { 47 | self.role = role 48 | self.content = content 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /Sources/AIProxy/Groq/GroqService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GroqService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/18/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol GroqService { 11 | /// Initiates a non-streaming chat completion request to Groq 12 | /// 13 | /// - Parameters: 14 | /// - body: The chat completion request body. See this reference: 15 | /// https://console.groq.com/docs/api-reference#chat-create 16 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 17 | /// - Returns: A ChatCompletionResponse. 
See this reference: 18 | /// https://platform.openai.com/docs/api-reference/chat/object 19 | func chatCompletionRequest( 20 | body: GroqChatCompletionRequestBody, 21 | secondsToWait: UInt 22 | ) async throws -> GroqChatCompletionResponseBody 23 | 24 | /// Initiates a streaming chat completion request to Groq. 25 | /// 26 | /// - Parameters: 27 | /// - body: The chat completion request body. See this reference: 28 | /// https://console.groq.com/docs/api-reference#chat-create 29 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 30 | /// - Returns: An async sequence of completion chunks. See this reference: 31 | /// https://platform.openai.com/docs/api-reference/chat/streaming 32 | func streamingChatCompletionRequest( 33 | body: GroqChatCompletionRequestBody, 34 | secondsToWait: UInt 35 | ) async throws -> AsyncCompactMapSequence, GroqChatCompletionStreamingChunk> 36 | 37 | /// Initiates a transcription request to /openai/v1/audio/transcriptions 38 | /// 39 | /// - Parameters: 40 | /// - body: The audio transcription request body. See this reference: 41 | /// https://console.groq.com/docs/api-reference#audio-transcription 42 | /// - secondsToWait: Seconds to wait before raising `URLError.timedOut` 43 | /// - Returns: A transcription response. See this reference: 44 | /// https://platform.openai.com/docs/api-reference/audio/json-object 45 | func createTranscriptionRequest( 46 | body: GroqTranscriptionRequestBody, 47 | secondsToWait: UInt 48 | ) async throws -> GroqTranscriptionResponseBody 49 | } 50 | -------------------------------------------------------------------------------- /Sources/AIProxy/MicrophonePCMSampleVendor.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MicrophonePCMSampleVendor.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 5/29/25. 6 | // 7 | 8 | import AVFoundation 9 | 10 | @RealtimeActor 11 | protocol MicrophonePCMSampleVendor: AnyObject { 12 | func start() throws -> AsyncStream 13 | func stop() 14 | } 15 | -------------------------------------------------------------------------------- /Sources/AIProxy/MicrophonePCMSampleVendorError.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MicrophonePCMSampleVendorError.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/20/25. 6 | // 7 | 8 | import Foundation 9 | 10 | public enum MicrophonePCMSampleVendorError: LocalizedError { 11 | case couldNotConfigureAudioUnit(String) 12 | 13 | public var errorDescription: String? { 14 | switch self { 15 | case .couldNotConfigureAudioUnit(let message): 16 | return message 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/Mistral/MistralChatCompletionResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MistralChatCompletionResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/30/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from: https://platform.openai.com/docs/api-reference/chat/object 11 | public struct MistralChatCompletionResponseBody: Decodable { 12 | /// A list of chat completion choices. 13 | /// Can be more than one if `n` on `MistralChatCompletionRequestBody` is greater than 1. 14 | public let choices: [Choice] 15 | 16 | /// The Unix timestamp (in seconds) of when the chat completion was created. 17 | public let created: Int 18 | 19 | /// The model used for the chat completion.
20 | public let model: String 21 | 22 | /// Usage statistics for the completion request. 23 | public let usage: MistralChatUsage? 24 | 25 | public init(choices: [Choice], created: Int, model: String, usage: MistralChatUsage?) { 26 | self.choices = choices 27 | self.created = created 28 | self.model = model 29 | self.usage = usage 30 | } 31 | } 32 | 33 | // MARK: - ResponseBody.Choice 34 | extension MistralChatCompletionResponseBody { 35 | public struct Choice: Decodable { 36 | /// The reason the model stopped generating tokens. This will be `stop` if the model hit a 37 | /// natural stop point or a provided stop sequence, `length` if the maximum number of 38 | /// tokens specified in the request was reached, `content_filter` if content was omitted 39 | /// due to a flag from our content filters, `tool_calls` if the model called a tool, or 40 | /// `function_call` (deprecated) if the model called a function. 41 | public let finishReason: String? 42 | 43 | /// A chat completion message generated by the model. 44 | public let message: Message 45 | 46 | public init(finishReason: String?, message: Message) { 47 | self.finishReason = finishReason 48 | self.message = message 49 | } 50 | 51 | private enum CodingKeys: String, CodingKey { 52 | case finishReason = "finish_reason" 53 | case message 54 | } 55 | } 56 | } 57 | 58 | // MARK: - ResponseBody.Choice.Message 59 | extension MistralChatCompletionResponseBody.Choice { 60 | public struct Message: Decodable { 61 | /// The contents of the message. 62 | public let content: String 63 | 64 | /// The role of the author of this message. 65 | public let role: String 66 | 67 | /// The tool calls generated by the model, such as function calls. 68 | // public let toolCalls: [ToolCall]? 69 | 70 | public init(content: String, role: String) { 71 | self.content = content 72 | self.role = role 73 | } 74 | 75 | private enum CodingKeys: String, CodingKey { 76 | case content 77 | case role 78 | // case toolCalls = "tool_calls" 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /Sources/AIProxy/Mistral/MistralChatCompletionStreamingChunk.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MistralChatCompletionStreamingChunk.swift 3 | // 4 | // 5 | // Created by Lou Zell on 11/24/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from: https://platform.openai.com/docs/api-reference/chat/streaming 11 | public struct MistralChatCompletionStreamingChunk: Decodable { 12 | /// A list of chat completion choices. Can contain more than one element if 13 | /// MistralChatCompletionRequestBody's `n` property is greater than 1. Can also be empty for 14 | /// the last chunk, which contains usage information only. 15 | public let choices: [Choice] 16 | 17 | /// This property is nil for all chunks except for the last chunk, which contains the token 18 | /// usage statistics for the entire request. 19 | public let usage: MistralChatUsage? 20 | } 21 | 22 | // MARK: - Chunk.Choice 23 | extension MistralChatCompletionStreamingChunk { 24 | public struct Choice: Codable { 25 | public let delta: Delta 26 | public let finishReason: String? 27 | 28 | private enum CodingKeys: String, CodingKey { 29 | case delta 30 | case finishReason = "finish_reason" 31 | } 32 | } 33 | } 34 | 35 | // MARK: - Chunk.Choice.Delta 36 | extension MistralChatCompletionStreamingChunk.Choice { 37 | public struct Delta: Codable { 38 | public let role: String? 39 | public let content: String?
40 | } 41 | } 42 | -------------------------------------------------------------------------------- /Sources/AIProxy/Mistral/MistralChatUsage.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MistralChatUsage.swift 3 | // 4 | // Created by Lou Zell on 11/24/24. 5 | // 6 | 7 | import Foundation 8 | 9 | public struct MistralChatUsage: Decodable { 10 | /// Number of tokens in the generated completion. 11 | public let completionTokens: Int? 12 | 13 | /// Number of tokens in the prompt. 14 | public let promptTokens: Int? 15 | 16 | /// Total number of tokens used in the request (prompt + completion). 17 | public let totalTokens: Int? 18 | 19 | private enum CodingKeys: String, CodingKey { 20 | case completionTokens = "completion_tokens" 21 | case promptTokens = "prompt_tokens" 22 | case totalTokens = "total_tokens" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /Sources/AIProxy/Mistral/MistralOCRRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MistralOCRRequestBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 5/9/25. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from: https://docs.mistral.ai/api/#tag/ocr/operation/ocr_v1_ocr_post 11 | public struct MistralOCRRequestBody: Encodable { 12 | // Required 13 | 14 | /// Document to run OCR on 15 | public let document: Document 16 | 17 | /// The model to use, e.g. `.mistralOCRLatest` 18 | public let model: Model 19 | 20 | // Optional 21 | 22 | /// Max images to extract 23 | public let imageLimit: Int? 24 | 25 | /// Minimum height and width of image to extract 26 | public let imageMinSize: Int? 27 | 28 | /// Include image URLs in response 29 | public let includeImageBase64: Bool? 30 | 31 | private enum CodingKeys: String, CodingKey { 32 | case document 33 | case model 34 | 35 | case imageLimit = "image_limit" 36 | case imageMinSize = "image_min_size" 37 | case includeImageBase64 = "include_image_base64" 38 | } 39 | 40 | // This memberwise initializer is autogenerated. 41 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 42 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 43 | public init( 44 | document: MistralOCRRequestBody.Document, 45 | model: MistralOCRRequestBody.Model, 46 | imageLimit: Int? = nil, 47 | imageMinSize: Int? = nil, 48 | includeImageBase64: Bool? 
= nil 49 | ) { 50 | self.document = document 51 | self.model = model 52 | self.imageLimit = imageLimit 53 | self.imageMinSize = imageMinSize 54 | self.includeImageBase64 = includeImageBase64 55 | } 56 | } 57 | 58 | extension MistralOCRRequestBody { 59 | public enum Model: String, Encodable { 60 | case mistralOCRLatest = "mistral-ocr-latest" 61 | } 62 | 63 | public enum Document: Encodable { 64 | case imageURLChunk(URL) 65 | 66 | private enum RootKey: String, CodingKey { 67 | case imageURL = "image_url" 68 | case type 69 | } 70 | 71 | public func encode(to encoder: Encoder) throws { 72 | var container = encoder.container(keyedBy: RootKey.self) 73 | switch self { 74 | case .imageURLChunk(let imageURL): 75 | try container.encode(imageURL, forKey: .imageURL) 76 | try container.encode("image_url", forKey: .type) 77 | } 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /Sources/AIProxy/Mistral/MistralOCRResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MistralOCRResponseBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 5/9/25. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from: https://docs.mistral.ai/api/#tag/ocr/operation/ocr_v1_ocr_post 11 | public struct MistralOCRResponseBody: Decodable { 12 | public let model: String? 13 | public let pages: [Page] 14 | public let usageInfo: UsageInfo? 15 | } 16 | 17 | extension MistralOCRResponseBody { 18 | 19 | public struct Page: Decodable { 20 | public let dimensions: Dimensions? 21 | public let images: [ExtractedImage]? 22 | public let index: Int? 23 | public let markdown: String? 24 | } 25 | 26 | public struct UsageInfo: Decodable { 27 | public let docSizeBytes: Int? 28 | public let pagesProcessed: Int? 29 | 30 | private enum CodingKeys: String, CodingKey { 31 | case docSizeBytes = "doc_size_bytes" 32 | case pagesProcessed = "pages_processed" 33 | } 34 | } 35 | } 36 | 37 | extension MistralOCRResponseBody.Page { 38 | public struct ExtractedImage: Decodable { 39 | public let imageBase64: String? 40 | 41 | private enum CodingKeys: String, CodingKey { 42 | case imageBase64 = "image_base64" 43 | } 44 | } 45 | 46 | public struct Dimensions: Decodable { 47 | /// Dots per inch of the page-image 48 | public let dpi: Int? 49 | 50 | /// Height of the image in pixels 51 | public let height: Int? 52 | 53 | /// Width of the image in pixels 54 | public let width: Int? 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /Sources/AIProxy/Mistral/MistralService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MistralService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/19/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol MistralService { 11 | /// Initiates a non-streaming chat completion request to Mistral 12 | /// 13 | /// - Parameters: 14 | /// - body: The chat completion request body. See this reference: 15 | /// https://docs.mistral.ai/api/#tag/chat/operation/chat_completion_v1_chat_completions_post 16 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 17 | /// - Returns: A ChatCompletionResponse. 18 | func chatCompletionRequest( 19 | body: MistralChatCompletionRequestBody, 20 | secondsToWait: UInt 21 | ) async throws -> MistralChatCompletionResponseBody 22 | 23 | /// Initiates a streaming chat completion request to Mistral. 
24 | /// 25 | /// - Parameters: 26 | /// - body: The chat completion request body. See this reference: 27 | /// https://docs.mistral.ai/api/#tag/chat/operation/chat_completion_v1_chat_completions_post 28 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 29 | /// - Returns: An async sequence of completion chunks. See this reference: 30 | /// https://platform.openai.com/docs/api-reference/chat/streaming 31 | func streamingChatCompletionRequest( 32 | body: MistralChatCompletionRequestBody, 33 | secondsToWait: UInt 34 | ) async throws -> AsyncCompactMapSequence, MistralChatCompletionStreamingChunk> 35 | 36 | /// Initiates an OCR request to Mistral 37 | /// 38 | /// - Parameters: 39 | /// - body: The OCR request body. See this reference: 40 | /// https://docs.mistral.ai/api/#tag/ocr/operation/ocr_v1_ocr_post 41 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 42 | /// - Returns: The OCR result 43 | func ocrRequest( 44 | body: MistralOCRRequestBody, 45 | secondsToWait: UInt 46 | ) async throws -> MistralOCRResponseBody 47 | } 48 | -------------------------------------------------------------------------------- /Sources/AIProxy/MultipartFormEncodable.swift: -------------------------------------------------------------------------------- 1 | // 2 | // MultipartFormEncodable.swift 3 | // 4 | // 5 | // Created by Lou Zell on 7/22/24. 6 | // 7 | 8 | import Foundation 9 | 10 | 11 | public protocol MultipartFormEncodable { 12 | var formFields: [FormField] { get} 13 | } 14 | 15 | public enum FormField { 16 | case fileField(name: String, content: Data, contentType: String, filename: String) 17 | case textField(name: String, content: String) 18 | } 19 | 20 | func formEncode(_ body: MultipartFormEncodable, _ boundary: String) -> Data { 21 | var encoded = Data() 22 | let u: (String) -> Data = { $0.data(using: .utf8)! } 23 | for field in body.formFields { 24 | switch field { 25 | case .fileField( 26 | name: let name, 27 | content: let content, 28 | contentType: let contentType, 29 | filename: let filename 30 | ): 31 | encoded += u("--\(boundary)\r\n") 32 | encoded += u("Content-Disposition: form-data; name=\"\(name)\"; filename=\"\(filename)\"\r\n") 33 | encoded += u("Content-Type: \(contentType)\r\n\r\n") 34 | encoded += content 35 | encoded += u("\r\n") 36 | case .textField(name: let name, content: let content): 37 | encoded += u("--\(boundary)\r\n") 38 | encoded += u("Content-Disposition: form-data; name=\"\(name)\"\r\n\r\n") 39 | encoded += u(content) 40 | encoded += u("\r\n") 41 | } 42 | } 43 | encoded += u("--\(boundary)--") 44 | return encoded 45 | } 46 | -------------------------------------------------------------------------------- /Sources/AIProxy/NetworkActor.swift: -------------------------------------------------------------------------------- 1 | // 2 | // NetworkActor.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/25/24. 6 | // 7 | 8 | import Foundation 9 | 10 | @globalActor actor NetworkActor { 11 | static let shared = NetworkActor() 12 | } 13 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIChatUsage.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIChatUsage.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/19/24. 
6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from: 11 | /// https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage 12 | public struct OpenAIChatUsage: Decodable { 13 | /// Number of tokens in the generated completion. 14 | public let completionTokens: Int? 15 | 16 | /// Number of tokens in the prompt. 17 | public let promptTokens: Int? 18 | 19 | /// Total number of tokens used in the request (prompt + completion). 20 | public let totalTokens: Int? 21 | 22 | /// Breakdown of tokens used in a completion. 23 | public let completionTokensDetails: Details? 24 | 25 | private enum CodingKeys: String, CodingKey { 26 | case completionTokens = "completion_tokens" 27 | case promptTokens = "prompt_tokens" 28 | case totalTokens = "total_tokens" 29 | case completionTokensDetails = "completion_tokens_details" 30 | } 31 | } 32 | 33 | // MARK: - 34 | extension OpenAIChatUsage { 35 | public struct Details: Decodable { 36 | /// Tokens generated by the model for reasoning. 37 | public let reasoningTokens: Int? 38 | 39 | private enum CodingKeys: String, CodingKey { 40 | case reasoningTokens = "reasoning_tokens" 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAICreateImageResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAICreateImageResponseBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/11/25. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Response body for the 'Create image' endpoint: 11 | /// https://platform.openai.com/docs/api-reference/images/create 12 | public struct OpenAICreateImageResponseBody: Decodable { 13 | /// A list of generated images returned from the 'Create Image' endpoint 14 | public let data: [ImageData] 15 | 16 | public init(data: [ImageData]) { 17 | self.data = data 18 | } 19 | } 20 | 21 | // MARK: - 22 | extension OpenAICreateImageResponseBody { 23 | /// https://platform.openai.com/docs/api-reference/images/object 24 | public struct ImageData: Decodable { 25 | /// The base64-encoded JSON of the generated image, if `responseFormat` on OpenAICreateImageRequestBody is `b64_json`. 26 | public let b64JSON: String? 27 | 28 | /// The prompt that was used to generate the image, if there was any revision to the prompt. 29 | public let revisedPrompt: String? 30 | 31 | /// The URL of the generated image, if `responseFormat` on OpenAICreateImageRequestBody is `url` (default). 32 | public let url: URL? 33 | 34 | public init(b64JSON: String?, revisedPrompt: String?, url: URL?) { 35 | self.b64JSON = b64JSON 36 | self.revisedPrompt = revisedPrompt 37 | self.url = url 38 | } 39 | 40 | enum CodingKeys: String, CodingKey { 41 | case b64JSON = "b64_json" 42 | case revisedPrompt = "revised_prompt" 43 | case url 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIEmbeddingResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIEmbeddingResponseBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/16/25. 6 | // 7 | 8 | /// https://platform.openai.com/docs/api-reference/embeddings/object 9 | public struct OpenAIEmbeddingResponseBody: Decodable { 10 | public let embeddings: [Embedding] 11 | public let model: String? 12 | public let usage: Usage? 13 | 14 | public init(embeddings: [Embedding], model: String?, usage: Usage?) 
{ 15 | self.embeddings = embeddings 16 | self.model = model 17 | self.usage = usage 18 | } 19 | 20 | private enum CodingKeys: CodingKey { 21 | case data 22 | case model 23 | case usage 24 | } 25 | 26 | public init(from decoder: any Decoder) throws { 27 | let container = try decoder.container(keyedBy: CodingKeys.self) 28 | self.embeddings = try container.decode([Embedding].self, forKey: .data) 29 | self.model = try container.decodeIfPresent(String.self, forKey: .model) 30 | self.usage = try container.decodeIfPresent(Usage.self, forKey: .usage) 31 | } 32 | } 33 | 34 | // MARK: - 35 | extension OpenAIEmbeddingResponseBody { 36 | public struct Embedding: Decodable { 37 | public let vector: [Double] 38 | public let index: Int? 39 | 40 | public init(vector: [Double], index: Int?) { 41 | self.vector = vector 42 | self.index = index 43 | } 44 | 45 | private enum CodingKeys: CodingKey { 46 | case embedding 47 | case index 48 | } 49 | 50 | public init(from decoder: any Decoder) throws { 51 | let container = try decoder.container(keyedBy: CodingKeys.self) 52 | self.vector = try container.decode([Double].self, forKey: .embedding) 53 | self.index = try container.decodeIfPresent(Int.self, forKey: .index) 54 | } 55 | } 56 | } 57 | 58 | // MARK: - 59 | extension OpenAIEmbeddingResponseBody { 60 | public struct Usage: Decodable { 61 | public let promptTokens: Int 62 | public let totalTokens: Int 63 | 64 | public init(promptTokens: Int, totalTokens: Int) { 65 | self.promptTokens = promptTokens 66 | self.totalTokens = totalTokens 67 | } 68 | 69 | private enum CodingKeys: String, CodingKey { 70 | case promptTokens = "prompt_tokens" 71 | case totalTokens = "total_tokens" 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIFileUploadRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIFileUploadRequestBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/27/25. 6 | // 7 | 8 | import Foundation 9 | 10 | internal struct OpenAIFileUploadRequestBody: MultipartFormEncodable { 11 | 12 | /// The binary contents of the file 13 | let contents: Data 14 | 15 | /// The file mime type 16 | let contentType: String 17 | 18 | /// The name of the file 19 | let fileName: String 20 | 21 | /// The intended purpose of the uploaded file. One of: 22 | /// - assistants: Used in the Assistants API 23 | /// - batch: Used in the Batch API 24 | /// - fine-tune: Used for fine-tuning 25 | /// - vision: Images used for vision fine-tuning 26 | /// - user_data: Flexible file type for any purpose 27 | /// - evals: Used for eval data sets 28 | let purpose: String 29 | 30 | var formFields: [FormField] { 31 | return [ 32 | .fileField( 33 | name: "file", 34 | content: self.contents, 35 | contentType: self.contentType, 36 | filename: self.fileName 37 | ), 38 | .textField( 39 | name: "purpose", 40 | content: self.purpose 41 | ) 42 | ] 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIFileUploadResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIFileUploadResponseBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/27/25. 6 | // 7 | 8 | 9 | public struct OpenAIFileUploadResponseBody: Decodable { 10 | /// The size of the file, in bytes. 11 | public let bytes: Int? 
12 | 13 | /// The Unix timestamp (in seconds) for when the file was created. 14 | public let createdAt: Int? 15 | 16 | /// The Unix timestamp (in seconds) for when the file will expire. 17 | public let expiresAt: Int? 18 | 19 | /// The name of the file. 20 | public let filename: String? 21 | 22 | /// The file identifier, which can be referenced in the API endpoints. 23 | public let id: String 24 | 25 | /// The intended purpose of the file. 26 | public let purpose: String? 27 | 28 | public init(bytes: Int?, createdAt: Int?, expiresAt: Int?, filename: String?, id: String, purpose: String?) { 29 | self.bytes = bytes 30 | self.createdAt = createdAt 31 | self.expiresAt = expiresAt 32 | self.filename = filename 33 | self.id = id 34 | self.purpose = purpose 35 | } 36 | 37 | private enum CodingKeys: String, CodingKey { 38 | case bytes 39 | case createdAt = "created_at" 40 | case expiresAt = "expires_at" 41 | case filename 42 | case id 43 | case purpose 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIModerationRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIModerationRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/17/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Docstrings from https://platform.openai.com/docs/api-reference/moderations/create 11 | public struct OpenAIModerationRequestBody: Encodable { 12 | /// An array of multi-modal inputs to classify. 13 | public let input: [ModerationInput] 14 | 15 | /// The model to use. E.g. "omni-moderation-latest" 16 | public let model: String 17 | 18 | // This memberwise initializer is autogenerated. 19 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 20 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 21 | public init( 22 | input: [OpenAIModerationRequestBody.ModerationInput], 23 | model: String 24 | ) { 25 | self.input = input 26 | self.model = model 27 | } 28 | } 29 | 30 | // MARK: - 31 | extension OpenAIModerationRequestBody { 32 | /// Represents a single multi-modal input, which can be either text or an image. 
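//
// Construction sketch (editorial illustration, not part of the library source): a
// moderation request with one text input and one image input. `imageDataURL` is a
// placeholder for a base64-encoded data URL, e.g. one produced with
// AIProxy.encodeImageAsURL as noted on the `.image` case below.
//
// let body = OpenAIModerationRequestBody(
//     input: [
//         .text("Is this caption safe to display?"),
//         .image(imageDataURL)
//     ],
//     model: "omni-moderation-latest"
// )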
33 | public enum ModerationInput: Encodable { 34 | /// The input text to classify 35 | case text(String) 36 | 37 | /// The input image to classify, where the image is represented as a base64-encoded data URL 38 | case image(URL) // Create image string with AIProxy.encodeImageAsURL 39 | 40 | private enum RootKey: String, CodingKey { 41 | case imageURL = "image_url" 42 | case text 43 | case type 44 | } 45 | 46 | private enum NestedKey: String, CodingKey { 47 | case url 48 | } 49 | 50 | public func encode(to encoder: Encoder) throws { 51 | var container = encoder.container(keyedBy: RootKey.self) 52 | switch self { 53 | case .text(let textInput): 54 | try container.encode("text", forKey: .type) 55 | try container.encode(textInput, forKey: .text) 56 | case .image(let encodedImage): 57 | try container.encode("image_url", forKey: .type) 58 | var nestedContainer = container.nestedContainer(keyedBy: NestedKey.self, forKey: .imageURL) 59 | try nestedContainer.encode(encodedImage, forKey: .url) 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeConversationItemCreate.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRealtimeConversationItemCreate.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/12/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// https://platform.openai.com/docs/api-reference/realtime-client-events/conversation/item/create 11 | public struct OpenAIRealtimeConversationItemCreate: Encodable { 12 | public let type = "conversation.item.create" 13 | public let item: Item 14 | 15 | public init(item: Item) { 16 | self.item = item 17 | } 18 | } 19 | 20 | // MARK: - 21 | public extension OpenAIRealtimeConversationItemCreate { 22 | struct Item: Encodable { 23 | public let type = "message" 24 | public let role: String 25 | public let content: [Content] 26 | 27 | public init(role: String, text: String) { 28 | self.role = role 29 | self.content = [.init(text: text)] 30 | } 31 | } 32 | } 33 | 34 | // MARK: - 35 | public extension OpenAIRealtimeConversationItemCreate.Item { 36 | struct Content: Encodable { 37 | public let type = "input_text" 38 | public let text: String 39 | 40 | public init(text: String) { 41 | self.text = text 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeInputAudioBufferAppend.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRealtimeInputAudioBufferAppend.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/30/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct OpenAIRealtimeInputAudioBufferAppend: Encodable { 11 | public let type = "input_audio_buffer.append" 12 | 13 | /// base64 encoded PCM16 data 14 | public let audio: String 15 | 16 | public init(audio: String) { 17 | self.audio = audio 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeInputAudioBufferSpeechStarted.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRealtimeInputAudioBufferSpeechStarted.swift 3 | // 4 | // 5 | // Created by Lou Zell on 11/4/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// This is not actually used! 
I'm not using decodable for this event, just inspecting the 'type' string 11 | /// This is sent from server to client when vad detects that speech started. 12 | public struct OpenAIRealtimeInputAudioBufferSpeechStarted: Decodable { 13 | public let type = "input_audio_buffer.speech_started" 14 | public let audioStartMs: Int 15 | 16 | public init(audioStartMs: Int) { 17 | self.audioStartMs = audioStartMs 18 | } 19 | 20 | private enum CodingKeys: String, CodingKey { 21 | case audioStartMs = "audio_start_ms" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeMessage.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRealtimeMessage.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 12/29/24. 6 | // Update by harr-sudo 05/05/2025 7 | 8 | public enum OpenAIRealtimeMessage { 9 | case error(String?) 10 | case sessionCreated // "session.created" 11 | case sessionUpdated // "session.updated" 12 | case responseCreated // "response.created" 13 | case responseAudioDelta(String) // "response.audio.delta" 14 | case inputAudioBufferSpeechStarted // "input_audio_buffer.speech_started" 15 | case responseFunctionCallArgumentsDone(String, String) // "response.function_call_arguments.done" 16 | 17 | // Add new cases for transcription 18 | case responseTranscriptDelta(String) // "response.audio_transcript.delta" 19 | case responseTranscriptDone(String) // "response.audio_transcript.done" 20 | case inputAudioBufferTranscript(String) // "input_audio_buffer.transcript" 21 | case inputAudioTranscriptionDelta(String) // "conversation.item.input_audio_transcription.delta" 22 | case inputAudioTranscriptionCompleted(String) // "conversation.item.input_audio_transcription.completed" 23 | } 24 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeResponseCreate.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRealtimeResponseCreate.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/14/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// https://platform.openai.com/docs/api-reference/realtime-client-events/response 11 | public struct OpenAIRealtimeResponseCreate: Encodable { 12 | public let type = "response.create" 13 | public let response: Response? 14 | 15 | public init(response: Response? = nil) { 16 | self.response = response 17 | } 18 | } 19 | 20 | // MARK: - 21 | extension OpenAIRealtimeResponseCreate { 22 | public struct Response: Encodable { 23 | public let instructions: String? 24 | public let modalities: [String]? 25 | public let tools: [Tool]? 26 | 27 | public init( 28 | instructions: String? = nil, 29 | modalities: [String]? = nil, 30 | tools: [Tool]? 
= nil 31 | ) { 32 | self.instructions = instructions 33 | self.modalities = modalities 34 | self.tools = tools 35 | } 36 | } 37 | } 38 | 39 | // MARK: - 40 | extension OpenAIRealtimeResponseCreate.Response { 41 | public struct Tool: Encodable { 42 | public let name: String 43 | public let description: String 44 | public let parameters: [String: AIProxyJSONValue] 45 | public let type = "function" 46 | 47 | public init(name: String, description: String, parameters: [String: AIProxyJSONValue]) { 48 | self.name = name 49 | self.description = description 50 | self.parameters = parameters 51 | 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeResponseFunctionCallArgumentsDone.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRealtimeResponseDone.swift 3 | // AIProxy 4 | // 5 | // Created by Tim Wheeler on 4/22/25. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Returned when the model-generated function call arguments are done streaming. 11 | /// Also emitted when a Response is interrupted, incomplete, or cancelled. 12 | /// https://platform.openai.com/docs/api-reference/realtime-server-events/response/function_call_arguments/done 13 | public struct OpenAIRealtimeResponseFunctionCallArgumentsDone: Encodable { 14 | public let type = "response.function_call_arguments.done" 15 | public let name: String? 16 | public let arguments: String? 17 | 18 | public init(name: String, arguments: String) { 19 | self.name = name 20 | self.arguments = arguments 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRealtimeSessionUpdate.swift: -------------------------------------------------------------------------------- 1 | /// Send this event to update the session’s default configuration. 2 | /// 3 | /// Docstrings from: 4 | /// https://platform.openai.com/docs/api-reference/realtime-client-events/session/update 5 | public struct OpenAIRealtimeSessionUpdate: Encodable { 6 | /// Optional client-generated ID used to identify this event. 7 | public let eventId: String? 8 | 9 | /// Session configuration to update 10 | public let session: OpenAIRealtimeSessionConfiguration 11 | 12 | /// The event type, must be "session.update". 13 | public let type = "session.update" 14 | 15 | private enum CodingKeys: String, CodingKey { 16 | case eventId = "event_id" 17 | case session 18 | case type 19 | } 20 | 21 | public init( 22 | eventId: String? = nil, 23 | session: OpenAIRealtimeSessionConfiguration 24 | ) { 25 | self.eventId = eventId 26 | self.session = session 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAIRequestFormat.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIRequestFormat.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/18/24. 
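// Illustrative usage sketch (hypothetical helper, not part of the library API):
// choosing a request format. `.standard` targets OpenAI's own endpoints, while
// `.azureDeployment` formats requests for your Azure deployment. The api-version
// string below is a made-up placeholder, not a recommended value.
private func exampleRequestFormat(forAzure: Bool) -> OpenAIRequestFormat {
    if forAzure {
        return .azureDeployment(apiVersion: "2024-06-01")
    } else {
        return .standard
    }
}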
6 | // 7 | 8 | import Foundation 9 | 10 | public enum OpenAIRequestFormat { 11 | /// Requests are formatted for use with OpenAI 12 | case standard 13 | 14 | /// Requests are formatted for use with your own Azure deployment 15 | case azureDeployment(apiVersion: String) 16 | } 17 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenAI/OpenAITextToSpeechRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAITextToSpeechRequestBody.swift 3 | // 4 | // 5 | // Created by Daniel Aditya Istyana on 10/9/24. 6 | // 7 | 8 | 9 | import Foundation 10 | 11 | /// Docstrings from 12 | /// https://platform.openai.com/docs/api-reference/audio/createSpeech 13 | public struct OpenAITextToSpeechRequestBody: Encodable { 14 | 15 | /// The text to generate audio for. The maximum length is 4096 characters. 16 | public let input: String 17 | 18 | /// One of the available TTS models: `.tts1`, `.tts1HD` or `.gpt4oMiniTTS` 19 | public let model: Model 20 | 21 | /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. 22 | public let voice: Voice 23 | 24 | // MARK: Optional properties 25 | 26 | /// Control the voice of your generated audio with additional instructions. Does not work with `tts-1` or `tts-1-hd`. 27 | public let instructions: String? 28 | 29 | /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. 30 | /// Default to `mp3` 31 | public let responseFormat: ResponseFormat? 32 | 33 | /// The speed of the generated audio. Select a value from 0.25 to 4.0. 34 | /// Default to `1.0` 35 | public let speed: Float? 36 | 37 | public init( 38 | input: String, 39 | model: Model = .tts1, 40 | voice: OpenAITextToSpeechRequestBody.Voice, 41 | instructions: String? = nil, 42 | responseFormat: OpenAITextToSpeechRequestBody.ResponseFormat? = .mp3, 43 | speed: Float? = 1.0 44 | ) { 45 | self.input = input 46 | self.model = model 47 | self.voice = voice 48 | self.instructions = instructions 49 | self.responseFormat = responseFormat 50 | self.speed = speed 51 | } 52 | 53 | private enum CodingKeys: String, CodingKey { 54 | case input 55 | case model 56 | case voice 57 | 58 | // Optional properties 59 | case instructions 60 | case responseFormat = "response_format" 61 | case speed 62 | } 63 | } 64 | 65 | // MARK: - 66 | extension OpenAITextToSpeechRequestBody { 67 | public enum Model: String, Encodable { 68 | case gpt4oMiniTTS = "gpt-4o-mini-tts" 69 | case tts1 = "tts-1" 70 | case tts1HD = "tts-1-hd" 71 | } 72 | } 73 | 74 | // MARK: - 75 | extension OpenAITextToSpeechRequestBody { 76 | public enum ResponseFormat: String, Encodable { 77 | case aac 78 | case flac 79 | case mp3 80 | case pcm 81 | case opus 82 | case wav 83 | } 84 | } 85 | 86 | // MARK: - 87 | extension OpenAITextToSpeechRequestBody { 88 | public enum Voice: String, Encodable { 89 | case alloy 90 | case ash 91 | case ballad 92 | case coral 93 | case echo 94 | case fable 95 | case onyx 96 | case nova 97 | case sage 98 | case shimmer 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenRouter/OpenRouterChatCompletionChunk.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenRouterChatCompletionChunk.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 12/30/24. 
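// Illustrative usage sketch (hypothetical helper, not part of the library API):
// constructing the text-to-speech request body defined in
// OpenAITextToSpeechRequestBody.swift above. Note that `instructions` is honored
// only by gpt-4o-mini-tts, not by tts-1 or tts-1-hd. The input text and voice
// are made-up example values.
private func exampleTTSRequestBody() -> OpenAITextToSpeechRequestBody {
    return OpenAITextToSpeechRequestBody(
        input: "Your order is on its way.",
        model: .gpt4oMiniTTS,
        voice: .nova,
        instructions: "Speak warmly and at a relaxed pace.",
        responseFormat: .mp3,
        speed: 1.0
    )
}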
6 | // 7 | 8 | public struct OpenRouterChatCompletionChunk: Decodable { 9 | /// A list of chat completion choices. Can contain more than one elements if 10 | /// OpenRouterChatCompletionRequestBody's `n` property is greater than 1. Can also be empty for 11 | /// the last chunk, which contains usage information only. 12 | public let choices: [Choice] 13 | 14 | /// The model used for the chat completion. 15 | public let model: String? 16 | 17 | /// The provider used to fulfill the chat completion. 18 | public let provider: String? 19 | 20 | /// This property is nil for all chunks except for the last chunk, which contains the token 21 | /// usage statistics for the entire request. 22 | public let usage: OpenRouterChatCompletionResponseBody.Usage? 23 | 24 | public init(choices: [Choice], model: String?, provider: String?, usage: OpenRouterChatCompletionResponseBody.Usage?) { 25 | self.choices = choices 26 | self.model = model 27 | self.provider = provider 28 | self.usage = usage 29 | } 30 | } 31 | 32 | // MARK: Chunk.Choice 33 | extension OpenRouterChatCompletionChunk { 34 | public struct Choice: Decodable { 35 | public let delta: Delta 36 | public let finishReason: String? 37 | 38 | public init(delta: Delta, finishReason: String?) { 39 | self.delta = delta 40 | self.finishReason = finishReason 41 | } 42 | 43 | private enum CodingKeys: String, CodingKey { 44 | case delta 45 | case finishReason = "finish_reason" 46 | } 47 | } 48 | } 49 | 50 | // MARK: Chunk.Choice.Delta 51 | extension OpenRouterChatCompletionChunk.Choice { 52 | public struct Delta: Codable { 53 | public let role: String 54 | 55 | /// Output content. For reasoning models, these chunks arrive after `reasoning` has finished. 56 | public let content: String? 57 | 58 | /// Reasoning content. For reasoning models, these chunks arrive before `content`. 59 | public let reasoning: String? 60 | 61 | public init(role: String, content: String?, reasoning: String?) { 62 | self.role = role 63 | self.content = content 64 | self.reasoning = reasoning 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /Sources/AIProxy/OpenRouter/OpenRouterService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenRouterService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 12/30/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol OpenRouterService { 11 | 12 | /// Initiates a non-streaming chat completion request to /api/v1/chat/completions. 13 | /// 14 | /// - Parameters: 15 | /// - body: The request body to send to OpenRouter through AIProxy. See this reference: 16 | /// https://openrouter.ai/docs/requests 17 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 18 | /// 19 | /// - Returns: The response body from OpenRouter. See this reference: 20 | /// https://openrouter.ai/docs/responses 21 | func chatCompletionRequest( 22 | body: OpenRouterChatCompletionRequestBody, 23 | secondsToWait: UInt 24 | ) async throws -> OpenRouterChatCompletionResponseBody 25 | 26 | /// Initiates a streaming chat completion request to /api/v1/chat/completions. 27 | /// 28 | /// - Parameters: 29 | /// - body: The request body to send to OpenRouter through AIProxy. See this reference: 30 | /// https://openrouter.ai/docs/requests 31 | /// - secondsToWait: The amount of time to wait before `URLError.timedOut` is raised 32 | /// 33 | /// - Returns: The response body from OpenRouter. 
See this reference: 34 | /// https://openrouter.ai/docs/responses 35 | func streamingChatCompletionRequest( 36 | body: OpenRouterChatCompletionRequestBody, 37 | secondsToWait: UInt 38 | ) async throws -> AsyncCompactMapSequence, OpenRouterChatCompletionChunk> 39 | } 40 | 41 | extension OpenRouterService { 42 | public func chatCompletionRequest( 43 | body: OpenRouterChatCompletionRequestBody 44 | ) async throws -> OpenRouterChatCompletionResponseBody { 45 | return try await self.chatCompletionRequest(body: body, secondsToWait: 60) 46 | } 47 | 48 | public func streamingChatCompletionRequest( 49 | body: OpenRouterChatCompletionRequestBody 50 | ) async throws -> AsyncCompactMapSequence, OpenRouterChatCompletionChunk> { 51 | return try await self.streamingChatCompletionRequest(body: body, secondsToWait: 60) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /Sources/AIProxy/Perplexity/PerplexityDirectService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // PerplexityDirectService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/19/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class PerplexityDirectService: PerplexityService, DirectService { 11 | private let unprotectedAPIKey: String 12 | 13 | /// This initializer is not public on purpose. 14 | /// Customers are expected to use the factory `AIProxy.perplexityDirectService` defined in AIProxy.swift 15 | internal init( 16 | unprotectedAPIKey: String 17 | ) { 18 | self.unprotectedAPIKey = unprotectedAPIKey 19 | } 20 | 21 | /// Initiates a non-streaming chat completion request to Perplexity 22 | /// 23 | /// - Parameters: 24 | /// - body: The chat completion request body. See this reference: 25 | /// https://platform.openai.com/docs/api-reference/chat/object 26 | /// 27 | /// - Returns: A ChatCompletionResponse 28 | public func chatCompletionRequest( 29 | body: PerplexityChatCompletionRequestBody 30 | ) async throws -> PerplexityChatCompletionResponseBody { 31 | var body = body 32 | body.stream = false 33 | let request = try AIProxyURLRequest.createDirect( 34 | baseURL: "https://api.perplexity.ai", 35 | path: "/chat/completions", 36 | body: try body.serialize(), 37 | verb: .post, 38 | secondsToWait: 60, 39 | contentType: "application/json", 40 | additionalHeaders: [ 41 | "Authorization": "Bearer \(self.unprotectedAPIKey)" 42 | ] 43 | ) 44 | return try await self.makeRequestAndDeserializeResponse(request) 45 | } 46 | 47 | /// Initiates a streaming chat completion request to Perplexity. 48 | /// 49 | /// - Parameters: 50 | /// 51 | /// - body: The chat completion request body. See this reference: 52 | /// https://platform.openai.com/docs/api-reference/chat/object 53 | /// 54 | /// - Returns: An async sequence of completion chunks. 
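    // Illustrative usage sketch (hypothetical helper, not part of the library API):
    // one way to consume the streaming request declared just below. The request
    // body type's fields live in another file, so the body is accepted as a
    // parameter here rather than constructed.
    private func exampleConsumeStream(
        body: PerplexityChatCompletionRequestBody,
        onChunk: (PerplexityChatCompletionResponseBody) -> Void
    ) async throws {
        for try await chunk in try await self.streamingChatCompletionRequest(body: body) {
            onChunk(chunk)
        }
    }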
55 | public func streamingChatCompletionRequest( 56 | body: PerplexityChatCompletionRequestBody 57 | ) async throws -> AsyncCompactMapSequence, PerplexityChatCompletionResponseBody> { 58 | var body = body 59 | body.stream = true 60 | let request = try AIProxyURLRequest.createDirect( 61 | baseURL: "https://api.perplexity.ai", 62 | path: "/chat/completions", 63 | body: try body.serialize(), 64 | verb: .post, 65 | secondsToWait: 60, 66 | contentType: "application/json", 67 | additionalHeaders: [ 68 | "Authorization": "Bearer \(self.unprotectedAPIKey)" 69 | ] 70 | ) 71 | return try await self.makeRequestAndDeserializeStreamingChunks(request) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /Sources/AIProxy/Perplexity/PerplexityProxiedService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // PerplexityProxiedService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 11/06/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class PerplexityProxiedService: PerplexityService, ProxiedService { 11 | private let partialKey: String 12 | private let serviceURL: String 13 | private let clientID: String? 14 | 15 | /// This initializer is not public on purpose. 16 | /// Customers are expected to use the factory `AIProxy.perplexityService` defined in AIProxy.swift 17 | internal init( 18 | partialKey: String, 19 | serviceURL: String, 20 | clientID: String? 21 | ) { 22 | self.partialKey = partialKey 23 | self.serviceURL = serviceURL 24 | self.clientID = clientID 25 | } 26 | 27 | /// Initiates a non-streaming chat completion request to Perplexity 28 | /// 29 | /// - Parameters: 30 | /// - body: The chat completion request body. See this reference: 31 | /// https://platform.openai.com/docs/api-reference/chat/object 32 | /// 33 | /// - Returns: A ChatCompletionResponse 34 | public func chatCompletionRequest( 35 | body: PerplexityChatCompletionRequestBody 36 | ) async throws -> PerplexityChatCompletionResponseBody { 37 | var body = body 38 | body.stream = false 39 | let request = try await AIProxyURLRequest.create( 40 | partialKey: self.partialKey, 41 | serviceURL: self.serviceURL, 42 | clientID: self.clientID, 43 | proxyPath: "/chat/completions", 44 | body: try body.serialize(), 45 | verb: .post, 46 | secondsToWait: 60, 47 | contentType: "application/json" 48 | ) 49 | return try await self.makeRequestAndDeserializeResponse(request) 50 | } 51 | 52 | /// Initiates a streaming chat completion request to Perplexity. 53 | /// 54 | /// - Parameters: 55 | /// 56 | /// - body: The chat completion request body. See this reference: 57 | /// https://platform.openai.com/docs/api-reference/chat/object 58 | /// 59 | /// - Returns: An async sequence of completion chunks. 
60 | public func streamingChatCompletionRequest( 61 | body: PerplexityChatCompletionRequestBody 62 | ) async throws -> AsyncCompactMapSequence, PerplexityChatCompletionResponseBody> { 63 | var body = body 64 | body.stream = true 65 | let request = try await AIProxyURLRequest.create( 66 | partialKey: self.partialKey, 67 | serviceURL: self.serviceURL, 68 | clientID: self.clientID, 69 | proxyPath: "/chat/completions", 70 | body: try body.serialize(), 71 | verb: .post, 72 | secondsToWait: 60, 73 | contentType: "application/json" 74 | ) 75 | return try await self.makeRequestAndDeserializeStreamingChunks(request) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /Sources/AIProxy/Perplexity/PerplexityRole.swift: -------------------------------------------------------------------------------- 1 | // 2 | // PerplexityRole.swift 3 | // 4 | // 5 | // Created by Lou Zell on 11/6/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public enum PerplexityRole: String, Codable { 11 | case system 12 | case user 13 | case assistant 14 | } 15 | -------------------------------------------------------------------------------- /Sources/AIProxy/Perplexity/PerplexityService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // PerplexityService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/19/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol PerplexityService { 11 | /// Initiates a non-streaming chat completion request to Perplexity 12 | /// 13 | /// - Parameters: 14 | /// - body: The chat completion request body. See this reference: 15 | /// https://platform.openai.com/docs/api-reference/chat/object 16 | /// 17 | /// - Returns: A ChatCompletionResponse 18 | func chatCompletionRequest( 19 | body: PerplexityChatCompletionRequestBody 20 | ) async throws -> PerplexityChatCompletionResponseBody 21 | 22 | /// Initiates a streaming chat completion request to Perplexity. 23 | /// 24 | /// - Parameters: 25 | /// 26 | /// - body: The chat completion request body. See this reference: 27 | /// https://platform.openai.com/docs/api-reference/chat/object 28 | /// 29 | /// - Returns: An async sequence of completion chunks. 30 | func streamingChatCompletionRequest( 31 | body: PerplexityChatCompletionRequestBody 32 | ) async throws -> AsyncCompactMapSequence, PerplexityChatCompletionResponseBody> 33 | } 34 | -------------------------------------------------------------------------------- /Sources/AIProxy/ProtectedPropertyQueue.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ProtectedPropertyQueue.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 4/23/25. 6 | // 7 | 8 | import Foundation 9 | 10 | internal let protectedPropertyQueue = DispatchQueue( 11 | label: "aiproxy-protected-property-queue", 12 | attributes: .concurrent 13 | ) 14 | -------------------------------------------------------------------------------- /Sources/AIProxy/ProxiedService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ProxiedService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/16/24. 
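// Illustrative sketch (hypothetical type, not library code): the reader/writer
// pattern typically used with `protectedPropertyQueue` above — concurrent
// synchronous reads, barrier-flagged writes so mutations are exclusive.
private final class ExampleProtectedCounter {
    private var _count = 0

    var count: Int {
        protectedPropertyQueue.sync { self._count }
    }

    func increment() {
        protectedPropertyQueue.async(flags: .barrier) {
            self._count += 1
        }
    }
}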
6 | // 7 | 8 | import Foundation 9 | 10 | protocol ProxiedService: ServiceMixin {} 11 | extension ProxiedService { 12 | var urlSession: URLSession { 13 | return AIProxyUtils.proxiedURLSession() 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /Sources/AIProxy/RealtimeActor.swift: -------------------------------------------------------------------------------- 1 | // 2 | // RealtimeActor.swift 3 | // 4 | // 5 | // Created by Lou Zell on 11/27/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Use this actor for realtime work 11 | @globalActor public actor RealtimeActor { 12 | public static let shared = RealtimeActor() 13 | } 14 | -------------------------------------------------------------------------------- /Sources/AIProxy/ReceiptValidation/ReceiptValidationRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReceiptValidationRequestBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/28/25. 6 | // 7 | 8 | /// Encapsulates the data from StoreKit's AppTransaction 9 | public struct ReceiptValidationRequestBody: Encodable { 10 | /// The JSON representation of the transaction. 11 | public let jsonRepresentationBase64: String 12 | 13 | /// A number the App Store uses to uniquely identify the application. 14 | public let appID: UInt64? 15 | 16 | /// The application version the transaction is for. 17 | public let appVersion: String 18 | 19 | /// A number the App Store uses to uniquely identify the version of the application. 20 | public let appVersionID: UInt64? 21 | 22 | /// Identifies the application the transaction is for. 23 | public let bundleID: String 24 | 25 | /// The server environment this transaction was created in. 26 | public let environment: String 27 | 28 | /// The version of the app originally purchased. 29 | public let originalAppVersion: String 30 | 31 | /// The date this original app purchase occurred on. 32 | public let originalPurchaseDate: Double 33 | 34 | /// A SHA-384 hash of `AppStore.deviceVerificationID` appended after 35 | /// `deviceVerificationNonce` (both lowercased UUID strings). 36 | public let deviceVerificationBase64: String 37 | 38 | /// The nonce used when computing `deviceVerification`. 39 | /// - SeeAlso: `AppStore.deviceVerificationID` 40 | public let deviceVerificationNonce: String 41 | 42 | /// The date this transaction was generated and signed. 43 | public let signedDate: Double 44 | 45 | /// The message from StoreKit 2 detailing why verification failed. 46 | public let verificationError: String? 47 | 48 | public init( 49 | jsonRepresentationBase64: String, 50 | appID: UInt64? = nil, 51 | appVersion: String, 52 | appVersionID: UInt64? = nil, 53 | bundleID: String, 54 | environment: String, 55 | originalAppVersion: String, 56 | originalPurchaseDate: Double, 57 | deviceVerificationBase64: String, 58 | deviceVerificationNonce: String, 59 | signedDate: Double, 60 | verificationError: String? 
61 | ) { 62 | self.jsonRepresentationBase64 = jsonRepresentationBase64 63 | self.appID = appID 64 | self.appVersion = appVersion 65 | self.appVersionID = appVersionID 66 | self.bundleID = bundleID 67 | self.environment = environment 68 | self.originalAppVersion = originalAppVersion 69 | self.originalPurchaseDate = originalPurchaseDate 70 | self.deviceVerificationBase64 = deviceVerificationBase64 71 | self.deviceVerificationNonce = deviceVerificationNonce 72 | self.signedDate = signedDate 73 | self.verificationError = verificationError 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /Sources/AIProxy/ReceiptValidation/ReceiptValidationResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReceiptValidationResponseBody.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/28/25. 6 | // 7 | 8 | public struct ReceiptValidationResponseBody: Decodable { 9 | public let isValid: Bool 10 | 11 | public init(isValid: Bool) { 12 | self.isValid = isValid 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /Sources/AIProxy/ReceiptValidation/ReceiptValidationService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReceiptValidationService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/28/25. 6 | // 7 | 8 | open class ReceiptValidationService: ProxiedService { 9 | 10 | public let publishableKey: String 11 | public let serviceURL: String 12 | public let clientID: String? 13 | 14 | public init( 15 | publishableKey: String, 16 | serviceURL: String, 17 | clientID: String? = nil 18 | ) { 19 | self.publishableKey = publishableKey 20 | self.serviceURL = serviceURL 21 | self.clientID = clientID 22 | } 23 | 24 | /// Makes a request to validate the app purchase 25 | /// 26 | /// - Parameters: 27 | /// - body: The request body to send to aiproxy 28 | /// - Returns: The validation result 29 | public func validateReceipt( 30 | body: ReceiptValidationRequestBody 31 | ) async throws -> ReceiptValidationResponseBody { 32 | let request = try await AIProxyURLRequest.create( 33 | partialKey: self.publishableKey, 34 | serviceURL: self.serviceURL, 35 | clientID: self.clientID, 36 | proxyPath: "/validate", 37 | body: try body.serialize(), 38 | verb: .post, 39 | secondsToWait: 60, 40 | contentType: "application/json" 41 | ) 42 | return try await self.makeRequestAndDeserializeResponse(request) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /Sources/AIProxy/RemoteLogger/RemoteLoggerService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // RemoteLoggerService.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/31/25. 6 | // 7 | 8 | open class RemoteLoggerService: ProxiedService { 9 | 10 | public let publishableKey: String 11 | public let serviceURL: String 12 | public let clientID: String? 13 | 14 | public init( 15 | publishableKey: String, 16 | serviceURL: String, 17 | clientID: String? = nil 18 | ) { 19 | self.publishableKey = publishableKey 20 | self.serviceURL = serviceURL 21 | self.clientID = clientID 22 | } 23 | 24 | /// Logs a breadcrumb as fire-and-forget. 25 | /// There are no smarts built into this. 26 | /// If the message can't reach the destination, it is not retried. 
27 | /// 28 | /// - Parameters: 29 | /// - body: The request body to send to aiproxy 30 | public func logBreadcrumb( 31 | context: String, 32 | errorMessage: String? = nil 33 | ) async { 34 | if let request = try? await AIProxyURLRequest.create( 35 | partialKey: self.publishableKey, 36 | serviceURL: self.serviceURL, 37 | clientID: self.clientID, 38 | proxyPath: "/breadcrumb", 39 | body: try Payload( 40 | breadcrumbContext: context, 41 | errorMessage: errorMessage 42 | ).serialize(), 43 | verb: .post, 44 | secondsToWait: 60, 45 | contentType: "application/json" 46 | ) { 47 | _ = try? await BackgroundNetworker.makeRequestAndWaitForData( 48 | self.urlSession, 49 | request 50 | ) 51 | } 52 | } 53 | } 54 | 55 | extension RemoteLoggerService { 56 | fileprivate struct Payload: Encodable { 57 | let breadcrumbContext: String 58 | let errorMessage: String? 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateCreateModelRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateCreateModelRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/7/24. 6 | // 7 | 8 | import Foundation 9 | 10 | struct ReplicateCreateModelRequestBody: Encodable { 11 | let description: String 12 | let hardware: String? 13 | let name: String 14 | let owner: String 15 | let visibility: ReplicateModelVisibility 16 | } 17 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateDeepSeekVL7BInputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateDeepSeekVL7BInputSchema.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/5/25. 6 | // 7 | 8 | import Foundation 9 | 10 | /// https://replicate.com/deepseek-ai/deepseek-vl-7b-base?output=json 11 | public struct ReplicateDeepSeekVL7BInputSchema: Encodable { 12 | // Required 13 | 14 | /// Input image 15 | public let image: URL 16 | 17 | // Optional 18 | 19 | /// Maximum number of tokens to generate 20 | /// Default: 512 21 | public let maxNewTokens: Int? 22 | 23 | /// Input prompt 24 | /// Default: "Describe this image" 25 | public let prompt: String? 26 | 27 | 28 | private enum CodingKeys: String, CodingKey { 29 | case image 30 | case maxNewTokens = "max_new_tokens" 31 | case prompt 32 | } 33 | 34 | // This memberwise initializer is autogenerated. 35 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 36 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 37 | public init( 38 | image: URL, 39 | maxNewTokens: Int? = nil, 40 | prompt: String? = nil 41 | ) { 42 | self.image = image 43 | self.maxNewTokens = maxNewTokens 44 | self.prompt = prompt 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateError.swift: -------------------------------------------------------------------------------- 1 | import Foundation 2 | 3 | public enum ReplicateError: LocalizedError { 4 | case predictionCanceled 5 | case predictionDidNotIncludeOutput 6 | case predictionDidNotIncludeURL 7 | case predictionFailed(String?) 8 | case missingModelURL 9 | case reachedRetryLimit 10 | 11 | public var errorDescription: String? 
{ 12 | switch self { 13 | case .predictionCanceled: 14 | return "The prediction was canceled" 15 | case .predictionDidNotIncludeOutput: 16 | return "A prediction was successful, but replicate did not include the output schema" 17 | case .predictionDidNotIncludeURL: 18 | return "A prediction was created, but replicate did not respond with a URL to poll" 19 | case .predictionFailed(let message): 20 | return message ?? "The prediction failed with an unspecificed error from replicate." 21 | case .missingModelURL: 22 | return "The replicate model does not contain a URL" 23 | case .reachedRetryLimit: 24 | return "Reached secondsToWait without the prediction completing" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFileUploadRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFile.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/9/24. 6 | // 7 | 8 | import Foundation 9 | 10 | internal struct ReplicateFileUploadRequestBody: MultipartFormEncodable { 11 | 12 | /// The binary contents of the file 13 | let contents: Data 14 | 15 | /// The file mime type 16 | let contentType: String 17 | 18 | /// The name of the file. I believe this does not get preserved on replicate's CDN. Can it be removed? 19 | let fileName: String 20 | 21 | var formFields: [FormField] { 22 | return [ 23 | .fileField( 24 | name: "content", 25 | content: self.contents, 26 | contentType: self.contentType, 27 | filename: self.fileName 28 | ) 29 | ] 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFileUploadResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFileUploadResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/9/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct ReplicateFileUploadResponseBody: Decodable { 11 | public let contentType: String? 12 | public let checksums: Checksums? 13 | public let createdAt: String? 14 | public let etag: String? 15 | public let expiresAt: String? 16 | public let id: String? 17 | public let name: String? 18 | public let size: Int? 
19 | public let urls: ActionURLs 20 | 21 | private enum CodingKeys: String, CodingKey { 22 | case contentType = "content_type" 23 | case checksums 24 | case createdAt = "created_at" 25 | case etag 26 | case expiresAt = "expires_at" 27 | case id 28 | case name 29 | case size 30 | case urls 31 | } 32 | 33 | public init(contentType: String?, checksums: Checksums?, createdAt: String?, etag: String?, expiresAt: String?, id: String?, name: String?, size: Int?, urls: ActionURLs) { 34 | self.contentType = contentType 35 | self.checksums = checksums 36 | self.createdAt = createdAt 37 | self.etag = etag 38 | self.expiresAt = expiresAt 39 | self.id = id 40 | self.name = name 41 | self.size = size 42 | self.urls = urls 43 | } 44 | } 45 | 46 | extension ReplicateFileUploadResponseBody { 47 | public struct Checksums: Decodable { 48 | public let md5: String 49 | public let sha256: String 50 | 51 | public init(md5: String, sha256: String) { 52 | self.md5 = md5 53 | self.sha256 = sha256 54 | } 55 | } 56 | } 57 | 58 | extension ReplicateFileUploadResponseBody { 59 | public struct ActionURLs: Decodable { 60 | public let get: URL 61 | 62 | public init(get: URL) { 63 | self.get = get 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFluxDevOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxDevOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/28/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Output schema for use with requests to Black Forest Lab's Flux Dev model: 11 | /// https://replicate.com/black-forest-labs/flux-dev/api/schema#output-schema 12 | public typealias ReplicateFluxDevOutputSchema = [URL] 13 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFluxKontextInputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxKontextInputSchema.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 5/31/25. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct ReplicateFluxKontextInputSchema: Encodable { 11 | // Required 12 | 13 | /// Image to use as reference. Must be a URI (jpeg, png, gif, or webp). 14 | public let inputImage: URL 15 | 16 | /// Text description of what you want to generate, or the instruction on how to edit the given image. 17 | public let prompt: String 18 | 19 | // Optional 20 | 21 | /// Aspect ratio of the generated image. 22 | /// Use `"match_input_image"` to match the aspect ratio of the input image. 23 | public let aspectRatio: String? 24 | 25 | /// Random seed for reproducible generation. 26 | public let seed: Int? 27 | 28 | private enum CodingKeys: String, CodingKey { 29 | case inputImage = "input_image" 30 | case prompt 31 | case aspectRatio = "aspect_ratio" 32 | case seed 33 | } 34 | 35 | public init( 36 | inputImage: URL, 37 | prompt: String, 38 | aspectRatio: String? = nil, 39 | seed: Int? = nil 40 | ) { 41 | self.inputImage = inputImage 42 | self.prompt = prompt 43 | self.aspectRatio = aspectRatio 44 | self.seed = seed 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFluxProOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxProOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/6/24. 
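// Illustrative usage sketch (hypothetical helper, not part of the library API):
// building a Flux Kontext input for an image edit. The image URL and prompt are
// made-up placeholder values.
private func exampleKontextInput() -> ReplicateFluxKontextInputSchema? {
    guard let sourceImage = URL(string: "https://example.com/source.png") else {
        return nil
    }
    return ReplicateFluxKontextInputSchema(
        inputImage: sourceImage,
        prompt: "Replace the background with a sunset over the ocean",
        aspectRatio: "match_input_image",
        seed: 42
    )
}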
6 | // 7 | 8 | import Foundation 9 | 10 | /// Output schema for use with requests to Black Forest Lab's Flux Pro model: 11 | /// https://replicate.com/black-forest-labs/flux-pro/api/schema#output-schema 12 | public typealias ReplicateFluxProOutputSchema = URL 13 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFluxProUltraInputSchema_v1_1.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxProUltraInputSchemaV1_1.swift 3 | // 4 | // 5 | // Created by Ronald Mannak on 11/10/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Input schema for use with requests to Black Forest Lab's Pro Ultra model: 11 | /// https://replicate.com/black-forest-labs/flux-1.1-pro-ultra/api/schema 12 | public struct ReplicateFluxProUltraInputSchema_v1_1: Encodable { 13 | // Required 14 | 15 | /// Text prompt for image generation 16 | public let prompt: String 17 | 18 | // Optional 19 | 20 | /// Aspect ratio of the image between 21:9 and 9:21 21 | /// 22 | /// default: 1:1 (BlackForest's default is 16:9) 23 | public let aspectRatio: String? 24 | 25 | /// Format of the output images. 26 | /// 27 | /// Valid choices: jpeg, png 28 | /// default: jpg 29 | public let outputFormat: OutputFormat? 30 | 31 | /// Tolerance level for input and output moderation. 32 | /// 33 | /// Between 0 and 6, 0 being most strict, 6 being least strict. 34 | /// Default 2 35 | public let safetyTolerance: Int? 36 | 37 | /// Random seed. Set for reproducible generation 38 | public let seed: Int? 39 | 40 | /// Generate less processed, more natural-looking images 41 | /// default: false 42 | public let raw: Bool? 43 | 44 | private enum CodingKeys: String, CodingKey { 45 | case aspectRatio = "aspect_ratio" 46 | case outputFormat = "output_format" 47 | case prompt 48 | case safetyTolerance = "safety_tolerance" 49 | case seed 50 | case raw 51 | } 52 | 53 | // This memberwise initializer is autogenerated. 54 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 55 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 56 | public init( 57 | prompt: String, 58 | aspectRatio: String? = nil, 59 | height: Int? = nil, 60 | outputFormat: OutputFormat? = nil, 61 | safetyTolerance: Int? = nil, 62 | seed: Int? = nil, 63 | raw: Bool? = nil 64 | ) { 65 | self.prompt = prompt 66 | self.aspectRatio = aspectRatio 67 | self.outputFormat = outputFormat 68 | self.safetyTolerance = safetyTolerance 69 | self.seed = seed 70 | self.raw = raw 71 | } 72 | } 73 | 74 | // MARK: - InputSchema.OutputFormat 75 | extension ReplicateFluxProUltraInputSchema_v1_1 { 76 | public enum OutputFormat: String, Encodable { 77 | case jpg 78 | case png 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFluxSchnellInputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxSchnellInputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/28/24. 
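// Illustrative usage sketch (hypothetical helper, not part of the library API):
// an ultra-wide Flux 1.1 Pro Ultra request with raw mode enabled. All values are
// made-up examples; note that the initializer's `height` parameter above is
// accepted but never stored or encoded.
private func exampleFluxProUltraInput() -> ReplicateFluxProUltraInputSchema_v1_1 {
    return ReplicateFluxProUltraInputSchema_v1_1(
        prompt: "A lighthouse on a basalt cliff at dawn, volumetric fog",
        aspectRatio: "21:9",
        outputFormat: .png,
        safetyTolerance: 2,
        seed: 1234,
        raw: true
    )
}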
6 | // 7 | 8 | import Foundation 9 | 10 | /// Input schema for use with requests to Black Forest Lab's Flux Schnell model: 11 | /// https://replicate.com/black-forest-labs/flux-schnell/api/schema#input-schema 12 | public struct ReplicateFluxSchnellInputSchema: Encodable { 13 | // Required 14 | 15 | /// Prompt for generated image 16 | public let prompt: String 17 | 18 | // Optional 19 | 20 | /// Aspect ratio for the generated image 21 | /// Valid ratios are: `1:1`, `16:9`, `21:9`, `2:3`, `3:2`, `4:5`, `5:4`, `9:16`, `9:21` 22 | public let aspectRatio: String? 23 | 24 | /// Disable safety checker for generated images. 25 | public let disableSafetyChecker: Bool? 26 | 27 | /// Run faster predictions with model optimized for speed (currently fp8 quantized); disable to run in original bf16 28 | /// Defaults to `true` 29 | public let goFast: Bool? 30 | 31 | /// Number of outputs to generate 32 | public let numOutputs: Int? 33 | 34 | /// Format of the output images 35 | public let outputFormat: OutputFormat? 36 | 37 | /// Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. 38 | /// Not relevant for .png outputs 39 | public let outputQuality: Int? 40 | 41 | /// Random seed. Set for reproducible generation 42 | public let seed: Int? 43 | 44 | private enum CodingKeys: String, CodingKey { 45 | case aspectRatio = "aspect_ratio" 46 | case disableSafetyChecker = "disable_safety_checker" 47 | case goFast = "go_fast" 48 | case numOutputs = "num_outputs" 49 | case outputFormat = "output_format" 50 | case outputQuality = "output_quality" 51 | case prompt 52 | case seed 53 | } 54 | 55 | // This memberwise initializer is autogenerated. 56 | // To regenerate, use `cmd-shift-a` > Generate Memberwise Initializer 57 | // To format, place the cursor in the initializer's parameter list and use `ctrl-m` 58 | public init( 59 | prompt: String, 60 | aspectRatio: String? = nil, 61 | disableSafetyChecker: Bool? = nil, 62 | goFast: Bool? = nil, 63 | numOutputs: Int? = nil, 64 | outputFormat: ReplicateFluxSchnellInputSchema.OutputFormat? = nil, 65 | outputQuality: Int? = nil, 66 | seed: Int? = nil 67 | ) { 68 | self.prompt = prompt 69 | self.aspectRatio = aspectRatio 70 | self.disableSafetyChecker = disableSafetyChecker 71 | self.goFast = goFast 72 | self.numOutputs = numOutputs 73 | self.outputFormat = outputFormat 74 | self.outputQuality = outputQuality 75 | self.seed = seed 76 | } 77 | } 78 | 79 | // MARK: - InputSchema.OutputFormat 80 | extension ReplicateFluxSchnellInputSchema { 81 | public enum OutputFormat: String, Encodable { 82 | case jpg 83 | case png 84 | case webp 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateFluxSchnellOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxSchnellOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/6/24. 
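// Illustrative usage sketch (hypothetical helper, not part of the library API):
// a four-image Flux Schnell request in webp format. Values are made-up examples;
// fields left out fall back to the model's defaults.
private func exampleSchnellInput() -> ReplicateFluxSchnellInputSchema {
    return ReplicateFluxSchnellInputSchema(
        prompt: "Isometric pixel-art coffee shop interior, warm lighting",
        aspectRatio: "1:1",
        goFast: true,
        numOutputs: 4,
        outputFormat: .webp,
        outputQuality: 90
    )
}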
6 | // 7 | 8 | import Foundation 9 | 10 | /// Output schema for use with requests to Black Forest Lab's Flux Schnell model: 11 | /// https://replicate.com/black-forest-labs/flux-schnell/api/schema#output-schema 12 | public typealias ReplicateFluxSchnellOutputSchema = [URL] 13 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateModelResponseBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateModelResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/7/24. 6 | // 7 | 8 | import Foundation 9 | 10 | struct ReplicateModelResponseBody: Decodable { 11 | let description: String? 12 | let name: String? 13 | let owner: String? 14 | let url: URL? 15 | let visibility: ReplicateModelVisibility? 16 | 17 | public init(description: String?, name: String?, owner: String?, url: URL?, visibility: ReplicateModelVisibility?) { 18 | self.description = description 19 | self.name = name 20 | self.owner = owner 21 | self.url = url 22 | self.visibility = visibility 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateModelVisibility.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateModelVisibility.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/7/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public enum ReplicateModelVisibility: String, Codable { 11 | case `public` 12 | case `private` 13 | } 14 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicatePredictionRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicatePredictionRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/27/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// The request body for creating a Replicate prediction. 11 | /// 12 | /// This type is used for both community models and official models. 13 | /// When using with an official model, the `version` property can remain `nil`. 14 | /// 15 | /// Community model reference: https://replicate.com/docs/reference/http#predictions.create 16 | /// Official model reference: https://replicate.com/docs/reference/http#models.predictions.create 17 | public struct ReplicatePredictionRequestBody: Encodable { 18 | 19 | /// The replicate input schema, for example ReplicateSDXLInputSchema 20 | /// TThe input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or get the model version and look at its `openapi_schema` property. For example, `stability-ai/sdxl` takes `prompt` as an input. 21 | public let input: Encodable 22 | 23 | /// You do not need to set this field if you are using an official model. 24 | /// For community models, set it to the ID of the model version that you want to run. 25 | public let version: String? 26 | 27 | public init( 28 | input: any Encodable, 29 | version: String? 
= nil 30 | ) { 31 | self.input = input 32 | self.version = version 33 | } 34 | 35 | private enum RootKey: CodingKey { 36 | case input 37 | case version 38 | } 39 | 40 | public func encode(to encoder: any Encoder) throws { 41 | var container = encoder.container(keyedBy: RootKey.self) 42 | try container.encode(self.input, forKey: .input) 43 | try container.encodeIfPresent(self.version, forKey: .version) 44 | } 45 | } 46 | 47 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateSDXLOutputSchema.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateSDXLOutputSchema.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/27/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// Output schema for use with requests to Stability AI's SDXL model: 11 | /// https://replicate.com/stability-ai/sdxl/api/schema#output-schema 12 | typealias ReplicateSDXLOutputSchema = [URL] 13 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateSynchronousAPIOutput.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateSynchronousAPIOutput.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 10/19/24. 6 | // 7 | 8 | import Foundation 9 | 10 | @available(*, deprecated, message: "Use ReplicatePrediction as a replacement") 11 | public typealias ReplicateSynchronousAPIOutput = ReplicateSynchronousResponseBody 12 | 13 | @available(*, deprecated, message: "Use ReplicatePrediction as a replacement") 14 | public struct ReplicateSynchronousResponseBody: Decodable { 15 | public let error: String? 16 | 17 | public let output: T? 18 | 19 | public let status: String? 20 | 21 | /// The location of a ReplicatePrediction 22 | public let predictionResultURL: URL? 23 | 24 | private enum CodingKeys: String, CodingKey { 25 | case error 26 | case output 27 | case status 28 | case urls 29 | } 30 | 31 | private enum NestedKeys: String, CodingKey { 32 | case get 33 | } 34 | 35 | public init(from decoder: any Decoder) throws { 36 | let container = try decoder.container(keyedBy: CodingKeys.self) 37 | self.error = try container.decodeIfPresent(String.self, forKey: .error) 38 | self.status = try container.decodeIfPresent(String.self, forKey: .status) 39 | self.output = try container.decodeIfPresent(T.self, forKey: .output) 40 | let nestedContainer = try container.nestedContainer( 41 | keyedBy: NestedKeys.self, 42 | forKey: .urls 43 | ) 44 | self.predictionResultURL = try nestedContainer.decode(URL?.self, forKey: .get) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /Sources/AIProxy/Replicate/ReplicateTrainingRequestBody.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateTrainingRequestBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/8/24. 
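// Illustrative usage sketch (hypothetical helper, not part of the library API):
// the same prediction body type serves official and community models. The
// version string below is a placeholder, not a real Replicate model version.
private func examplePredictionBodies() -> (official: ReplicatePredictionRequestBody, community: ReplicatePredictionRequestBody) {
    let input = ReplicateFluxSchnellInputSchema(prompt: "A watercolor fox")
    // Official model: omit `version` entirely.
    let official = ReplicatePredictionRequestBody(input: input)
    // Community model: supply the model version identifier you want to run.
    let community = ReplicatePredictionRequestBody(
        input: input,
        version: "<community-model-version-id>"
    )
    return (official, community)
}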
6 | // 7 | 8 | import Foundation 9 | 10 | public struct ReplicateTrainingRequestBody: Encodable { 11 | 12 | public let destination: String 13 | public let input: T 14 | 15 | public init(destination: String, input: T) { 16 | self.destination = destination 17 | self.input = input 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /Sources/AIProxy/Resources/PrivacyInfo.xcprivacy: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | NSPrivacyCollectedDataTypes 6 | 7 | 8 | NSPrivacyCollectedDataType 9 | NSPrivacyCollectedDataTypeDeviceID 10 | NSPrivacyCollectedDataTypeLinked 11 | 12 | NSPrivacyCollectedDataTypeTracking 13 | 14 | NSPrivacyCollectedDataTypePurposes 15 | 16 | AI functionality usage limits 17 | NSPrivacyCollectedDataTypePurposeAnalytics 18 | 19 | 20 | 21 | NSPrivacyCollectedDataType 22 | IP Address 23 | NSPrivacyCollectedDataTypeLinked 24 | 25 | NSPrivacyCollectedDataTypeTracking 26 | 27 | NSPrivacyCollectedDataTypePurposes 28 | 29 | AI functionality usage limits 30 | NSPrivacyCollectedDataTypePurposeAnalytics 31 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /Sources/AIProxy/RuntimeInfo.swift: -------------------------------------------------------------------------------- 1 | // 2 | // RuntimeInfo.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 5/21/25. 6 | // 7 | 8 | import Foundation 9 | 10 | #if canImport(UIKit) 11 | import UIKit 12 | #endif 13 | 14 | struct RuntimeInfo { 15 | let appName: String 16 | let appVersion: String 17 | let buildNumber: String 18 | let bundleID: String 19 | let deviceModel: String 20 | let osVersion: String 21 | let systemName: String 22 | 23 | static var current: RuntimeInfo = { 24 | let bundle = Bundle.main 25 | let infoDict = bundle.infoDictionary ?? [:] 26 | 27 | return RuntimeInfo( 28 | appName: infoDict["CFBundleName"] as? String ?? "Unknown", 29 | appVersion: infoDict["CFBundleShortVersionString"] as? String ?? "Unknown", 30 | buildNumber: infoDict["CFBundleVersion"] as? String ?? "Unknown", 31 | bundleID: bundle.bundleIdentifier ?? "Unknown", 32 | deviceModel: getDeviceModel(), 33 | osVersion: getOSVersion(), 34 | systemName: getSystemName() 35 | ) 36 | }() 37 | } 38 | 39 | private func getDeviceModel() -> String { 40 | #if os(macOS) 41 | let sysCallName = "hw.model" 42 | #else 43 | let sysCallName = "hw.machine" 44 | #endif 45 | var size: size_t = 0 46 | guard sysctlbyname(sysCallName, nil, &size, nil, 0) == noErr, size > 0 else { 47 | return "Unknown" 48 | } 49 | 50 | var machine = [CChar](repeating: 0, count: size) 51 | guard sysctlbyname(sysCallName, &machine, &size, nil, 0) == noErr else { 52 | return "Unknown" 53 | } 54 | 55 | return String(cString: machine) 56 | } 57 | 58 | private func getSystemName() -> String { 59 | #if os(macOS) 60 | return "macOS" 61 | #elseif os(watchOS) 62 | return "watchOS" 63 | #else 64 | return UIDevice.current.systemName 65 | #endif 66 | } 67 | 68 | private func getOSVersion() -> String { 69 | let osVersion = ProcessInfo.processInfo.operatingSystemVersion 70 | return "\(osVersion.majorVersion).\(osVersion.minorVersion).\(osVersion.patchVersion)" 71 | } 72 | -------------------------------------------------------------------------------- /Sources/AIProxy/Serializable.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Serializable.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/14/24. 
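// Illustrative usage sketch (hypothetical helper, not part of the library API):
// the Encodable extension below adds `serialize` helpers to every request body.
// The two overloads differ only in return type, so the explicit annotations pick
// between Data and String.
private func exampleSerialize(body: OpenAITextToSpeechRequestBody) throws {
    let wireData: Data = try body.serialize()
    let prettyJSON: String = try body.serialize(pretty: true)
    _ = (wireData, prettyJSON)
}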
6 | // 7 | 8 | import Foundation 9 | 10 | extension Encodable { 11 | func serialize(pretty: Bool = false) throws -> Data { 12 | let pretty = pretty || AIProxy.printRequestBodies 13 | let encoder = JSONEncoder() 14 | encoder.outputFormatting = [.sortedKeys] 15 | if pretty { 16 | encoder.outputFormatting.insert(.prettyPrinted) 17 | } 18 | return try encoder.encode(self) 19 | } 20 | 21 | func serialize(pretty: Bool = false) throws -> String { 22 | let data: Data = try self.serialize(pretty: pretty) 23 | guard let str = String(data: data, encoding: .utf8) else { 24 | throw AIProxyError.assertion("Could not get utf8 string representation of data") 25 | } 26 | return str 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /Sources/AIProxy/ServiceMixin.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ServiceMixin.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 4/24/25. 6 | // 7 | 8 | import Foundation 9 | 10 | protocol ServiceMixin { 11 | var urlSession: URLSession { get } 12 | } 13 | 14 | extension ServiceMixin { 15 | func makeRequestAndDeserializeResponse(_ request: URLRequest) async throws -> T { 16 | if AIProxy.printRequestBodies { 17 | printRequestBody(request) 18 | } 19 | let (data, _) = try await BackgroundNetworker.makeRequestAndWaitForData( 20 | self.urlSession, 21 | request 22 | ) 23 | if AIProxy.printResponseBodies { 24 | printBufferedResponseBody(data) 25 | } 26 | return try T.deserialize(from: data) 27 | } 28 | 29 | func makeRequestAndDeserializeStreamingChunks(_ request: URLRequest) async throws -> AsyncCompactMapSequence, T> { 30 | if AIProxy.printRequestBodies { 31 | printRequestBody(request) 32 | } 33 | let (asyncBytes, _) = try await BackgroundNetworker.makeRequestAndWaitForAsyncBytes( 34 | self.urlSession, 35 | request 36 | ) 37 | return asyncBytes.lines.compactMap { 38 | if AIProxy.printResponseBodies { 39 | printStreamingResponseChunk($0) 40 | } 41 | return T.deserialize(fromLine: $0) 42 | } 43 | } 44 | } 45 | 46 | private extension URLRequest { 47 | var readableURL: String { 48 | return self.url?.absoluteString ?? "" 49 | } 50 | 51 | var readableBody: String { 52 | guard let body = self.httpBody else { 53 | return "None" 54 | } 55 | 56 | return String(data: body, encoding: .utf8) ?? "None" 57 | } 58 | } 59 | 60 | private func printRequestBody(_ request: URLRequest) { 61 | logIf(.debug)?.debug( 62 | """ 63 | Making a request to \(request.readableURL) 64 | with request body: 65 | \(request.readableBody) 66 | """ 67 | ) 68 | } 69 | 70 | private func printBufferedResponseBody(_ data: Data) { 71 | logIf(.debug)?.debug( 72 | """ 73 | Received response body: 74 | \(String(data: data, encoding: .utf8) ?? "") 75 | """ 76 | ) 77 | } 78 | 79 | private func printStreamingResponseChunk(_ chunk: String) { 80 | logIf(.debug)?.debug( 81 | """ 82 | Received streaming response chunk: 83 | \(chunk) 84 | """ 85 | ) 86 | } 87 | -------------------------------------------------------------------------------- /Sources/AIProxy/SingleOrPartsEncodable.swift: -------------------------------------------------------------------------------- 1 | // 2 | // SingleOrPartsEncodable.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/16/25. 
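// Illustrative sketch (hypothetical type, not library code): a conformer that
// encodes either a bare string or an array of strings under the same key, by
// delegating to the default `encode(to:)` supplied in the extension below.
private enum ExampleTextOrParts: SingleOrPartsEncodable, Encodable {
    case single(String)
    case parts([String])

    var encodableItem: any Encodable {
        switch self {
        case .single(let text): return text
        case .parts(let parts): return parts
        }
    }
}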
6 | // 7 | 8 | protocol SingleOrPartsEncodable { 9 | var encodableItem: any Encodable { get } 10 | } 11 | 12 | extension SingleOrPartsEncodable { 13 | public func encode(to encoder: any Encoder) throws { 14 | var container = encoder.singleValueContainer() 15 | try container.encode(self.encodableItem) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /Sources/AIProxy/StabilityAI/StabilityAIImageResponse.swift: -------------------------------------------------------------------------------- 1 | // 2 | // StabilityAIUltraResponseBody.swift 3 | // 4 | // 5 | // Created by Lou Zell on 7/29/24. 6 | // 7 | 8 | import Foundation 9 | 10 | /// This type diverges a touch from the other response models in this package. Stability 11 | /// includes pertinent information in the response header, and the response body is the image 12 | /// binary. This type encompasses both. 13 | /// 14 | /// See the "Response Headers" section here: 15 | /// https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1ultra/post 16 | public struct StabilityAIImageResponse { 17 | 18 | /// The image data 19 | public let imageData: Data 20 | 21 | /// The format of the generated image. 22 | /// 23 | /// To receive the bytes of the image directly, specify `image/*` in the accept header. To 24 | /// receive the bytes base64 encoded inside of a JSON payload, specify `application/json`. 25 | public let contentType: String? 26 | 27 | /// Indicates the reason the generation finished. 28 | /// 29 | /// SUCCESS = successful generation. 30 | /// CONTENT_FILTERED = successful generation, however the output violated our content 31 | /// moderation policy and has been blurred as a result. 32 | /// 33 | /// NOTE: This header is absent on JSON encoded responses because it is present in the body 34 | /// as finish_reason. 35 | public let finishReason: String? 36 | 37 | /// The seed used as random noise for this generation. 38 | /// Example: "343940597" 39 | /// 40 | /// NOTE: This header is absent on JSON encoded responses because it is present in the body 41 | /// as seed. 42 | public let seed: String? 43 | 44 | public init(imageData: Data, contentType: String?, finishReason: String?, seed: String?) { 45 | self.imageData = imageData 46 | self.contentType = contentType 47 | self.finishReason = finishReason 48 | self.seed = seed 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /Sources/AIProxy/StabilityAI/StabilityAIService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // StabilityAIService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/15/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol StabilityAIService { 11 | 12 | /// Initiates a request to /v2beta/stable-image/generate/ultra 13 | /// 14 | /// - Parameters: 15 | /// - body: The request body to send to aiproxy and StabilityAI. See this reference: 16 | /// https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1ultra/post 17 | /// - Returns: The response as StabilityAIUltraResponse, wth image binary data stored on 18 | /// the `imageData` property 19 | func ultraRequest( 20 | body: StabilityAIUltraRequestBody 21 | ) async throws -> StabilityAIImageResponse 22 | 23 | 24 | /// Initiates a request to /v2beta/stable-image/generate/sd3 25 | /// 26 | /// - Parameters: 27 | /// - body: The request body to send to aiproxy and StabilityAI. 
See this reference: 28 | /// https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1sd3/post 29 | /// - Returns: The response as StabilityAIImageResponse, with image binary data stored on 30 | /// the `imageData` property 31 | func stableDiffusionRequest( 32 | body: StabilityAIStableDiffusionRequestBody 33 | ) async throws -> StabilityAIImageResponse 34 | } 35 | -------------------------------------------------------------------------------- /Sources/AIProxy/TogetherAI/TogetherAIChatCompletionStreamingChunk.swift: -------------------------------------------------------------------------------- 1 | // 2 | // TogetherAIChatCompletionStreamingChunk.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/16/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct TogetherAIChatCompletionStreamingChunk: Decodable { 11 | /// Generated choices 12 | public let choices: [TogetherAIStreamingChunkChoice] 13 | 14 | /// Time in seconds since unix epoch 15 | public let created: Int 16 | 17 | /// The model used to create the chunk 18 | public let model: String 19 | 20 | /// Usage information, which is only included on the last chunk of the stream 21 | public let usage: TogetherAIChatUsage? 22 | } 23 | 24 | public struct TogetherAIStreamingChunkChoice: Decodable { 25 | /// The text for this generation is within the 'delta' property 26 | public let delta: TogetherAIStreamingChunkDelta 27 | 28 | /// The reason the stream ended 29 | public let finishReason: TogetherAIFinishReason? 30 | } 31 | 32 | public struct TogetherAIStreamingChunkDelta: Decodable { 33 | /// The text of the generation 34 | public let content: String 35 | 36 | /// The role of the generation 37 | public let role: TogetherAIRole 38 | } 39 | -------------------------------------------------------------------------------- /Sources/AIProxy/TogetherAI/TogetherAIDirectService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // TogetherAIDirectService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/16/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class TogetherAIDirectService: TogetherAIService, DirectService { 11 | private let unprotectedAPIKey: String 12 | 13 | /// This initializer is not public on purpose. 14 | /// Customers are expected to use the factory `AIProxy.togetherAIDirectService` defined in AIProxy.swift 15 | internal init(unprotectedAPIKey: String) { 16 | self.unprotectedAPIKey = unprotectedAPIKey 17 | } 18 | 19 | /// Initiates a non-streaming chat completion request to /v1/chat/completions. 20 | /// 21 | /// - Parameters: 22 | /// - body: The request body to send to aiproxy and Together.ai. See this reference: 23 | /// https://docs.together.ai/reference/completions-1 24 | /// - Returns: A ChatCompletionResponse.
See this reference: 25 | /// https://platform.openai.com/docs/api-reference/chat/object 26 | public func chatCompletionRequest( 27 | body: TogetherAIChatCompletionRequestBody, 28 | secondsToWait: UInt 29 | ) async throws -> TogetherAIChatCompletionResponseBody { 30 | var body = body 31 | body.stream = false 32 | let request = try AIProxyURLRequest.createDirect( 33 | baseURL: "https://api.together.xyz", 34 | path: "/v1/chat/completions", 35 | body: try body.serialize(), 36 | verb: .post, 37 | secondsToWait: secondsToWait, 38 | contentType: "application/json", 39 | additionalHeaders: [ 40 | "Authorization": "Bearer \(self.unprotectedAPIKey)" 41 | ] 42 | ) 43 | return try await self.makeRequestAndDeserializeResponse(request) 44 | } 45 | 46 | /// Initiates a streaming chat completion request to /v1/chat/completions. 47 | /// 48 | /// - Parameters: 49 | /// - body: The request body to send to aiproxy and Together.ai. See this reference: 50 | /// https://docs.together.ai/reference/completions-1 51 | /// - Returns: A chat completion response. See the reference above. 52 | public func streamingChatCompletionRequest( 53 | body: TogetherAIChatCompletionRequestBody 54 | ) async throws -> AsyncCompactMapSequence, OpenAIChatCompletionChunk> { 55 | var body = body 56 | body.stream = true 57 | let request = try AIProxyURLRequest.createDirect( 58 | baseURL: "https://api.together.xyz", 59 | path: "/v1/chat/completions", 60 | body: try body.serialize(), 61 | verb: .post, 62 | secondsToWait: 60, 63 | contentType: "application/json", 64 | additionalHeaders: [ 65 | "Authorization": "Bearer \(self.unprotectedAPIKey)" 66 | ] 67 | ) 68 | return try await self.makeRequestAndDeserializeStreamingChunks(request) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /Sources/AIProxy/TogetherAI/TogetherAIProxiedService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // TogetherAIService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/14/24. 6 | // 7 | 8 | import Foundation 9 | 10 | open class TogetherAIProxiedService: TogetherAIService, ProxiedService { 11 | private let partialKey: String 12 | private let serviceURL: String 13 | private let clientID: String? 14 | 15 | /// This initializer is not public on purpose. 16 | /// Customers are expected to use the factory `AIProxy.togetherAIService` defined in AIProxy.swift 17 | internal init(partialKey: String, serviceURL: String, clientID: String?) { 18 | self.partialKey = partialKey 19 | self.serviceURL = serviceURL 20 | self.clientID = clientID 21 | } 22 | 23 | /// Initiates a non-streaming chat completion request to /v1/chat/completions. 24 | /// 25 | /// - Parameters: 26 | /// - body: The request body to send to aiproxy and Together.ai. See this reference: 27 | /// https://docs.together.ai/reference/completions-1 28 | /// - Returns: A ChatCompletionResponse. 
See this reference: 29 | /// https://platform.openai.com/docs/api-reference/chat/object 30 | public func chatCompletionRequest( 31 | body: TogetherAIChatCompletionRequestBody, 32 | secondsToWait: UInt 33 | ) async throws -> TogetherAIChatCompletionResponseBody { 34 | var body = body 35 | body.stream = false 36 | let request = try await AIProxyURLRequest.create( 37 | partialKey: self.partialKey, 38 | serviceURL: self.serviceURL, 39 | clientID: self.clientID, 40 | proxyPath: "/v1/chat/completions", 41 | body: try body.serialize(), 42 | verb: .post, 43 | secondsToWait: secondsToWait, 44 | contentType: "application/json" 45 | ) 46 | return try await self.makeRequestAndDeserializeResponse(request) 47 | } 48 | 49 | /// Initiates a streaming chat completion request to /v1/chat/completions. 50 | /// 51 | /// - Parameters: 52 | /// - body: The request body to send to aiproxy and Together.ai. See this reference: 53 | /// https://docs.together.ai/reference/completions-1 54 | /// - Returns: A chat completion response. See the reference above. 55 | public func streamingChatCompletionRequest( 56 | body: TogetherAIChatCompletionRequestBody 57 | ) async throws -> AsyncCompactMapSequence, OpenAIChatCompletionChunk> { 58 | var body = body 59 | body.stream = true 60 | let request = try await AIProxyURLRequest.create( 61 | partialKey: self.partialKey, 62 | serviceURL: self.serviceURL, 63 | clientID: self.clientID, 64 | proxyPath: "/v1/chat/completions", 65 | body: try body.serialize(), 66 | verb: .post, 67 | secondsToWait: 60, 68 | contentType: "application/json" 69 | ) 70 | return try await self.makeRequestAndDeserializeStreamingChunks(request) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /Sources/AIProxy/TogetherAI/TogetherAIService.swift: -------------------------------------------------------------------------------- 1 | // 2 | // TogetherAIService.swift 3 | // 4 | // 5 | // Created by Lou Zell on 12/16/24. 6 | // 7 | 8 | import Foundation 9 | 10 | public protocol TogetherAIService { 11 | /// Initiates a non-streaming chat completion request to /v1/chat/completions. 12 | /// 13 | /// - Parameters: 14 | /// - body: The request body to send to aiproxy and Together.ai. See this reference: 15 | /// https://docs.together.ai/reference/completions-1 16 | /// - Returns: A ChatCompletionResponse. See this reference: 17 | /// https://platform.openai.com/docs/api-reference/chat/object 18 | func chatCompletionRequest( 19 | body: TogetherAIChatCompletionRequestBody, 20 | secondsToWait: UInt 21 | ) async throws -> TogetherAIChatCompletionResponseBody 22 | 23 | /// Initiates a streaming chat completion request to /v1/chat/completions. 24 | /// 25 | /// - Parameters: 26 | /// - body: The request body to send to aiproxy and Together.ai. See this reference: 27 | /// https://docs.together.ai/reference/completions-1 28 | /// - Returns: A chat completion response. See the reference above. 
29 | func streamingChatCompletionRequest( 30 | body: TogetherAIChatCompletionRequestBody 31 | ) async throws -> AsyncCompactMapSequence, OpenAIChatCompletionChunk> 32 | } 33 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/AnthropicMessageResponseTests.swift: -------------------------------------------------------------------------------- 1 | import XCTest 2 | import Foundation 3 | @testable import AIProxy 4 | 5 | 6 | final class AnthropicMessageResponseTests: XCTestCase { 7 | 8 | func testMessageResponseIsDecodable() { 9 | let sampleResponse = """ 10 | { 11 | "id": "msg_01XVo4dCtcbBTds8pG1BC6Xf", 12 | "type": "message", 13 | "role": "assistant", 14 | "model": "claude-3-5-sonnet-20240620", 15 | "content": [ 16 | { 17 | "type": "text", 18 | "text": "Hello!" 19 | } 20 | ], 21 | "stop_reason": "end_turn", 22 | "stop_sequence": null, 23 | "usage": { 24 | "input_tokens": 9, 25 | "output_tokens": 28 26 | } 27 | } 28 | """ 29 | let decoder = JSONDecoder() 30 | 31 | let res = try! decoder.decode( 32 | AnthropicMessageResponseBody.self, 33 | from: sampleResponse.data(using: .utf8)! 34 | ) 35 | switch res.content.first! { 36 | case .text(let string): 37 | XCTAssertEqual("Hello!", string) 38 | default: 39 | XCTFail() 40 | } 41 | } 42 | 43 | func testMessageResponseWithToolUseIsDecodable() { 44 | let sampleResponse = """ 45 | { 46 | "id": "msg_011UvQXaEMMwmN4kapqwUpN2", 47 | "type": "message", 48 | "role": "assistant", 49 | "model": "claude-3-5-sonnet-20240620", 50 | "content": [ 51 | { 52 | "type": "text", 53 | "text": "To get Nvidia's stock price, we need to use the stock symbol for Nvidia, which is NVDA. I'll use the available function to retrieve this information for you." 54 | }, 55 | { 56 | "type": "tool_use", 57 | "id": "toolu_01GZovj2vHs5AKsNAshWFgUT", 58 | "name": "get_stock_symbol", 59 | "input": { 60 | "ticker": "NVDA" 61 | } 62 | } 63 | ], 64 | "stop_reason": "tool_use", 65 | "stop_sequence": null, 66 | "usage": { 67 | "input_tokens": 389, 68 | "output_tokens": 96 69 | } 70 | } 71 | """ 72 | 73 | let res = try! AnthropicMessageResponseBody.deserialize(from: sampleResponse.data(using: .utf8)!) 74 | 75 | switch res.content.last! { 76 | case .toolUse(id: let id, name: let toolName, input: let input): 77 | XCTAssertEqual("get_stock_symbol", toolName) 78 | XCTAssertEqual("toolu_01GZovj2vHs5AKsNAshWFgUT", id) 79 | if case .string(let ticker) = input["ticker"] { 80 | XCTAssertEqual("NVDA", ticker) 81 | } else { 82 | XCTFail() 83 | } 84 | default: 85 | XCTFail() 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/AnthropicMessageStreamingChunkTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AnthropicMessageStreamingChunkTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/7/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | 13 | final class AnthropicMessageStreamingChunkTests: XCTestCase { 14 | 15 | func testDeltaBlockStartIsDecodable() { 16 | let serializedChunk = #"data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello! How"} }"# 17 | let deltaBlock = AnthropicMessageStreamingDeltaBlock.from(line: serializedChunk) 18 | switch deltaBlock?.delta { 19 | case .text(let txt): 20 | XCTAssertEqual("Hello! 
How", txt) 21 | default: 22 | XCTFail() 23 | } 24 | } 25 | 26 | func testContentBlockStartIsDecodable() { 27 | let serializedChunk = #"data: {"type":"content_block_start","index":1,"content_block":{"type":"tool_use","id":"toolu_01T1x1fJ34qAmk2tNTrN7Up6","name":"get_weather","input":{}}}"# 28 | let contentBlockStart = AnthropicMessageStreamingContentBlockStart.from(line: serializedChunk) 29 | switch contentBlockStart?.contentBlock { 30 | case .toolUse(name: let name): 31 | XCTAssertEqual("get_weather", name) 32 | default: 33 | XCTFail() 34 | } 35 | } 36 | } 37 | 38 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/DeepLTranslationRequestTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepLTranslationRequestTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 7/29/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class DeepLTranslationRequestTests: XCTestCase { 13 | 14 | func testBasicRequestBodyIsEncodable() { 15 | let translationRequestBody = DeepLTranslateRequestBody( 16 | targetLang: "ES", 17 | text: ["hello world"] 18 | ) 19 | let encoder = JSONEncoder() 20 | encoder.outputFormatting = .sortedKeys 21 | 22 | let jsonData = try! encoder.encode(translationRequestBody) 23 | XCTAssertEqual( 24 | #"{"target_lang":"ES","text":["hello world"]}"#, 25 | String(data: jsonData, encoding: .utf8)! 26 | ) 27 | } 28 | 29 | func testFullyPopulatedRequestBodyIsEncodable() { 30 | let translationRequestBody = DeepLTranslateRequestBody( 31 | targetLang: "ES", 32 | text: ["hello world"], 33 | context: "this is context", 34 | formality: .preferLess, 35 | glossaryID: "123", 36 | ignoreTags: ["
"], 37 | nonSplittingTags: ["
"], 38 | outlineDetection: true, 39 | preserveFormatting: true, 40 | sourceLang: "EN", 41 | splitSentences: .punctuation, 42 | splittingTags: ["
"], 43 | tagHandling: .xml 44 | ) 45 | let encoder = JSONEncoder() 46 | encoder.outputFormatting = .sortedKeys 47 | 48 | let jsonData = try! encoder.encode(translationRequestBody) 49 | XCTAssertEqual( 50 | #"{"context":"this is context","formality":"prefer_less","glossary_id":"123","ignore_tags":["
"],"non_splitting_tags":["
"],"outline_detection":true,"preserve_formatting":true,"source_lang":"EN","split_sentences":"nonewlines","splitting_tags":["
"],"tag_handling":"xml","target_lang":"ES","text":["hello world"]}"#, 51 | String(data: jsonData, encoding: .utf8)! 52 | ) 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/DeepLTranslationResponseTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // DeepLTranslationResponseTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/3/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class DeepLTranslationResponseTests: XCTestCase { 13 | 14 | func testResponseBodyIsDecodable() { 15 | let sampleResponse = """ 16 | {"translations":[{"detected_source_language":"EN","text":"hola mundo"}]} 17 | """ 18 | let translationModel = try! JSONDecoder().decode( 19 | DeepLTranslateResponseBody.self, 20 | from: sampleResponse.data(using: .utf8)! 21 | ) 22 | 23 | XCTAssertEqual("hola mundo", translationModel.translations.first!.text) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/ElevenLabsSpeechToTextResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ElevenLabsSpeechToTextResponseBodyTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 4/21/25. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class ElevenLabsSpeechToTextResponseBodyTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let sampleResponse = """ 16 | { 17 | "language_code": "eng", 18 | "language_probability": 0.7772299647331238, 19 | "text": "[clicking] Hello, world.", 20 | "words": [ 21 | { 22 | "text": "[clicking]", 23 | "start": 0.079, 24 | "end": 0.78, 25 | "type": "audio_event" 26 | }, 27 | { 28 | "text": " ", 29 | "start": 0.78, 30 | "end": 0.979, 31 | "type": "spacing" 32 | }, 33 | { 34 | "text": "Hello,", 35 | "start": 0.979, 36 | "end": 1.499, 37 | "type": "word" 38 | }, 39 | { 40 | "text": " ", 41 | "start": 1.499, 42 | "end": 1.699, 43 | "type": "spacing" 44 | }, 45 | { 46 | "text": "world.", 47 | "start": 1.699, 48 | "end": 2.199, 49 | "type": "word" 50 | } 51 | ] 52 | } 53 | """ 54 | let res = try ElevenLabsSpeechToTextResponseBody.deserialize(from: sampleResponse) 55 | XCTAssertEqual("eng", res.languageCode) 56 | XCTAssertEqual("[clicking] Hello, world.", res.text) 57 | XCTAssertEqual(.audioEvent, res.words?.first?.type) 58 | XCTAssertEqual("[clicking]", res.words?.first?.text) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/FalFastSDXLResponseTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFastSDXLResponseTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/14/24. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class FalFastSDXLResponseTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let sampleResponse = """ 16 | { 17 | "images": [ 18 | { 19 | "url": "https://fal.media/files/zebra/_9NDmaWO5okNq9idPY7Il.jpeg", 20 | "width": 1024, 21 | "height": 1024, 22 | "content_type": "image/jpeg" 23 | } 24 | ], 25 | "timings": { 26 | "inference": 2.1141920797526836 27 | }, 28 | "seed": 2062765390712234200, 29 | "has_nsfw_concepts": [ 30 | false 31 | ], 32 | "prompt": "winter wonderland" 33 | } 34 | """ 35 | let res = try FalFastSDXLOutputSchema.deserialize(from: sampleResponse) 36 | XCTAssertEqual( 37 | "https://fal.media/files/zebra/_9NDmaWO5okNq9idPY7Il.jpeg", 38 | res.images?.first?.url?.absoluteString 39 | ) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/FalFluxLoRAFastTrainingOutputSchemaTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalLoraTrainingResponseTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/3/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class FalFluxLoRAFastTrainingOutputSchemaTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let sampleResponse = """ 16 | { 17 | "diffusers_lora_file": { 18 | "url": "https://storage.googleapis.com/fal-flux-lora/57256f3082ab4c1498c76b060ff3ffdb_pytorch_lora_weights.safetensors", 19 | "content_type": "application/octet-stream", 20 | "file_name": "pytorch_lora_weights.safetensors", 21 | "file_size": 89745224 22 | }, 23 | "config_file": { 24 | "url": "https://storage.googleapis.com/fal-flux-lora/3b950c5c7bfc42929ba8e383363864ee_config.json", 25 | "content_type": "application/octet-stream", 26 | "file_name": "config.json", 27 | "file_size": 1268 28 | } 29 | } 30 | """ 31 | let res = try FalFluxLoRAFastTrainingOutputSchema.deserialize(from: sampleResponse) 32 | XCTAssertEqual( 33 | "https://storage.googleapis.com/fal-flux-lora/57256f3082ab4c1498c76b060ff3ffdb_pytorch_lora_weights.safetensors", 34 | res.diffusersLoraFile?.url?.absoluteString 35 | ) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/FalFluxLoRAInputSchemaTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxLoRAInputSchemaTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/3/24. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class FalFluxLoRAInputSchemaTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let expected = """ 16 | { 17 | "guidance_scale" : 6, 18 | "image_size" : "landscape_16_9", 19 | "loras" : [ 20 | { 21 | "path" : "https:\\/\\/storage.googleapis.com\\/fal-flux-lora\\/57256f3082ab4c1498c76b060ff3ffdb_pytorch_lora_weights.safetensors", 22 | "scale" : 1.2 23 | } 24 | ], 25 | "num_images" : 2, 26 | "num_inference_steps" : 21, 27 | "output_format" : "jpeg", 28 | "prompt" : "face", 29 | "seed" : 7467957 30 | } 31 | """ 32 | let inputSchema = FalFluxLoRAInputSchema( 33 | prompt: "face", 34 | guidanceScale: 6, 35 | imageSize: .landscape16x9, 36 | loras: [ 37 | .init( 38 | path: URL(string: "https://storage.googleapis.com/fal-flux-lora/57256f3082ab4c1498c76b060ff3ffdb_pytorch_lora_weights.safetensors")!, 39 | scale: 1.2 40 | ) 41 | ], 42 | numImages: 2, 43 | numInferenceSteps: 21, 44 | outputFormat: .jpeg, 45 | seed: 7467957 46 | ) 47 | XCTAssertEqual( 48 | expected, 49 | try inputSchema.serialize(pretty: true) 50 | ) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/FalFluxLoRAOutputSchemaTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxLoRAOutputSchemaTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/4/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class FalFluxLoRAOutputSchemaTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let sampleResponse = #""" 16 | { 17 | "images": [ 18 | { 19 | "url": "https://fal.media/files/zebra/q9r5uYfobNHrVAa_4GGor_e9377bbec594486784d3cf306fd19a85.jpg", 20 | "width": 1024, 21 | "height": 576, 22 | "content_type": "image/jpeg" 23 | }, 24 | { 25 | "url": "https://fal.media/files/panda/VMV6roybQeHhUvhvgVaPF_686e75aef65443d186f25f25d87a48cb.jpg", 26 | "width": 1024, 27 | "height": 576, 28 | "content_type": "image/jpeg" 29 | } 30 | ], 31 | "timings": { 32 | "inference": 4.393133059842512 33 | }, 34 | "seed": 7467957, 35 | "has_nsfw_concepts": [ 36 | false, 37 | false 38 | ], 39 | "prompt": "face" 40 | } 41 | """# 42 | let res = try FalFluxLoRAOutputSchema.deserialize(from: sampleResponse) 43 | XCTAssertEqual( 44 | "https://fal.media/files/zebra/q9r5uYfobNHrVAa_4GGor_e9377bbec594486784d3cf306fd19a85.jpg", 45 | res.images?.first?.url?.absoluteString 46 | ) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/FalFluxSchnellResponseTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalFluxSchnellResponseTests.swift 3 | // 4 | // 5 | // Created by Hunor Zoltáni on 01.03.2025. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class FalFluxSchnellResponseTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let sampleResponse = """ 16 | { 17 | "images": [ 18 | { 19 | "url": "https://fal.media/files/zebra/_9NDmaWO5okNq9idPY7Il.jpeg", 20 | "width": 1024, 21 | "height": 1024, 22 | "content_type": "image/jpeg" 23 | } 24 | ], 25 | "timings": { 26 | "inference": 2.1141920797526836 27 | }, 28 | "seed": 2062765390712234200, 29 | "has_nsfw_concepts": [ 30 | false 31 | ], 32 | "prompt": "winter wonderland" 33 | } 34 | """ 35 | let res = try FalFluxSchnellOutputSchema.deserialize(from: sampleResponse) 36 | XCTAssertEqual( 37 | "https://fal.media/files/zebra/_9NDmaWO5okNq9idPY7Il.jpeg", 38 | res.images?.first?.url?.absoluteString 39 | ) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/FalUploadResponseTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // FalUploadResponseTest.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/3/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class FalUploadResponseTests: XCTestCase { 13 | 14 | func testResponseIsDecodable() throws { 15 | let sampleResponse = """ 16 | { 17 | "file_url": "https://storage.googleapis.com/isolate-dev-hot-rooster-A", 18 | "upload_url": "https://storage.googleapis.com/isolate-dev-hot-rooster-B" 19 | } 20 | """ 21 | let res = try FalInitiateUploadResponseBody.deserialize(from: sampleResponse) 22 | XCTAssertEqual( 23 | "https://storage.googleapis.com/isolate-dev-hot-rooster-A", 24 | res.fileURL.absoluteString 25 | ) 26 | XCTAssertEqual( 27 | "https://storage.googleapis.com/isolate-dev-hot-rooster-B", 28 | res.uploadURL.absoluteString 29 | ) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/GeminiFileUploadResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiFileUploadResponseBodyTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/24/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class GeminiFileUploadResponseBodyTests: XCTestCase { 13 | 14 | func testUploadFileResponseIsDecodable() throws { 15 | let sampleResponse = #""" 16 | { 17 | "file": { 18 | "createTime": "2024-10-23T23:57:56.546446Z", 19 | "expirationTime": "2024-10-25T23:57:56.477127362Z", 20 | "mimeType": "video/mp4", 21 | "name": "files/mry3nxa31le3", 22 | "sha256Hash": "MjQzZjY4MWFmMDllNDhiYjJkNGNlZWUxYzg5ZGM3MWRmNzcyMjgwODUyMDFlMjUxM2JjZWQ0OGI1NDdlZDg4OQ==", 23 | "sizeBytes": "106677", 24 | "state": "PROCESSING", 25 | "updateTime": "2024-10-23T23:57:56.546446Z", 26 | "uri": "https://generativelanguage.googleapis.com/v1beta/files/mry3nxa31le3" 27 | } 28 | } 29 | """# 30 | let res = try GeminiFileUploadResponseBody.deserialize(from: sampleResponse) 31 | XCTAssertEqual(.processing, res.file.state) 32 | } 33 | } 34 | 35 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/GeminiGenerateImageResponseTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiGenerateImageResponseTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/17/25. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | 13 | final class GeminiGenerateImageResponseTests: XCTestCase { 14 | 15 | func testResponseIsDecodable() throws { 16 | let sampleResponse = #""" 17 | { 18 | "candidates": [ 19 | { 20 | "content": { 21 | "parts": [ 22 | { 23 | "inlineData": { 24 | "mimeType": "image/png", 25 | "data": "" 26 | } 27 | } 28 | ], 29 | "role": "model" 30 | }, 31 | "finishReason": "STOP", 32 | "index": 0 33 | } 34 | ], 35 | "usageMetadata": { 36 | "promptTokenCount": 36, 37 | "totalTokenCount": 36, 38 | "promptTokensDetails": [ 39 | { 40 | "modality": "TEXT", 41 | "tokenCount": 36 42 | } 43 | ] 44 | }, 45 | "modelVersion": "gemini-2.0-flash-exp-image-generation" 46 | } 47 | """# 48 | 49 | let body = try GeminiGenerateContentResponseBody.deserialize(from: sampleResponse) 50 | if case .inlineData(mimeType: let mimeType, base64Data: let b64Data) = body.candidates?.first?.content?.parts?.first { 51 | XCTAssertEqual("image/png", mimeType) 52 | XCTAssertEqual("", b64Data) 53 | } else { 54 | XCTFail() 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/GeminiStructuredOutputsRequestTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // GeminiStructuredOutputsRequestTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 3/15/25. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | 13 | final class GeminiStructuredOutputsRequestTests: XCTestCase { 14 | // This example is from generative-ai-js/samples/controlled_generation.js 15 | func testRequestIsEncodableToJson() throws { 16 | let schema: [String: AIProxyJSONValue] = [ 17 | "description": "List of recipes", 18 | "type": "array", 19 | "items": [ 20 | "type": "object", 21 | "properties": [ 22 | "recipeName": [ 23 | "type": "string", 24 | "description": "Name of the recipe", 25 | "nullable": false 26 | ] 27 | ], 28 | "required": ["recipeName"] 29 | ] 30 | ] 31 | 32 | let requestBody = GeminiGenerateContentRequestBody( 33 | contents: [ 34 | .init( 35 | parts: [.text("List a few popular cookie recipes.")], 36 | role: "user" 37 | ) 38 | ], 39 | generationConfig: .init( 40 | responseMimeType: "application/json", 41 | responseSchema: schema 42 | ) 43 | ) 44 | XCTAssertEqual(#""" 45 | { 46 | "contents" : [ 47 | { 48 | "parts" : [ 49 | { 50 | "text" : "List a few popular cookie recipes." 51 | } 52 | ], 53 | "role" : "user" 54 | } 55 | ], 56 | "generationConfig" : { 57 | "responseMimeType" : "application\/json", 58 | "responseSchema" : { 59 | "description" : "List of recipes", 60 | "items" : { 61 | "properties" : { 62 | "recipeName" : { 63 | "description" : "Name of the recipe", 64 | "nullable" : false, 65 | "type" : "string" 66 | } 67 | }, 68 | "required" : [ 69 | "recipeName" 70 | ], 71 | "type" : "object" 72 | }, 73 | "type" : "array" 74 | } 75 | } 76 | } 77 | """#, 78 | try requestBody.serialize(pretty: true) 79 | ) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/OpenAIChatCompletionStreamingChunkTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIChatCompletionStreamingChunkTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/17/24. 
6 | // 7 | 8 | import XCTest 9 | @testable import AIProxy 10 | 11 | final class OpenAIChatCompletionStreamingChunkTests: XCTestCase { 12 | func testChatCompletionResponseChunkIsDecodable() { 13 | let line = """ 14 | data: {"id":"chatcmpl-9jAXUtD5xAKjjgo3XBZEawyoRdUGk","object":"chat.completion.chunk","created":1720552300,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"FINDME"},"logprobs":null,"finish_reason":null}],"usage":null} 15 | """ 16 | let res = OpenAIChatCompletionChunk.deserialize(fromLine: line) 17 | XCTAssertEqual( 18 | "FINDME", 19 | res?.choices.first?.delta.content 20 | ) 21 | } 22 | 23 | func testUsageIsDecodable() { 24 | let line = """ 25 | data: {"id":"chatcmpl-A9MtoyoAD7JI10m0acOb3ouODNHzK","object":"chat.completion.chunk","created":1726796340,"model":"gpt-4o-mini-2024-07-18","system_fingerprint":"fp_1bb46167f9","choices":[],"usage":{"prompt_tokens":9,"completion_tokens":9,"total_tokens":18,"completion_tokens_details":{"reasoning_tokens":0}}} 26 | """ 27 | let res = OpenAIChatCompletionChunk.deserialize(fromLine: line) 28 | XCTAssertEqual(9, res?.usage?.promptTokens) 29 | XCTAssertEqual(9, res?.usage?.completionTokens) 30 | XCTAssertEqual(18, res?.usage?.totalTokens) 31 | XCTAssertEqual(0, res?.usage?.completionTokensDetails?.reasoningTokens) 32 | } 33 | 34 | func testFunctionCallIsDecodable() { 35 | let line = #""" 36 | data: {"id":"chatcmpl-AmzGvtW0SKhVtUcwDhcNoRSvxAZ3M","object":"chat.completion.chunk","created":1736238637,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_5f20662549","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"location"}}]},"logprobs":null,"finish_reason":null}],"usage":null} 37 | """# 38 | let res = OpenAIChatCompletionChunk.deserialize(fromLine: line) 39 | XCTAssertEqual("location", res?.choices.first?.delta.toolCalls?.first?.function?.arguments) 40 | } 41 | 42 | func testFunctionCallIsAlsoDecodable() { 43 | let line = #""" 44 | data: {"id":"chatcmpl-AnKQL2c5yoJxoeqrnZl01bkmR7M3c","object":"chat.completion.chunk","created":1736319945,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_5f20662549","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}],"usage":null} 45 | """# 46 | let res = OpenAIChatCompletionChunk.deserialize(fromLine: line) 47 | XCTAssertEqual("{\"", res?.choices.first?.delta.toolCalls?.first?.function?.arguments) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/OpenAICreateImageResponseTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAICreateImageResponseTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 4/23/25. 
6 | // 7 | 8 | import XCTest 9 | @testable import AIProxy 10 | 11 | final class OpenAICreateImageResponseTests: XCTestCase { 12 | 13 | func testResponseBodyIsDecodable() throws { 14 | let sampleResponse = """ 15 | { 16 | "created": 1745433403, 17 | "data": [ 18 | { 19 | "b64_json": "" 20 | } 21 | ], 22 | "usage": { 23 | "input_tokens": 8, 24 | "input_tokens_details": { 25 | "image_tokens": 0, 26 | "text_tokens": 8 27 | }, 28 | "output_tokens": 1584, 29 | "total_tokens": 1592 30 | } 31 | } 32 | """ 33 | let res = try OpenAICreateImageResponseBody.deserialize(from: sampleResponse) 34 | XCTAssertEqual("", res.data.first?.b64JSON) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/OpenRouterChatCompletionStreamingChunkTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenRouterChatCompletionStreamingChunkTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 2/6/25. 6 | // 7 | 8 | import Foundation 9 | import XCTest 10 | @testable import AIProxy 11 | 12 | final class OpenRouterChatCompletionStreamingChunkTests: XCTestCase { 13 | 14 | func testChunkWithReasoningIsDecodable() throws { 15 | let serializedChunk = #"data: {"id":"gen-1738800351-03h6DftoW7wSTm6uSXb7","provider":"DeepInfra","model":"deepseek/deepseek-r1","object":"chat.completion.chunk","created":1738800351,"choices":[{"index":0,"delta":{"role":"assistant","content":null,"reasoning":"ABC"},"finish_reason":null,"native_finish_reason":null,"logprobs":null}]}"# 16 | 17 | let body = OpenRouterChatCompletionChunk.deserialize(fromLine: serializedChunk) 18 | XCTAssertEqual("ABC", body!.choices.first!.delta.reasoning) 19 | } 20 | } 21 | 22 | 23 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/OpenRouterToolCallResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenRouterToolCallResponseBodyTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 4/16/25. 6 | // 7 | 8 | import Foundation 9 | import XCTest 10 | @testable import AIProxy 11 | 12 | final class OpenRouterToolCallResponseBodyTests: XCTestCase { 13 | 14 | func testResponseWithToolUseIsDecodable() throws { 15 | let sampleResponse = #""" 16 | { 17 | "id": "gen-1744845790-VDqUk3HsqsMOdoNex7FH", 18 | "provider": "DeepInfra", 19 | "model": "meta-llama/llama-3.3-70b-instruct", 20 | "object": "chat.completion", 21 | "created": 1744845790, 22 | "choices": [ 23 | { 24 | "logprobs": null, 25 | "finish_reason": "stop", 26 | "native_finish_reason": "stop", 27 | "index": 0, 28 | "message": { 29 | "role": "assistant", 30 | "content": "", 31 | "refusal": null, 32 | "reasoning": null, 33 | "tool_calls": [ 34 | { 35 | "index": 0, 36 | "id": "call_mWR7JQZV1ttdehPlT2y0tP2k", 37 | "function": { 38 | "arguments": "{\"location\": \"San Francisco, USA\"}", 39 | "name": "get_weather" 40 | }, 41 | "type": "function" 42 | } 43 | ] 44 | } 45 | } 46 | ], 47 | "usage": { 48 | "prompt_tokens": 243, 49 | "completion_tokens": 16, 50 | "total_tokens": 259 51 | } 52 | } 53 | """# 54 | let res = try OpenRouterChatCompletionResponseBody.deserialize(from: sampleResponse) 55 | let functionToCall = res.choices.first?.message.toolCalls?.first?.function 56 | XCTAssertEqual("get_weather", functionToCall?.name) 57 | XCTAssertEqual( 58 | "San Francisco, USA", 59 | functionToCall?.arguments?["location"] as? 
String 60 | ) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/PerplexityChatCompletionResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // PerplexityChatCompletionResponseBodyTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 11/6/24. 6 | // 7 | 8 | import Foundation 9 | 10 | 11 | import XCTest 12 | import Foundation 13 | @testable import AIProxy 14 | 15 | final class PerplexityChatCompletionResponseBodyTests: XCTestCase { 16 | 17 | func testResponseIsDecodable() throws { 18 | let responseBody = #""" 19 | { 20 | "id": "167584e2-3a26-4f17-be20-2c844d40d85a", 21 | "model": "llama-3.1-sonar-small-128k-online", 22 | "created": 1730918154, 23 | "usage": { 24 | "prompt_tokens": 2, 25 | "completion_tokens": 360, 26 | "total_tokens": 362 27 | }, 28 | "object": "chat.completion", 29 | "choices": [ 30 | { 31 | "index": 0, 32 | "finish_reason": "stop", 33 | "message": { 34 | "role": "assistant", 35 | "content": "Hello world." 36 | }, 37 | "delta": { 38 | "role": "assistant", 39 | "content": "" 40 | } 41 | } 42 | ] 43 | } 44 | """# 45 | let res = try PerplexityChatCompletionResponseBody.deserialize( 46 | from: responseBody 47 | ) 48 | XCTAssertEqual("Hello world.", res.choices.first?.message?.content) 49 | XCTAssertEqual(.assistant, res.choices.first?.message?.role) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/ReplicateFileResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFileResponseBodyTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/9/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class ReplicateFileResponseBodyTests: XCTestCase { 13 | 14 | func testUploadFileResponseIsDecodable() throws { 15 | let responseBody = """ 16 | { 17 | "id": "MzQ4YjZmYWQtZWMzNS00ZWI2LTk2MGEtZDQ2YTJkYWU4YzAx", 18 | "name": "training.zip", 19 | "content_type": "application/zip", 20 | "size": 358732, 21 | "etag": "c6f4001d286dc33f28763042bacb05a8", 22 | "checksums": { 23 | "sha256": "6b6fa3db39421c7ee4ae6d0099c8351f3d53599f3b2e41993c8a7c91cd502a87", 24 | "md5": "c6f4001d286dc33f28763042bacb05a8" 25 | }, 26 | "metadata": {}, 27 | "created_at": "2024-09-10T00:31:35.115Z", 28 | "expires_at": "2024-09-11T00:31:35.115Z", 29 | "urls": { 30 | "get": "https://api.replicate.com/v1/files/MzQ4YjZmYWQtZWMzNS00ZWI2LTk2MGEtZDQ2YTJkYWU4YzAx" 31 | } 32 | } 33 | """ 34 | let res = try ReplicateFileUploadResponseBody.deserialize( 35 | from: responseBody 36 | ) 37 | XCTAssertEqual("2024-09-11T00:31:35.115Z", res.expiresAt) 38 | XCTAssertEqual( 39 | "https://api.replicate.com/v1/files/MzQ4YjZmYWQtZWMzNS00ZWI2LTk2MGEtZDQ2YTJkYWU4YzAx", 40 | res.urls.get.absoluteString 41 | ) 42 | XCTAssertNotNil(res.expiresAt) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/ReplicateFluxShnellSchemaTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateFluxSchnellSchemaTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 10/21/24. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class ReplicateFluxSchnellSchemaTests: XCTestCase { 13 | 14 | func testGoFastArgumentIsEncodedInRequest() throws { 15 | let input = ReplicateFluxSchnellInputSchema( 16 | prompt: "abc", 17 | goFast: true 18 | ) 19 | XCTAssertEqual( 20 | #""" 21 | { 22 | "go_fast" : true, 23 | "prompt" : "abc" 24 | } 25 | """#, 26 | try input.serialize(pretty: true) 27 | ) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/ReplicateModelResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateModelResponseBodyTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 9/7/24. 6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class ReplicateModelResponseBodyTests: XCTestCase { 13 | 14 | func testCreateModelResponseIsDecodable() throws { 15 | let sampleResponse = """ 16 | { 17 | "cover_image_url": null, 18 | "created_at": "2024-09-08T02:22:30.297358Z", 19 | "default_example": null, 20 | "description": "My first model", 21 | "github_url": null, 22 | "latest_version": null, 23 | "license_url": null, 24 | "name": "my-model", 25 | "owner": "lzell", 26 | "paper_url": null, 27 | "run_count": 0, 28 | "url": "https://replicate.com/lzell/my-model", 29 | "visibility": "private" 30 | } 31 | """ 32 | let res = try ReplicateModelResponseBody.deserialize(from: sampleResponse) 33 | XCTAssertEqual(.private, res.visibility) 34 | XCTAssertEqual( 35 | "https://replicate.com/lzell/my-model", 36 | res.url?.absoluteString 37 | ) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/ReplicatePredictionRequestBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicatePredictionRequestBodyTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/27/24. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class ReplicatePredictionRequestBodyTests: XCTestCase { 13 | 14 | func testBasicSDXLPredictionRequestIsEncodable() throws { 15 | let input = ReplicateSDXLInputSchema( 16 | prompt: "Monument valley, Utah" 17 | ) 18 | let requestBody = ReplicatePredictionRequestBody( 19 | input: input, 20 | version: "abc" 21 | ) 22 | XCTAssertEqual( 23 | #""" 24 | { 25 | "input" : { 26 | "prompt" : "Monument valley, Utah" 27 | }, 28 | "version" : "abc" 29 | } 30 | """#, 31 | try requestBody.serialize(pretty: true) 32 | ) 33 | } 34 | 35 | func testFullSDXLPredictionRequestIsEncodable() throws { 36 | let input = ReplicateSDXLInputSchema( 37 | prompt: "Monument valley, Utah", 38 | applyWatermark: true, 39 | disableSafetyChecker: true, 40 | guidanceScale: 0.5, 41 | height: 512, 42 | highNoiseFrac: 0.5, 43 | image: URL(string: "https://example.com/image")!, 44 | loraScale: 0.5, 45 | mask: URL(string: "https://example.com/mask")!, 46 | negativePrompt: "low quality", 47 | numInferenceSteps: 50, 48 | numOutputs: 2, 49 | promptStrength: 0.5, 50 | refine: .baseImageRefiner, 51 | refineSteps: 50, 52 | scheduler: .kEuler, 53 | seed: 123, 54 | width: 512 55 | ) 56 | let requestBody = ReplicatePredictionRequestBody( 57 | input: input, 58 | version: "abc" 59 | ) 60 | XCTAssertEqual( 61 | #""" 62 | { 63 | "input" : { 64 | "apply_watermark" : true, 65 | "disable_safety_checker" : true, 66 | "guidance_scale" : 0.5, 67 | "height" : 512, 68 | "high_noise_frac" : 0.5, 69 | "image" : "https:\/\/example.com\/image", 70 | "lora_scale" : 0.5, 71 | "mask" : "https:\/\/example.com\/mask", 72 | "negative_prompt" : "low quality", 73 | "num_inference_steps" : 50, 74 | "num_outputs" : 2, 75 | "prompt" : "Monument valley, Utah", 76 | "prompt_strength" : 0.5, 77 | "refine" : "base_image_refiner", 78 | "refine_steps" : 50, 79 | "scheduler" : "K_EULER", 80 | "seed" : 123, 81 | "width" : 512 82 | }, 83 | "version" : "abc" 84 | } 85 | """#, 86 | try requestBody.serialize(pretty: true) 87 | ) 88 | } 89 | 90 | } 91 | 92 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/ReplicateSyncAPIResponseBodyTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ReplicateSyncAPIResponseTests.swift 3 | // AIProxy 4 | // 5 | // Created by Lou Zell on 1/29/25. 
6 | // 7 | 8 | import XCTest 9 | import Foundation 10 | @testable import AIProxy 11 | 12 | final class ReplicateSyncAPIResponseBodyTests: XCTestCase { 13 | func testReplicateFluxProResponseIsDecodable() throws { 14 | let responseBody = #""" 15 | { 16 | "id": "kwvnpcac8drj00cmpp48fvvgbr", 17 | "model": "philz1337x/clarity-upscaler", 18 | "version": "dfad41707589d68ecdccd1dfa600d55a208f9310748e44bfe35b4a6291453d5e", 19 | "input": { 20 | "creativity": 0.35, 21 | "downscaling_resolution": 768, 22 | "dynamic": 6, 23 | "handfix": "disabled", 24 | "image": "data:image/jpeg;base64,...", 25 | "negative_prompt": "(worst quality, low quality, normal quality:2) JuggernautNegative-neg", 26 | "num_inference_steps": 18, 27 | "output_format": "png", 28 | "prompt": "masterpiece, best quality, highres, ", 29 | "resemblance": 0.6, 30 | "scale_factor": 122.5, 31 | "scheduler": "DPM++ 3M SDE Karras", 32 | "sd_model": "juggernaut_reborn.safetensors [338b85bc4f]", 33 | "seed": 1337, 34 | "tiling_height": 144, 35 | "tiling_width": 112 36 | }, 37 | "logs": "", 38 | "output": null, 39 | "data_removed": false, 40 | "error": null, 41 | "status": "starting", 42 | "created_at": "2025-01-30T04:46:36.099Z", 43 | "urls": { 44 | "cancel": "https://api.replicate.com/v1/predictions/kwvnpcac8drj00cmpp48fvvgbr/cancel", 45 | "get": "https://api.replicate.com/v1/predictions/kwvnpcac8drj00cmpp48fvvgbr", 46 | "stream": "https://stream.replicate.com/v1/files/yswh-3iil5a2sisvp4yn2iahzihmbud3xzb6zaphn6265qbekothccx2a" 47 | } 48 | } 49 | """# 50 | let response = try ReplicatePrediction<[String]>.deserialize(from: responseBody) 51 | XCTAssertNil(response.output) 52 | XCTAssertEqual( 53 | "https://api.replicate.com/v1/predictions/kwvnpcac8drj00cmpp48fvvgbr", 54 | response.predictionResultURL?.absoluteString 55 | ) 56 | } 57 | } 58 | 59 | 60 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/TestHelpers.swift: -------------------------------------------------------------------------------- 1 | // 2 | // TestHelpers.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/11/24. 6 | // 7 | 8 | #if canImport(AppKit) 9 | import AppKit 10 | #elseif canImport(UIKit) 11 | import UIKit 12 | #endif 13 | 14 | #if canImport(AppKit) 15 | func createImage(width: Int, height: Int) -> NSImage { 16 | let cgImage = createTestImage(width: width, height: height) 17 | return NSImage( 18 | cgImage: cgImage, 19 | size: CGSize(width: width, height: height) 20 | ) 21 | } 22 | #elseif canImport(UIKit) 23 | func createImage(width: Int, height: Int) -> UIImage { 24 | let cgImage = createTestImage(width: width, height: height) 25 | return UIImage(cgImage: cgImage) 26 | } 27 | #endif 28 | 29 | /// Creates a red rectangle of `width` and `height` 30 | private func createTestImage(width: Int, height: Int) -> CGImage { 31 | let numComponents = 3 32 | let numBytes = height * width * numComponents 33 | var pixelData = [UInt8](repeating: 0, count: numBytes) 34 | for i in stride(from: 0, to: numBytes, by: 3) { 35 | pixelData[i] = 255 36 | pixelData[i + 1] = 0 37 | pixelData[i + 2] = 0 38 | } 39 | let colorspace = CGColorSpaceCreateDeviceRGB() 40 | let rgbData = CFDataCreate(nil, pixelData, numBytes)! 41 | let provider = CGDataProvider(data: rgbData)! 
42 | return CGImage(width: width, 43 | height: height, 44 | bitsPerComponent: 8, 45 | bitsPerPixel: 8 * numComponents, 46 | bytesPerRow: width * numComponents, 47 | space: colorspace, 48 | bitmapInfo: CGBitmapInfo(rawValue: 0), 49 | provider: provider, 50 | decode: nil, 51 | shouldInterpolate: true, 52 | intent: CGColorRenderingIntent.defaultIntent)! 53 | } 54 | -------------------------------------------------------------------------------- /Tests/AIProxyTests/TogetherAIChatCompletionStreamingChunkTests.swift: -------------------------------------------------------------------------------- 1 | // 2 | // TogetherAIChatCompletionStreamingChunkTests.swift 3 | // 4 | // 5 | // Created by Lou Zell on 8/16/24. 6 | // 7 | 8 | import Foundation 9 | import XCTest 10 | @testable import AIProxy 11 | 12 | final class TogetherAIChatCompletionStreamingChunkTests: XCTestCase { 13 | 14 | func testChunkWithoutUsageIsDecodable() throws { 15 | let serializedChunk = #"data: {"id":"8b43a5cfadf99e5c-SJC","object":"chat.completion.chunk","created":1723834621,"choices":[{"index":0,"text":"ABC","logprobs":null,"finish_reason":null,"seed":null,"delta":{"token_id":30,"role":"assistant","content":"ABC","tool_calls":null}}],"model":"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo","usage":null}"# 16 | 17 | let body = TogetherAIChatCompletionStreamingChunk.deserialize(fromLine: serializedChunk) 18 | XCTAssertEqual("ABC", body!.choices.first!.delta.content) 19 | } 20 | 21 | func testChunkWithUsageIsDecodable() throws { 22 | let serializedChunk = #"data: {"id":"8b43a5cfadf99e5c-SJC","object":"chat.completion.chunk","created":1723834621,"choices":[{"index":0,"text":"ABC","logprobs":null,"finish_reason":"eos","seed":2039908639335532000,"delta":{"token_id":128009,"role":"assistant","content":"ABC","tool_calls":null}}],"model":"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo","usage":{"prompt_tokens":12,"completion_tokens":27,"total_tokens":39}}"# 23 | let body = TogetherAIChatCompletionStreamingChunk.deserialize(fromLine: serializedChunk) 24 | XCTAssertEqual("ABC", body!.choices.first!.delta.content) 25 | XCTAssertEqual(12, body!.usage!.promptTokens) 26 | } 27 | } 28 | --------------------------------------------------------------------------------
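Taken together, the TogetherAI files above describe one call pattern: build a TogetherAIChatCompletionRequestBody, pass it to a service obtained from the AIProxy factory methods, and iterate the returned async sequence of OpenAIChatCompletionChunk values (the same chunk shape exercised in OpenAIChatCompletionStreamingChunkTests). Below is a minimal consumption sketch, not part of the package sources; it assumes only the TogetherAIService method signatures and the chunk fields shown above, and leaves construction of the request body to the caller since that type's initializer is not shown here.

import AIProxy

/// Illustrative helper (not part of the package): streams a chat completion and
/// prints each text delta as it arrives. Works with either TogetherAIDirectService
/// or TogetherAIProxiedService, since both conform to TogetherAIService.
func printStreamedChatDeltas(
    service: TogetherAIService,
    body: TogetherAIChatCompletionRequestBody
) async throws {
    // streamingChatCompletionRequest forces `stream = true` on the body and yields
    // OpenAI-format chunks, so the incremental text lives at choices[].delta.content.
    let chunks = try await service.streamingChatCompletionRequest(body: body)
    for try await chunk in chunks {
        print(chunk.choices.first?.delta.content ?? "", terminator: "")
    }
}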