├── Example ├── Podfile ├── OpenAIKit.xcodeproj │ ├── project.xcworkspace │ │ └── contents.xcworkspacedata │ ├── xcshareddata │ │ └── xcschemes │ │ │ └── OpenAIKit-Example.xcscheme │ └── project.pbxproj ├── OpenAIKit.xcworkspace │ ├── contents.xcworkspacedata │ └── xcshareddata │ │ └── IDEWorkspaceChecks.plist ├── Podfile.lock └── OpenAIKit │ ├── Images.xcassets │ └── AppIcon.appiconset │ │ └── Contents.json │ ├── Info.plist │ ├── AppDelegate.swift │ ├── ViewControllerImages.swift │ ├── Base.lproj │ ├── LaunchScreen.xib │ └── Main.storyboard │ └── ViewController.swift ├── .github └── workflows │ └── swift.yml ├── Package.swift ├── Sources └── OpenAIKit │ ├── Extensions │ └── Extensions.swift │ ├── Models │ ├── EditsModels.swift │ ├── ImagesModels.swift │ ├── CompletionsModels.swift │ ├── ChatCompletionsRequest.swift │ └── AIModel.swift │ ├── Helpers │ ├── NetworkRoutes.swift │ ├── OpenAISSLDelegate.swift │ ├── AIEventStream.swift │ └── OpenAIKitNetwork.swift │ ├── OpenAIKit.swift │ └── OpenAIKitRequests │ ├── Edits.swift │ ├── Images.swift │ ├── Completions.swift │ └── Chat.swift ├── LICENSE ├── OpenAIKit.podspec ├── .gitignore ├── Tests └── OpenAIKitTests │ └── OpenAIKitTests.swift ├── CODE_OF_CONDUCT.md └── README.md /Example/Podfile: -------------------------------------------------------------------------------- 1 | use_frameworks! 
2 | 3 | platform :ios, '13.0' 4 | 5 | target 'OpenAIKit_Example' do 6 | pod 'OpenAIKit', :path => '../' 7 | pod 'SDWebImage' 8 | end 9 | -------------------------------------------------------------------------------- /Example/OpenAIKit.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /Example/OpenAIKit.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /Example/OpenAIKit.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | IDEDidComputeMac32BitWarning 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /.github/workflows/swift.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build a Swift project 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-swift 3 | 4 | name: Swift 5 | 6 | on: 7 | push: 8 | branches: [ "main" ] 9 | pull_request: 10 | branches: [ "main" ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: macos-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Build 20 | run: swift build -v 21 | 22 | -------------------------------------------------------------------------------- /Package.swift: -------------------------------------------------------------------------------- 1 | // swift-tools-version: 5.7 2 | // The swift-tools-version declares the minimum version of Swift required to build this package. 
3 | 4 | import PackageDescription 5 | 6 | let package = Package( 7 | name: "OpenAIKit", 8 | products: [ 9 | .library( 10 | name: "OpenAIKit", 11 | targets: ["OpenAIKit"]), 12 | ], 13 | targets: [ 14 | .target( 15 | name: "OpenAIKit", 16 | dependencies: [], 17 | path: "Sources"), 18 | .testTarget( 19 | name: "OpenAIKitTests", 20 | dependencies: ["OpenAIKit"]), 21 | ]) 22 | -------------------------------------------------------------------------------- /Example/Podfile.lock: -------------------------------------------------------------------------------- 1 | PODS: 2 | - OpenAIKit (1.8.0) 3 | - SDWebImage (5.15.4): 4 | - SDWebImage/Core (= 5.15.4) 5 | - SDWebImage/Core (5.15.4) 6 | 7 | DEPENDENCIES: 8 | - OpenAIKit (from `../`) 9 | - SDWebImage 10 | 11 | SPEC REPOS: 12 | trunk: 13 | - SDWebImage 14 | 15 | EXTERNAL SOURCES: 16 | OpenAIKit: 17 | :path: "../" 18 | 19 | SPEC CHECKSUMS: 20 | OpenAIKit: b8fe0e542751a809af8246fe8930ff5c5bfe4909 21 | SDWebImage: 1c39de67663e5eebb2f41324d5d580eeea12dd4c 22 | 23 | PODFILE CHECKSUM: 14cb438400b969c56f333517c46fa41b611026f0 24 | 25 | COCOAPODS: 1.12.1 26 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Extensions/Extensions.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Extensions.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 
6 | // 7 | 8 | import Foundation 9 | 10 | extension JSONDecoder { 11 | static var aiDecoder: JSONDecoder { 12 | let decoder = JSONDecoder() 13 | decoder.keyDecodingStrategy = .convertFromSnakeCase 14 | decoder.dateDecodingStrategy = .millisecondsSince1970 15 | 16 | return decoder 17 | } 18 | } 19 | 20 | extension JSONEncoder { 21 | static var aiEncoder: JSONEncoder { 22 | let encoder = JSONEncoder() 23 | encoder.keyEncodingStrategy = .convertToSnakeCase 24 | return encoder 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /Example/OpenAIKit/Images.xcassets/AppIcon.appiconset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "idiom" : "iphone", 5 | "size" : "20x20", 6 | "scale" : "2x" 7 | }, 8 | { 9 | "idiom" : "iphone", 10 | "size" : "20x20", 11 | "scale" : "3x" 12 | }, 13 | { 14 | "idiom" : "iphone", 15 | "size" : "29x29", 16 | "scale" : "2x" 17 | }, 18 | { 19 | "idiom" : "iphone", 20 | "size" : "29x29", 21 | "scale" : "3x" 22 | }, 23 | { 24 | "idiom" : "iphone", 25 | "size" : "40x40", 26 | "scale" : "2x" 27 | }, 28 | { 29 | "idiom" : "iphone", 30 | "size" : "40x40", 31 | "scale" : "3x" 32 | }, 33 | { 34 | "idiom" : "iphone", 35 | "size" : "60x60", 36 | "scale" : "2x" 37 | }, 38 | { 39 | "idiom" : "iphone", 40 | "size" : "60x60", 41 | "scale" : "3x" 42 | }, 43 | { 44 | "idiom" : "ios-marketing", 45 | "size" : "1024x1024", 46 | "scale" : "1x" 47 | } 48 | ], 49 | "info" : { 50 | "version" : 1, 51 | "author" : "xcode" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Futurra Group 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the 
Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Models/EditsModels.swift: -------------------------------------------------------------------------------- 1 | // 2 | // EditsModels.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct EditsRequest: Codable { 11 | /// ID of the model to use. 12 | public let model: AIModelType 13 | /// The input text to use as a starting point for the edit. 14 | public let input: String 15 | /// The instruction that tells the model how to edit the prompt. 16 | public let instruction: String 17 | /// How many completions to generate for each prompt. 18 | public var n: Int? = nil 19 | /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `topP` but not both. 20 | public var temperature: Double? 
= nil 21 | /// An alternative to sampling with `temperature`, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 22 | public var topP: Double? = nil 23 | } 24 | -------------------------------------------------------------------------------- /Example/OpenAIKit/Info.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | en 7 | CFBundleExecutable 8 | $(EXECUTABLE_NAME) 9 | CFBundleIdentifier 10 | $(PRODUCT_BUNDLE_IDENTIFIER) 11 | CFBundleInfoDictionaryVersion 12 | 6.0 13 | CFBundleName 14 | $(PRODUCT_NAME) 15 | CFBundlePackageType 16 | APPL 17 | CFBundleShortVersionString 18 | 1.0 19 | CFBundleSignature 20 | ???? 21 | CFBundleVersion 22 | 1 23 | LSRequiresIPhoneOS 24 | 25 | UILaunchStoryboardName 26 | LaunchScreen 27 | UIMainStoryboardFile 28 | Main 29 | UIRequiredDeviceCapabilities 30 | 31 | armv7 32 | 33 | UISupportedInterfaceOrientations 34 | 35 | UIInterfaceOrientationPortrait 36 | UIInterfaceOrientationLandscapeLeft 37 | 38 | UIUserInterfaceStyle 39 | Light 40 | 41 | 42 | -------------------------------------------------------------------------------- /OpenAIKit.podspec: -------------------------------------------------------------------------------- 1 | # 2 | # Be sure to run `pod lib lint OpenAIKit.podspec' to ensure this is a 3 | # valid spec before submitting. 4 | # 5 | # Any lines starting with a # are optional, but their use is encouraged 6 | # To learn more about a Podspec see https://guides.cocoapods.org/syntax/podspec.html 7 | # 8 | 9 | Pod::Spec.new do |s| 10 | s.name = 'OpenAIKit' 11 | s.version = '1.9.3' 12 | s.summary = 'OpenAI is a community-maintained repository containing Swift implementation over OpenAI public API.' 
13 | 14 | s.description = <<-DESC 15 | The OpenAI API can be applied to virtually any task that involves understanding or generating natural language or code. We offer a spectrum of models with different levels of power suitable for different tasks, as well as the ability to fine-tune your own custom models. These models can be used for everything from content generation to semantic search and classification. 16 | DESC 17 | 18 | s.homepage = 'https://github.com/FuturraGroup/OpenAI' 19 | s.license = { :type => 'MIT', :file => 'LICENSE' } 20 | s.author = { 'Kyrylo Mukha' => 'kirill.mukha@icloud.com' } 21 | s.source = { :git => 'https://github.com/FuturraGroup/OpenAI.git', :tag => s.version.to_s } 22 | 23 | s.ios.deployment_target = '13.0' 24 | s.swift_version = "5.5" 25 | 26 | s.source_files = 'Sources/OpenAIKit/**/*.{swift}' 27 | 28 | end 29 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Helpers/NetworkRoutes.swift: -------------------------------------------------------------------------------- 1 | // 2 | // NetworkRoutes.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 01.03.2023. 
6 | // 7 | 8 | import Foundation 9 | 10 | enum OpenAIHTTPMethod: String { 11 | case get = "GET" 12 | case post = "POST" 13 | case put = "PUT" 14 | case patch = "PATCH" 15 | case delete = "DELETE" 16 | case head = "HEAD" 17 | case options = "OPTIONS" 18 | case connect = "CONNECT" 19 | case trace = "TRACE" 20 | } 21 | 22 | typealias OpenAIHeaders = [String: String] 23 | 24 | @available(swift 5.5) 25 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 26 | protocol Endpoint { 27 | var route: String { get } 28 | var method: OpenAIHTTPMethod { get } 29 | func urlPath(for aiKit: OpenAIKit) -> String 30 | } 31 | 32 | enum OpenAIEndpoint { 33 | case completions 34 | case chatCompletions 35 | case edits 36 | case dalleImage 37 | case dalleImageEdit 38 | } 39 | 40 | @available(swift 5.5) 41 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 42 | extension OpenAIEndpoint: Endpoint { 43 | var route: String { 44 | switch self { 45 | case .completions: 46 | return "/v1/completions" 47 | case .chatCompletions: 48 | return "/v1/chat/completions" 49 | case .edits: 50 | return "/v1/edits" 51 | case .dalleImage: 52 | return "/v1/images/generations" 53 | case .dalleImageEdit: 54 | return "/v1/images/edits" 55 | } 56 | } 57 | 58 | var method: OpenAIHTTPMethod { 59 | switch self { 60 | default: 61 | return .post 62 | } 63 | } 64 | 65 | private var baseURL: String { 66 | switch self { 67 | default: 68 | return "https://api.openai.com" 69 | } 70 | } 71 | 72 | func urlPath(for aiKit: OpenAIKit) -> String { 73 | (aiKit.customOpenAIURL ?? baseURL) + route 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Helpers/OpenAISSLDelegate.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAISSLDelegate.swift 3 | // OpenAIKit 4 | // 5 | // Created by Kyrylo Mukha on 31.01.2024. 
6 | // 7 | 8 | import Foundation 9 | 10 | @available(swift 5.5) 11 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 12 | final class OpenAISSLDelegate: NSObject { 13 | private let sslCerificatePath: String? 14 | 15 | init(sslCerificatePath: String?) { 16 | self.sslCerificatePath = sslCerificatePath 17 | } 18 | } 19 | 20 | @available(swift 5.5) 21 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 22 | extension OpenAISSLDelegate: URLSessionDelegate { 23 | public func urlSession(_ session: URLSession, didReceive challenge: URLAuthenticationChallenge, completionHandler: @escaping (URLSession.AuthChallengeDisposition, URLCredential?) -> Void) { 24 | guard let serverTrust = challenge.protectionSpace.serverTrust, let certificate = SecTrustGetCertificateAtIndex(serverTrust, 0) else { 25 | completionHandler(.useCredential, nil) 26 | return 27 | } 28 | 29 | guard let sslCerificatePath else { 30 | let credential = URLCredential(trust: serverTrust) 31 | completionHandler(.useCredential, credential) 32 | return 33 | } 34 | 35 | let policy = NSMutableArray() 36 | policy.add(SecPolicyCreateSSL(true, challenge.protectionSpace.host as CFString)) 37 | 38 | let isServerTrusted = SecTrustEvaluateWithError(serverTrust, nil) 39 | 40 | let remoteCertificateData: NSData = SecCertificateCopyData(certificate) 41 | 42 | guard let localCertificateData = NSData(contentsOfFile: sslCerificatePath) else { 43 | completionHandler(.cancelAuthenticationChallenge, nil) 44 | return 45 | } 46 | 47 | if isServerTrusted, remoteCertificateData.isEqual(to: localCertificateData as Data) { 48 | let credential = URLCredential(trust: serverTrust) 49 | completionHandler(.useCredential, credential) 50 | } else { 51 | completionHandler(.cancelAuthenticationChallenge, nil) 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 
**/.DS_Store 3 | # Xcode 4 | # 5 | # gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore 6 | 7 | ## Build generated 8 | build/ 9 | DerivedData/ 10 | 11 | ## Various settings 12 | *.pbxuser 13 | !default.pbxuser 14 | *.mode1v3 15 | !default.mode1v3 16 | *.mode2v3 17 | !default.mode2v3 18 | *.perspectivev3 19 | !default.perspectivev3 20 | xcuserdata/ 21 | 22 | ## Other 23 | *.moved-aside 24 | *.xccheckout 25 | *.xcscmblueprint 26 | 27 | ## Obj-C/Swift specific 28 | *.hmap 29 | *.ipa 30 | *.dSYM.zip 31 | *.dSYM 32 | 33 | ## Playgrounds 34 | timeline.xctimeline 35 | playground.xcworkspace 36 | 37 | # Swift Package Manager 38 | # 39 | # Add this line if you want to avoid checking in source code from Swift Package Manager dependencies. 40 | # Packages/ 41 | # Package.pins 42 | # Package.resolved 43 | .build/ 44 | 45 | # CocoaPods 46 | # 47 | # We recommend against adding the Pods directory to your .gitignore. However 48 | # you should judge for yourself, the pros and cons are mentioned at: 49 | # https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control 50 | # 51 | Pods 52 | pods/Pods 53 | 54 | # Carthage 55 | # 56 | # Add this line if you want to avoid checking in source code from Carthage dependencies. 57 | # Carthage/Checkouts 58 | 59 | Carthage/Build 60 | 61 | # fastlane 62 | # 63 | # It is recommended to not store the screenshots in the git repo. Instead, use fastlane to re-generate the 64 | # screenshots whenever they are needed. 
65 | # For more information about the recommended setup visit: 66 | # https://docs.fastlane.tools/best-practices/source-control/#source-control 67 | 68 | fastlane/report.xml 69 | fastlane/Preview.html 70 | fastlane/screenshots/**/*.png 71 | fastlane/test_output 72 | 73 | # Code Injection 74 | # 75 | # After new code Injection tools there's a generated folder /iOSInjectionProject 76 | # https://github.com/johnno1962/injectionforxcode 77 | 78 | iOSInjectionProject/ 79 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Models/ImagesModels.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ImagesModels.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | public typealias AIImageSize = String 11 | 12 | public extension AIImageSize { 13 | static let size256 = "256x256" 14 | static let size512 = "512x512" 15 | static let size1024 = "1024x1024" 16 | } 17 | 18 | public struct ImageRequest: Codable { 19 | /// The prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 20 | public var prompt: String 21 | /// How many completions to generate for each prompt. 22 | public var n: Int? = nil 23 | /// The size of the generated images. Must be one of `size256`, `size512`, or `size1024`. 24 | public var size: AIImageSize = .size1024 25 | /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 26 | public var user: String? = nil 27 | } 28 | 29 | public struct ImagesResponse: Codable { 30 | public struct AIImage: Codable { 31 | public let url: String 32 | } 33 | 34 | public let created: TimeInterval 35 | public var data: [AIImage] 36 | } 37 | 38 | public struct ImageEditRequest: Codable { 39 | /// The image to edit. Must be a valid PNG file, less than 4MB, and square. 
If mask is not provided, image must have transparency, which will be used as the mask. 40 | public var image: String 41 | /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. 42 | public var mask: String? 43 | /// A text description of the desired image(s). The maximum length is 1000 characters. 44 | public var prompt: String 45 | /// The number of images to generate. Must be between 1 and 10. 46 | public var n: Int? = nil 47 | /// The size of the generated images. Must be one of `size256`, `size512`, or `size1024`. 48 | public var size: AIImageSize = .size1024 49 | /// The format in which the generated images are returned. Must be one of `url` or `b64_json` 50 | /// DEFAULT: url 51 | public var responseFormat: String? 52 | /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 53 | public var user: String? = nil 54 | } 55 | 56 | public struct ImageEditResponse: Codable { 57 | public struct AIImage: Codable { 58 | public let url: String 59 | } 60 | 61 | public let created: TimeInterval 62 | public var data: [AIImage] 63 | } 64 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/OpenAIKit.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIKit.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 01.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | @available(swift 5.5) 11 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 12 | public final class OpenAIKit: NSObject { 13 | private let apiToken: String 14 | private let organization: String? 15 | 16 | internal var network = OpenAIKitNetwork(session: URLSession(configuration: URLSessionConfiguration.default)) 17 | 18 | internal let jsonEncoder = JSONEncoder.aiEncoder 19 | 20 | public let customOpenAIURL: String? 
21 | 22 | private let sslCerificatePath: String? 23 | 24 | private(set) weak var sslDelegate: OpenAISSLDelegate? 25 | 26 | /// Initialize `OpenAIKit` with your API Token wherever convenient in your project. Organization name is optional. 27 | public init(apiToken: String, organization: String? = nil, timeoutInterval: TimeInterval = 60, customOpenAIURL: String? = nil, sslCerificatePath: String? = nil) { 28 | self.apiToken = apiToken 29 | self.organization = organization 30 | self.customOpenAIURL = customOpenAIURL 31 | self.sslCerificatePath = sslCerificatePath 32 | 33 | let delegate = OpenAISSLDelegate(sslCerificatePath: sslCerificatePath) 34 | 35 | let configuration = URLSessionConfiguration.default 36 | configuration.timeoutIntervalForRequest = timeoutInterval 37 | configuration.timeoutIntervalForResource = timeoutInterval 38 | configuration.requestCachePolicy = .reloadIgnoringLocalAndRemoteCacheData 39 | 40 | let session = URLSession(configuration: configuration, delegate: delegate, delegateQueue: nil) 41 | 42 | self.network = OpenAIKitNetwork(session: session, sslDelegate: delegate) 43 | self.sslDelegate = delegate 44 | } 45 | } 46 | 47 | @available(swift 5.5) 48 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 49 | extension OpenAIKit { 50 | var baseHeaders: OpenAIHeaders { 51 | var headers: OpenAIHeaders = [:] 52 | 53 | headers["Authorization"] = "Bearer \(apiToken)" 54 | 55 | if let organization { 56 | headers["OpenAI-Organization"] = organization 57 | } 58 | 59 | headers["content-type"] = "application/json" 60 | 61 | return headers 62 | } 63 | 64 | var baseMultipartHeaders: OpenAIHeaders { 65 | var headers: OpenAIHeaders = [:] 66 | 67 | headers["Authorization"] = "Bearer \(apiToken)" 68 | 69 | if let organization { 70 | headers["OpenAI-Organization"] = organization 71 | } 72 | 73 | headers["content-type"] = "multipart/form-data" 74 | 75 | return headers 76 | } 77 | } 78 | 
-------------------------------------------------------------------------------- /Sources/OpenAIKit/Models/CompletionsModels.swift: -------------------------------------------------------------------------------- 1 | // 2 | // CompletionsModels.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct CompletionsRequest: Codable { 11 | /// ID of the model to use. 12 | public let model: AIModelType 13 | /// The prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 14 | public let prompt: String 15 | /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 16 | public var temperature: Double? = nil 17 | /// How many chat completion choices to generate for each input message. 18 | public var n: Int? = nil 19 | /// The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). 20 | public var maxTokens: Int? = nil 21 | /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 22 | public var topP: Double? = nil 23 | /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 24 | public var frequencyPenalty: Double? = nil 25 | /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 26 | public var presencePenalty: Double? 
= nil 27 | /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. 28 | public var logprobs: Int? = nil 29 | /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 30 | public var stop: [String]? = nil 31 | /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 32 | public var user: String? = nil 33 | /// Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available 34 | public var stream: Bool = false 35 | } 36 | -------------------------------------------------------------------------------- /Example/OpenAIKit/AppDelegate.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AppDelegate.swift 3 | // OpenAIKit 4 | // 5 | // Created by Kyrylo Mukha on 03/06/2023. 6 | // Copyright (c) 2023 Kyrylo Mukha. All rights reserved. 7 | // 8 | 9 | import OpenAIKit 10 | import UIKit 11 | 12 | let apiToken: String = "" 13 | let organizationName: String = "" 14 | 15 | /// Initialize OpenAIKit with your API Token wherever convenient in your project. Organization name is optional. 16 | public let openAI = OpenAIKit(apiToken: apiToken, organization: organizationName) 17 | 18 | @UIApplicationMain 19 | class AppDelegate: UIResponder, UIApplicationDelegate { 20 | var window: UIWindow? 21 | 22 | func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool { 23 | // Override point for customization after application launch. 
24 | return true 25 | } 26 | 27 | func applicationWillResignActive(_ application: UIApplication) { 28 | // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. 29 | // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game. 30 | } 31 | 32 | func applicationDidEnterBackground(_ application: UIApplication) { 33 | // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. 34 | // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. 35 | } 36 | 37 | func applicationWillEnterForeground(_ application: UIApplication) { 38 | // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background. 39 | } 40 | 41 | func applicationDidBecomeActive(_ application: UIApplication) { 42 | // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. 43 | } 44 | 45 | func applicationWillTerminate(_ application: UIApplication) { 46 | // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. 
47 | } 48 | } 49 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/OpenAIKitRequests/Edits.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Edits.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | @available(swift 5.5) 11 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 12 | public extension OpenAIKit { 13 | /// Creates a new edit for the provided input, instruction, and parameters. 14 | /// 15 | /// - Parameters: 16 | /// - instruction: The instruction that tells the model how to edit the prompt. 17 | /// - model: ID of the model to use. 18 | /// - input: The input text to use as a starting point for the edit. 19 | /// - n: How many completions to generate for each prompt. 20 | /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `topP` but not both. 21 | /// - topP: An alternative to sampling with `temperature`, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 22 | func sendEdits(instruction: String, 23 | model: AIModelType, 24 | input: String = "", 25 | n: Int? = nil, 26 | temperature: Double? = 1, 27 | topP: Double? = nil, 28 | completion: @escaping (Result) -> Void) 29 | { 30 | let endpoint = OpenAIEndpoint.edits 31 | 32 | let requestBody = EditsRequest(model: model, input: input, instruction: instruction, n: n, temperature: temperature, topP: topP) 33 | 34 | let requestData = try? 
jsonEncoder.encode(requestBody) 35 | 36 | let headers = baseHeaders 37 | 38 | network.request(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers, completion: completion) 39 | } 40 | 41 | @available(swift 5.5) 42 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 43 | func sendEdits(instruction: String, 44 | model: AIModelType, 45 | input: String = "", 46 | n: Int? = nil, 47 | temperature: Double? = 1, 48 | topP: Double? = nil) async -> Result 49 | { 50 | return await withCheckedContinuation { continuation in 51 | sendEdits(instruction: instruction, model: model, input: input, n: n, temperature: temperature, topP: topP) { result in 52 | continuation.resume(returning: result) 53 | } 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /Example/OpenAIKit/ViewControllerImages.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ViewControllerImages.swift 3 | // OpenAIKit_Example 4 | // 5 | // Created by Kyrylo Mukha on 06.03.2023. 6 | // Copyright © 2023 Futurra Group. All rights reserved. 7 | // 8 | 9 | import Foundation 10 | import OpenAIKit 11 | import SDWebImage 12 | import UIKit 13 | 14 | class ViewControllerImages: UIViewController { 15 | @IBOutlet private weak var textField: UITextField! 16 | @IBOutlet private weak var imageView: UIImageView! 17 | @IBOutlet private weak var loader: UIActivityIndicatorView! 18 | @IBOutlet private weak var sendBtn: UIButton! 19 | @IBOutlet private weak var bottomOffset: NSLayoutConstraint! 
20 | 21 | // MARK: - 22 | 23 | private var observers = [NSObjectProtocol]() 24 | 25 | // MARK: - 26 | 27 | deinit { 28 | observers.forEach { NotificationCenter.default.removeObserver($0) } 29 | observers.removeAll() 30 | } 31 | 32 | override func viewDidLoad() { 33 | super.viewDidLoad() 34 | 35 | configureOBservers() 36 | stopLoading() 37 | 38 | textField.becomeFirstResponder() 39 | } 40 | } 41 | 42 | extension ViewControllerImages { 43 | @IBAction private func sendQuestion() { 44 | startLoading() 45 | imageView.image = nil 46 | 47 | let prompt = textField.text ?? "" 48 | 49 | openAI.sendImagesRequest(prompt: prompt, size: .size512, n: 1) { [weak self] result in 50 | DispatchQueue.main.async { self?.stopLoading() } 51 | 52 | switch result { 53 | case .success(let aiResult): 54 | 55 | DispatchQueue.main.async { [weak self] in 56 | if let urlString = aiResult.data.first?.url { 57 | self?.imageView.sd_setImage(with: URL(string: urlString)) 58 | } 59 | } 60 | case .failure(let error): 61 | DispatchQueue.main.async { [weak self] in 62 | let alert = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert) 63 | alert.addAction(UIAlertAction(title: "Ok", style: .default)) 64 | self?.present(alert, animated: true) 65 | } 66 | } 67 | } 68 | } 69 | } 70 | 71 | extension ViewControllerImages { 72 | private func configureOBservers() { 73 | observers.append(NotificationCenter.default.addObserver( 74 | forName: UIResponder.keyboardWillShowNotification, 75 | object: nil, 76 | queue: OperationQueue.main, 77 | using: { [weak self] notification in 78 | DispatchQueue.main.async { [weak self] in 79 | guard let keyboardHeight = (notification.userInfo?[UIResponder.keyboardFrameEndUserInfoKey] as? 
NSValue)?.cgRectValue.height else { return } 80 | self?.bottomOffset.constant = keyboardHeight + 16 81 | self?.sendBtn.isHidden = false 82 | self?.view.layoutSubviews() 83 | } 84 | })) 85 | 86 | observers.append(NotificationCenter.default.addObserver( 87 | forName: UIResponder.keyboardWillHideNotification, 88 | object: nil, 89 | queue: OperationQueue.main, 90 | using: { [weak self] _ in 91 | DispatchQueue.main.async { [weak self] in 92 | UIView.animate(withDuration: 0.275) { [weak self] in 93 | self?.bottomOffset.constant = 16 94 | self?.sendBtn.isHidden = true 95 | self?.view.layoutSubviews() 96 | } 97 | } 98 | })) 99 | } 100 | 101 | private func startLoading() { 102 | view.isUserInteractionEnabled = false 103 | loader.startAnimating() 104 | loader.isHidden = false 105 | } 106 | 107 | private func stopLoading() { 108 | textField.resignFirstResponder() 109 | view.isUserInteractionEnabled = true 110 | loader.stopAnimating() 111 | loader.isHidden = true 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Models/ChatCompletionsRequest.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ChatCompletionsModels.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 10.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | public struct ChatCompletionsRequest: Codable { 11 | /// ID of the model to use. 12 | public let model: AIModelType 13 | /// The messages to generate chat completions for. Must be an array of `AIMessage` objects. 14 | public let messages: [AIMessage] 15 | /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 16 | public var temperature: Double? = nil 17 | /// How many chat completion choices to generate for each input message. 18 | public var n: Int? 
= nil 19 | /// The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). 20 | public var maxTokens: Int? = nil 21 | /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 22 | public var topP: Double? = nil 23 | /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 24 | public var frequencyPenalty: Double? = nil 25 | /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 26 | public var presencePenalty: Double? = nil 27 | /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. 28 | public var logprobs: Int? = nil 29 | /// An object specifying the format that the model must output. 30 | /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates 31 | /// is valid JSON. 32 | /// **Important:** when using JSON mode, you must also instruct the model to produce JSON yourself via a system or 33 | /// user message. Without this, the model may generate an unending stream of whitespace until the generation reaches 34 | /// the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content 35 | /// may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the 36 | /// conversation exceeded the max context length. 37 | public var responseFormat: ResponseFormat? 38 | /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 39 | public var stop: [String]? = nil 40 | /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 41 | public var user: String? = nil 42 | /// Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available 43 | public var stream: Bool = false 44 | public struct ResponseFormat: Codable { 45 | internal var type: FormatType? 46 | internal enum FormatType: String, Codable { 47 | case text, jsonObject = "json_object" 48 | } 49 | 50 | public static let json = ResponseFormat(type: .jsonObject) 51 | public static let text = ResponseFormat(type: .text) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /Example/OpenAIKit/Base.lproj/LaunchScreen.xib: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 25 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /Tests/OpenAIKitTests/OpenAIKitTests.swift: -------------------------------------------------------------------------------- 1 | @testable import OpenAIKit 2 | import XCTest 3 | 4 | final class OpenAIKitTests: XCTestCase { 5 | var openAI: OpenAIKit? 
6 | 7 | override func setUp() { 8 | super.setUp() 9 | 10 | openAI = OpenAIKit(apiToken: "") 11 | } 12 | 13 | func testCompletions() async { 14 | let result = await openAI?.sendCompletion(prompt: "Write a 100-word essay about the earth", model: .gptV3_5(.davinciText003), maxTokens: 300, temperature: 0.7) 15 | 16 | switch result { 17 | case .success(let aiResult): 18 | XCTAssertFalse(aiResult.choices.isEmpty) 19 | case .failure(let error): 20 | print(error.localizedDescription) 21 | XCTAssertFalse(true) 22 | default: 23 | XCTAssertFalse(true) 24 | } 25 | } 26 | 27 | func testEdits() async { 28 | let result = await openAI?.sendEdits(instruction: "Fix the spelling mistakes", model: .custom("text-davinci-edit-001"), input: "What day of the wek is it?") 29 | 30 | switch result { 31 | case .success(let aiResult): 32 | XCTAssertFalse(aiResult.choices.isEmpty) 33 | case .failure(let error): 34 | print(error.localizedDescription) 35 | XCTAssertFalse(true) 36 | default: 37 | XCTAssertFalse(true) 38 | } 39 | } 40 | 41 | func testImages() async { 42 | let result = await openAI?.sendImagesRequest(prompt: "Draw orange butterfly", size: .size256, n: 1) 43 | 44 | switch result { 45 | case .success(let aiResult): 46 | XCTAssertFalse(aiResult.data.isEmpty) 47 | case .failure(let error): 48 | print(error.localizedDescription) 49 | XCTAssertFalse(true) 50 | default: 51 | XCTAssertFalse(true) 52 | } 53 | } 54 | 55 | func testStreamCompletions() { 56 | let expectation = XCTestExpectation(description: "Async operation completes") 57 | 58 | var resultText = "" 59 | 60 | openAI?.sendStreamCompletion(prompt: "Write a 100-word essay about the earth", model: .gptV3_5(.davinciText003), maxTokens: 300, completion: { result in 61 | switch result { 62 | case .success(let streamResult): 63 | resultText += streamResult.message?.choices.first?.text ?? 
"" 64 | 65 | if streamResult.isFinished { 66 | expectation.fulfill() 67 | } 68 | case .failure(let error): 69 | print(error.localizedDescription) 70 | expectation.fulfill() 71 | } 72 | }) 73 | 74 | wait(for: [expectation], timeout: 300) 75 | XCTAssertFalse(resultText.isEmpty) 76 | } 77 | 78 | func testStreamChatCompletions() { 79 | let expectation = XCTestExpectation(description: "Async operation completes") 80 | 81 | var resultText = "" 82 | 83 | openAI?.sendStreamChatCompletion(newMessage: AIMessage(role: .user, content: "Write a 100-word essay about the earth"), model: .gptV3_5(.gptTurbo), maxTokens: 300, temperature: 0.7) { result in 84 | 85 | switch result { 86 | case .success(let streamResult): 87 | resultText += streamResult.message?.choices.first?.message?.content ?? "" 88 | 89 | if streamResult.isFinished { 90 | expectation.fulfill() 91 | } 92 | case .failure(let error): 93 | print(error.localizedDescription) 94 | expectation.fulfill() 95 | } 96 | } 97 | 98 | wait(for: [expectation], timeout: 300) 99 | XCTAssertFalse(resultText.isEmpty) 100 | } 101 | 102 | func testChatResponseFormatCompletions() { 103 | let expectation = XCTestExpectation(description: "Async operation completes") 104 | 105 | var resultText = "" 106 | 107 | openAI?.sendChatCompletion(newMessage: AIMessage(role: .user, content: "Write a 100-word essay about the earth"), previousMessages: [AIMessage(role: .system, content: "You are a helpful assistant designed to output JSON.")], model: .custom("gpt-3.5-turbo-1106"), maxTokens: 300, temperature: 0.7, responseFormat: .json) { result in 108 | 109 | switch result { 110 | case .success(let streamResult): 111 | resultText += streamResult.choices.first?.message?.content ?? 
"" 112 | 113 | expectation.fulfill() 114 | case .failure(let error): 115 | print(error.localizedDescription) 116 | expectation.fulfill() 117 | } 118 | } 119 | 120 | wait(for: [expectation], timeout: 300) 121 | XCTAssertFalse(resultText.isEmpty) 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /Example/OpenAIKit/ViewController.swift: -------------------------------------------------------------------------------- 1 | // 2 | // ViewController.swift 3 | // OpenAIKit 4 | // 5 | // Created by Kyrylo Mukha on 03/06/2023. 6 | // Copyright (c) 2023 Kyrylo Mukha. All rights reserved. 7 | // 8 | 9 | import Foundation 10 | import OpenAIKit 11 | import UIKit 12 | 13 | class ViewController: UIViewController { 14 | @IBOutlet private weak var textField: UITextField! 15 | @IBOutlet private weak var textView: UITextView! 16 | @IBOutlet private weak var loader: UIActivityIndicatorView! 17 | @IBOutlet private weak var sendBtn: UIButton! 18 | @IBOutlet private weak var bottomOffset: NSLayoutConstraint! 19 | 20 | // MARK: - 21 | 22 | private var observers = [NSObjectProtocol]() 23 | 24 | // MARK: - 25 | 26 | deinit { 27 | observers.forEach { NotificationCenter.default.removeObserver($0) } 28 | observers.removeAll() 29 | } 30 | 31 | override func viewDidLoad() { 32 | super.viewDidLoad() 33 | 34 | configureOBservers() 35 | stopLoading() 36 | 37 | textField.becomeFirstResponder() 38 | } 39 | } 40 | 41 | extension ViewController { 42 | @IBAction private func sendQuestion() { 43 | startLoading() 44 | textView.text = " " 45 | 46 | let prompt = textField.text ?? "" 47 | /// You can put messages you send and recieved before for maka chat uderstand your context. 48 | /// Be careful! There is limit of total tokens count. The total limit of tokens is 4096. 49 | /// So if you requests maxTokens = 2048, total sum of tokens in newMessage + previousMessages must be 2048. 
50 | /// Number of tokens you can recieve from response model from field usage. 51 | let previousMessages: [AIMessage] = [] 52 | /// Creates a completion for the chat message 53 | openAI.sendChatCompletion(newMessage: AIMessage(role: .user, content: prompt), previousMessages: previousMessages, model: .gptV3_5(.gptTurbo), maxTokens: 2048, n: 1, completion: { [weak self] result in 54 | DispatchQueue.main.async { self?.stopLoading() } 55 | 56 | switch result { 57 | case .success(let aiResult): 58 | DispatchQueue.main.async { [weak self] in 59 | if let text = aiResult.choices.first?.message?.content { 60 | self?.textView.text = text 61 | } 62 | } 63 | case .failure(let error): 64 | DispatchQueue.main.async { [weak self] in 65 | let alert = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert) 66 | alert.addAction(UIAlertAction(title: "Ok", style: .default)) 67 | self?.present(alert, animated: true) 68 | } 69 | } 70 | }) 71 | 72 | /// Creates a completion for the provided prompt and parameters 73 | openAI.sendCompletion(prompt: prompt, model: .gptV3_5(.davinciText003), maxTokens: 2048) { [weak self] result in 74 | } 75 | } 76 | } 77 | 78 | extension ViewController { 79 | private func configureOBservers() { 80 | observers.append(NotificationCenter.default.addObserver( 81 | forName: UIResponder.keyboardWillShowNotification, 82 | object: nil, 83 | queue: OperationQueue.main, 84 | using: { [weak self] notification in 85 | DispatchQueue.main.async { [weak self] in 86 | guard let keyboardHeight = (notification.userInfo?[UIResponder.keyboardFrameEndUserInfoKey] as? 
NSValue)?.cgRectValue.height else { return } 87 | self?.bottomOffset.constant = keyboardHeight + 16 88 | self?.sendBtn.isHidden = false 89 | self?.view.layoutSubviews() 90 | } 91 | })) 92 | 93 | observers.append(NotificationCenter.default.addObserver( 94 | forName: UIResponder.keyboardWillHideNotification, 95 | object: nil, 96 | queue: OperationQueue.main, 97 | using: { [weak self] _ in 98 | DispatchQueue.main.async { [weak self] in 99 | UIView.animate(withDuration: 0.275) { [weak self] in 100 | self?.bottomOffset.constant = 16 101 | self?.sendBtn.isHidden = true 102 | self?.view.layoutSubviews() 103 | } 104 | } 105 | })) 106 | } 107 | 108 | private func startLoading() { 109 | view.isUserInteractionEnabled = false 110 | loader.startAnimating() 111 | loader.isHidden = false 112 | } 113 | 114 | private func stopLoading() { 115 | textField.resignFirstResponder() 116 | view.isUserInteractionEnabled = true 117 | loader.stopAnimating() 118 | loader.isHidden = true 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /Example/OpenAIKit.xcodeproj/xcshareddata/xcschemes/OpenAIKit-Example.xcscheme: -------------------------------------------------------------------------------- 1 | 2 | 5 | 8 | 9 | 15 | 21 | 22 | 23 | 29 | 35 | 36 | 37 | 38 | 39 | 44 | 45 | 51 | 52 | 53 | 54 | 56 | 62 | 63 | 64 | 65 | 66 | 76 | 78 | 84 | 85 | 86 | 87 | 93 | 95 | 101 | 102 | 103 | 104 | 106 | 107 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/OpenAIKitRequests/Images.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Images.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 
//

import Foundation

@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public extension OpenAIKit {
	// MARK: - Create image

	/// Given a prompt and/or an input image, the model will generate a new image.
	///
	/// - Parameters:
	///   - prompt: The prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	///   - size: The size of the generated images. Must be one of `size256`, `size512`, or `size1024`.
	///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	///   - n: How many completions to generate for each prompt.
	/// - NOTE(review): the generic parameters of `Result` appear to have been stripped by this dump;
	///   restore `Result<…, Error>` from the upstream repository before compiling.
	func sendImagesRequest(prompt: String,
	                       size: AIImageSize = .size1024,
	                       user: String? = nil,
	                       n: Int? = nil,
	                       completion: @escaping (Result) -> Void)
	{
		// A failed encode yields a nil body, matching the library-wide `try?` convention.
		let payload = try? jsonEncoder.encode(ImageRequest(prompt: prompt, n: n, size: size, user: user))

		let route = OpenAIEndpoint.dalleImage
		network.request(route.method, url: route.urlPath(for: self), body: payload, headers: baseHeaders, completion: completion)
	}

	/// Async/await variant of `sendImagesRequest`; bridges the completion-based
	/// overload through a checked continuation.
	@available(swift 5.5)
	@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
	func sendImagesRequest(prompt: String,
	                       size: AIImageSize = .size1024,
	                       user: String? = nil,
	                       n: Int? = nil) async -> Result
	{
		await withCheckedContinuation { continuation in
			sendImagesRequest(prompt: prompt, size: size, user: user, n: n) {
				continuation.resume(returning: $0)
			}
		}
	}

	// MARK: - Create image edit

	/// Given a prompt and an input image, the model will generate a modified version of the input image.
	///
	/// - Parameters:
	///   - image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
	///   - prompt: A text description of the desired image(s). The maximum length is 1000 characters.
	///   - size: The size of the generated images. Must be one of `size256`, `size512`, or `size1024`.
	///   - responseFormat: The format in which the generated images are returned. Must be one of url or b64_json.
	///   - mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
	///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	///   - n: How many completions to generate for each prompt.
	/// - NOTE(review): the body is JSON-encoded while `baseMultipartHeaders` advertises a multipart
	///   request; OpenAI's image-edit endpoint expects multipart/form-data — verify this request
	///   actually succeeds upstream before relying on it.
	func sendImageEditRequest(image: String,
	                          prompt: String,
	                          size: AIImageSize = .size1024,
	                          responseFormat: String? = nil,
	                          mask: String? = nil,
	                          user: String? = nil,
	                          n: Int? = nil,
	                          completion: @escaping (Result) -> Void)
	{
		let payload = try? jsonEncoder.encode(ImageEditRequest(image: image, mask: mask, prompt: prompt, n: n, size: size, responseFormat: responseFormat, user: user))

		let route = OpenAIEndpoint.dalleImageEdit
		network.request(route.method, url: route.urlPath(for: self), body: payload, headers: baseMultipartHeaders, completion: completion)
	}

	/// Async/await variant of `sendImageEditRequest`.
	@available(swift 5.5)
	@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
	func sendImageEditRequest(image: String,
	                          prompt: String,
	                          size: AIImageSize = .size1024,
	                          responseFormat: String? = nil,
	                          mask: String? = nil,
	                          user: String? = nil,
	                          n: Int? = nil) async -> Result
	{
		await withCheckedContinuation { continuation in
			sendImageEditRequest(image: image, prompt: prompt, size: size, responseFormat: responseFormat, mask: mask, user: user, n: n) {
				continuation.resume(returning: $0)
			}
		}
	}
}

// -----------------------------------------------------------------------------
// /Sources/OpenAIKit/Helpers/AIEventStream.swift
// -----------------------------------------------------------------------------

//
//  AIEventStream.swift
//
//
//  Created by Kyrylo Mukha on 29.03.2023.
//

import Combine
import Foundation
#if canImport(FoundationNetworking) && canImport(FoundationXML)
import FoundationNetworking
import FoundationXML
#endif

// NOTE(review): generic parameters (e.g. `AIStreamResponse<ResponseType: Decodable>` and the
// matching class generic) appear to have been stripped by this dump; restore from upstream.
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public struct AIStreamResponse {
	public let stream: AIEventStream
	public let message: ResponseType?
	public let data: Data?
	public var isFinished: Bool = false
	public var forceEnd: Bool = false
}

@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public final class AIEventStream: NSObject, URLSessionDataDelegate {
	private let request: URLRequest
	private var session: URLSession?
	private var operationQueue: OperationQueue

	private var onStartCompletion: (() -> Void)?
	private var onCompleteCompletion: ((_ code: Int?, _ forceEnd: Bool, _ error: Error?) throws -> Void)?
	private var onMessageCompletion: ((_ data: Data, _ message: ResponseType?) -> Void)?

	private var isStreamActive: Bool = false

	private var fetchError: Error? = nil

	private weak var sslDelegate: OpenAISSLDelegate?
	init(request: URLRequest, sslDelegate: OpenAISSLDelegate?)
{ 42 | self.request = request 43 | self.operationQueue = OperationQueue() 44 | self.sslDelegate = sslDelegate 45 | operationQueue.maxConcurrentOperationCount = 1 46 | } 47 | 48 | public func startStream() { 49 | guard session == nil else { return } 50 | 51 | let configurationHeaders = [ 52 | "Accept": "text/event-stream", 53 | "Cache-Control": "no-cache" 54 | ] 55 | 56 | let sessionConfiguration = URLSessionConfiguration.default 57 | sessionConfiguration.timeoutIntervalForRequest = TimeInterval(INT_MAX) 58 | sessionConfiguration.timeoutIntervalForResource = TimeInterval(INT_MAX) 59 | sessionConfiguration.httpAdditionalHeaders = configurationHeaders 60 | 61 | session = URLSession(configuration: sessionConfiguration, delegate: self, delegateQueue: operationQueue) 62 | session?.dataTask(with: request).resume() 63 | } 64 | 65 | public func stopStream() { 66 | let oldIsStreamActive = isStreamActive 67 | isStreamActive = false 68 | session?.invalidateAndCancel() 69 | operationQueue.cancelAllOperations() 70 | session = nil 71 | guard oldIsStreamActive != isStreamActive else { return } 72 | try? onCompleteCompletion?(0, true, nil) 73 | } 74 | 75 | // MARK: - URLSessionDataDelegate 76 | 77 | public func urlSession(_ session: URLSession, dataTask: URLSessionDataTask, didReceive response: URLResponse, completionHandler: @escaping @Sendable (URLSession.ResponseDisposition) -> Void) { 78 | completionHandler(URLSession.ResponseDisposition.allow) 79 | 80 | isStreamActive = true 81 | onStartCompletion?() 82 | } 83 | 84 | public func urlSession(_ session: URLSession, dataTask: URLSessionDataTask, didReceive data: Data) { 85 | guard isStreamActive else { return } 86 | 87 | if let response = (dataTask.response as? HTTPURLResponse), let decodedError = try? JSONSerialization.jsonObject(with: data) as? [String: Any] { 88 | guard 200 ... 
299 ~= response.statusCode, decodedError["error"] != nil else { 89 | let error = NSError(domain: NSURLErrorDomain, code: response.statusCode, userInfo: decodedError) 90 | fetchError = error 91 | return 92 | } 93 | } 94 | 95 | let decoder = JSONDecoder.aiDecoder 96 | 97 | let dataString = String(data: data, encoding: .utf8) ?? "" 98 | let lines = dataString.components(separatedBy: "\n") 99 | 100 | for line in lines { 101 | var message: ResponseType? 102 | 103 | if line.hasPrefix("data: "), let data = line.dropFirst(6).data(using: .utf8) { 104 | message = try? decoder.decode(ResponseType.self, from: data) 105 | } 106 | 107 | onMessageCompletion?(data, message) 108 | } 109 | } 110 | 111 | public func urlSession(_ session: URLSession, task: URLSessionTask, didCompleteWithError error: Error?) { 112 | guard let responseStatusCode = (task.response as? HTTPURLResponse)?.statusCode else { 113 | try? onCompleteCompletion?(nil, false, error ?? fetchError) 114 | return 115 | } 116 | 117 | try? onCompleteCompletion?(responseStatusCode, false, isStreamActive ? error ?? fetchError : nil) 118 | } 119 | 120 | public func urlSession(_ session: URLSession, task: URLSessionTask, willPerformHTTPRedirection response: HTTPURLResponse, newRequest request: URLRequest, completionHandler: @escaping (URLRequest?) -> Void) { 121 | completionHandler(request) 122 | } 123 | 124 | public func urlSession(_ session: URLSession, didReceive challenge: URLAuthenticationChallenge, completionHandler: @escaping (URLSession.AuthChallengeDisposition, URLCredential?) 
-> Void) { 125 | sslDelegate?.urlSession(session, didReceive: challenge, completionHandler: completionHandler) 126 | } 127 | } 128 | 129 | @available(swift 5.5) 130 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 131 | public extension AIEventStream { 132 | func onStart(_ onStartCompletion: @escaping (() -> Void)) { 133 | self.onStartCompletion = onStartCompletion 134 | } 135 | 136 | func onComplete(_ onCompleteCompletion: @escaping ((_ code: Int?, _ forceEnd: Bool, _ error: Error?) throws -> Void)) { 137 | self.onCompleteCompletion = onCompleteCompletion 138 | } 139 | 140 | func onMessage(_ onMessageCompletion: @escaping ((_ data: Data, _ message: ResponseType?) -> Void)) { 141 | self.onMessageCompletion = onMessageCompletion 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | . 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Helpers/OpenAIKitNetwork.swift: -------------------------------------------------------------------------------- 1 | // 2 | // OpenAIKitNetwork.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 01.03.2023. 
6 | // 7 | 8 | import Foundation 9 | #if canImport(FoundationNetworking) && canImport(FoundationXML) 10 | import FoundationNetworking 11 | import FoundationXML 12 | #endif 13 | 14 | public enum OpenAINetworkError: Error { 15 | case invalidURL 16 | case invalidResponse 17 | case invalidRequest 18 | } 19 | 20 | // MARK: - 21 | 22 | @available(swift 5.5) 23 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 24 | public final class OpenAIKitNetwork { 25 | private let session: URLSession 26 | private weak var sslDelegate: OpenAISSLDelegate? 27 | private var activeStreams: [NSObject] = [] 28 | 29 | init(session: URLSession = URLSession.shared, sslDelegate: OpenAISSLDelegate? = nil) { 30 | self.session = session 31 | self.sslDelegate = sslDelegate 32 | } 33 | 34 | func request(_ method: OpenAIHTTPMethod, url: String, body: Data? = nil, headers: OpenAIHeaders? = nil, completion: @escaping (Result) -> Void) { 35 | guard let url = URL(string: url) else { 36 | completion(.failure(OpenAINetworkError.invalidURL)) 37 | return 38 | } 39 | 40 | var request = URLRequest(url: url) 41 | request.httpMethod = method.rawValue 42 | request.httpBody = body 43 | 44 | headers?.forEach { key, value in 45 | request.addValue(value, forHTTPHeaderField: key) 46 | } 47 | 48 | let task = session.dataTask(with: request) { data, response, error in 49 | if let error = error { 50 | completion(.failure(error)) 51 | return 52 | } 53 | 54 | guard let data = data, let response = response as? HTTPURLResponse else { 55 | completion(.failure(OpenAINetworkError.invalidResponse)) 56 | return 57 | } 58 | 59 | guard 200 ... 299 ~= response.statusCode else { 60 | var userInfo: [String: Any] = [:] 61 | 62 | if let decodedError = try? JSONSerialization.jsonObject(with: data) as? 
[String: Any] { 63 | userInfo = decodedError 64 | } 65 | 66 | let error = NSError(domain: NSURLErrorDomain, code: response.statusCode, userInfo: userInfo) 67 | completion(.failure(error)) 68 | return 69 | } 70 | 71 | do { 72 | let decoder = JSONDecoder.aiDecoder 73 | let responseObj = try decoder.decode(ResponseType.self, from: data) 74 | completion(.success(responseObj)) 75 | } catch { 76 | completion(.failure(error)) 77 | } 78 | } 79 | 80 | task.resume() 81 | } 82 | 83 | fileprivate struct StreamTaskState { 84 | var isStreamFinished = false 85 | var isStreamForceStop = false 86 | } 87 | 88 | func requestStream(_ method: OpenAIHTTPMethod, url: String, body: Data? = nil, headers: OpenAIHeaders? = nil, completion: @escaping (Result, Error>) -> Void) { 89 | guard let url = URL(string: url) else { 90 | completion(.failure(OpenAINetworkError.invalidURL)) 91 | return 92 | } 93 | 94 | var request = URLRequest(url: url) 95 | request.httpMethod = method.rawValue 96 | request.httpBody = body 97 | 98 | headers?.forEach { key, value in 99 | request.addValue(value, forHTTPHeaderField: key) 100 | } 101 | 102 | var streamState = StreamTaskState() 103 | 104 | let stream = AIEventStream(request: request, sslDelegate: sslDelegate) 105 | activeStreams.append(stream) 106 | 107 | stream.onMessage { data, message in 108 | completion(.success(AIStreamResponse(stream: stream, message: message, data: data, isFinished: streamState.isStreamFinished, forceEnd: streamState.isStreamForceStop))) 109 | } 110 | 111 | stream.onComplete { [weak self] _, forceEnd, error in 112 | if let error { 113 | completion(.failure(error)) 114 | self?.terminateStream(stream) 115 | return 116 | } 117 | 118 | streamState.isStreamFinished = true 119 | streamState.isStreamForceStop = forceEnd 120 | 121 | completion(.success(AIStreamResponse(stream: stream, message: nil, data: nil, isFinished: streamState.isStreamFinished, forceEnd: streamState.isStreamForceStop))) 122 | 123 | self?.terminateStream(stream) 124 | } 
125 | 126 | stream.startStream() 127 | } 128 | 129 | func requestStream(_ method: OpenAIHTTPMethod, url: String, body: Data? = nil, headers: OpenAIHeaders? = nil) async throws -> AsyncThrowingStream, Error> { 130 | guard let url = URL(string: url) else { 131 | throw OpenAINetworkError.invalidURL 132 | } 133 | 134 | var request = URLRequest(url: url) 135 | request.httpMethod = method.rawValue 136 | request.httpBody = body 137 | 138 | headers?.forEach { key, value in 139 | request.addValue(value, forHTTPHeaderField: key) 140 | } 141 | 142 | let stream = AIEventStream(request: request, sslDelegate: sslDelegate) 143 | activeStreams.append(stream) 144 | 145 | return AsyncThrowingStream, Error> { continuation in 146 | Task(priority: .userInitiated) { 147 | var streamState = StreamTaskState() 148 | 149 | stream.onMessage { data, message in 150 | continuation.yield(AIStreamResponse(stream: stream, message: message, data: data)) 151 | } 152 | 153 | stream.onComplete { _, forceEnd, error in 154 | streamState.isStreamFinished = true 155 | streamState.isStreamForceStop = forceEnd 156 | 157 | if let error { 158 | continuation.finish(throwing: error) 159 | return 160 | } 161 | 162 | continuation.yield(AIStreamResponse(stream: stream, message: nil, data: nil, isFinished: true, forceEnd: forceEnd)) 163 | 164 | continuation.finish() 165 | } 166 | 167 | stream.startStream() 168 | 169 | continuation.onTermination = { @Sendable [weak self] _ in 170 | self?.terminateStream(stream) 171 | } 172 | } 173 | } 174 | } 175 | 176 | private func terminateStream(_ stream: AIEventStream) { 177 | stream.stopStream() 178 | activeStreams.removeAll { $0 == stream } 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/Models/AIModel.swift: -------------------------------------------------------------------------------- 1 | // 2 | // AIModel.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 01.03.2023. 
6 | // 7 | 8 | import Foundation 9 | 10 | public enum AIModelType: RawRepresentable, Codable { 11 | /// GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader general knowledge and advanced reasoning capabilities. Like `gpt-3.5-turbo`, GPT-4 is optimized for chat but works well for traditional completions tasks. 12 | case gptV4(GPTv4Model) 13 | /// GPT-3.5 models can understand and generate natural language or code. Our most capable and cost effective model is gpt-3.5-turbo which is optimized for chat but works well for traditional completions tasks as well. 14 | case gptV3_5(GPTv3_5Model) 15 | /// GPT-3 models can understand and generate natural language. These models were superseded by the more powerful GPT-3.5 generation models. However, the original GPT-3 base models (`davinci`, `curie`, `ada`, and `babbage`) are currently the only models that are available to fine-tune. 16 | case gptV3(GPTv3Model) 17 | /// The Codex models are descendants of our GPT-3 models that can understand and generate code. Their training data contains both natural language and billions of lines of public code from GitHub. 18 | case codex(CodexModel) 19 | /// The custom ID of the model to use. 
20 | case custom(String) 21 | 22 | public var rawValue: String { 23 | switch self { 24 | case .gptV4(let model): 25 | return model.rawValue 26 | case .gptV3_5(let model): 27 | return model.rawValue 28 | case .gptV3(let model): 29 | return model.rawValue 30 | case .codex(let model): 31 | return model.rawValue 32 | case .custom(let model): 33 | return model 34 | } 35 | } 36 | 37 | public init?(rawValue: RawValue) { 38 | if let model = GPTv4Model(rawValue: rawValue) { 39 | self = .gptV4(model) 40 | } else if let model = GPTv3_5Model(rawValue: rawValue) { 41 | self = .gptV3_5(model) 42 | } else if let model = GPTv3Model(rawValue: rawValue) { 43 | self = .gptV3(model) 44 | } else if let model = CodexModel(rawValue: rawValue) { 45 | self = .codex(model) 46 | } else { 47 | self = .custom(rawValue) 48 | } 49 | } 50 | } 51 | 52 | public extension AIModelType { 53 | /// GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader general knowledge and advanced reasoning capabilities. Like `gpt-3.5-turbo`, GPT-4 is optimized for chat but works well for traditional completions tasks. 54 | enum GPTv4Model: String { 55 | /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration. 56 | case gpt4 = "gpt-4" 57 | /// Same capabilities as the base `gpt-4` mode but with 4x the context length. Will be updated with our latest model iteration. 58 | case gpt4_32k = "gpt-4-32k" 59 | } 60 | 61 | /// GPT-3.5 models can understand and generate natural language or code. Our most capable and cost effective model is gpt-3.5-turbo which is optimized for chat but works well for traditional completions tasks as well. 62 | enum GPTv3_5Model: String { 63 | /// Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with our latest model iteration. 
64 | case gptTurbo = "gpt-3.5-turbo" 65 | /// Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text. 66 | case davinciText003 = "text-davinci-003" 67 | /// Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning 68 | case davinciText002 = "text-davinci-002" 69 | /// Optimized for code-completion tasks 70 | case davinciCode002 = "code-davinci-002" 71 | } 72 | 73 | /// GPT-3 models can understand and generate natural language. These models were superseded by the more powerful GPT-3.5 generation models. However, the original GPT-3 base models (`davinci`, `curie`, `ada`, and `babbage`) are currently the only models that are available to fine-tune. 74 | enum GPTv3Model: String { 75 | /// Very capable, faster and lower cost than Davinci. 76 | case curieText = "text-curie-001" 77 | /// Capable of straightforward tasks, very fast, and lower cost. 78 | case babbageText = "text-babbage-001" 79 | /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. 80 | case adaText = "text-ada-001" 81 | /// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality. 82 | case davinci 83 | /// Very capable, but faster and lower cost than Davinci. 84 | case curie 85 | /// Capable of straightforward tasks, very fast, and lower cost. 86 | case babbage 87 | /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. 88 | case ada 89 | } 90 | 91 | /// The Codex models are descendants of our GPT-3 models that can understand and generate code. Their training data contains both natural language and billions of lines of public code from GitHub. 92 | enum CodexModel: String { 93 | /// Most capable Codex model. Particularly good at translating natural language to code. 
In addition to completing code, also supports inserting completions within code. 94 | case davinciCode = "code-davinci-002" 95 | /// Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications. 96 | case cushmanCode = "code-cushman-001" 97 | } 98 | } 99 | 100 | public enum AIMessageRole: String, Codable { 101 | case system 102 | case user 103 | case assistant 104 | } 105 | 106 | public struct AIMessage: Codable { 107 | public let role: AIMessageRole 108 | public let content: String 109 | 110 | public init(role: AIMessageRole, content: String) { 111 | self.role = role 112 | self.content = content 113 | } 114 | } 115 | 116 | public struct AIResponseModel: Codable { 117 | public struct Choice: Codable { 118 | public var text: String? = nil 119 | public var message: AIMessage? = nil 120 | public let index: Int 121 | public var logprobs: Int? = nil 122 | public var finishReason: String? = nil 123 | 124 | private struct AIMessageDelta: Codable { 125 | var role: AIMessageRole? = nil 126 | var content: String? 
= nil 127 | } 128 | 129 | enum CodingKeys: CodingKey { 130 | case text 131 | case message 132 | case index 133 | case logprobs 134 | case finishReason 135 | case delta 136 | } 137 | 138 | public init(from decoder: Decoder) throws { 139 | let container: KeyedDecodingContainer = try decoder.container(keyedBy: AIResponseModel.Choice.CodingKeys.self) 140 | self.text = try container.decodeIfPresent(String.self, forKey: AIResponseModel.Choice.CodingKeys.text) 141 | self.message = try container.decodeIfPresent(AIMessage.self, forKey: AIResponseModel.Choice.CodingKeys.message) 142 | self.index = try container.decode(Int.self, forKey: AIResponseModel.Choice.CodingKeys.index) 143 | self.logprobs = try container.decodeIfPresent(Int.self, forKey: AIResponseModel.Choice.CodingKeys.logprobs) 144 | self.finishReason = try container.decodeIfPresent(String.self, forKey: AIResponseModel.Choice.CodingKeys.finishReason) 145 | 146 | if let deltaMessage = try? container.decodeIfPresent(AIMessageDelta.self, forKey: AIResponseModel.Choice.CodingKeys.delta) { 147 | self.message = AIMessage(role: deltaMessage.role ?? .assistant, content: deltaMessage.content ?? "") 148 | } 149 | } 150 | 151 | public func encode(to encoder: Encoder) throws { 152 | var container = encoder.container(keyedBy: AIResponseModel.Choice.CodingKeys.self) 153 | try container.encodeIfPresent(self.text, forKey: AIResponseModel.Choice.CodingKeys.text) 154 | try container.encodeIfPresent(self.message, forKey: AIResponseModel.Choice.CodingKeys.message) 155 | try container.encode(self.index, forKey: AIResponseModel.Choice.CodingKeys.index) 156 | try container.encodeIfPresent(self.logprobs, forKey: AIResponseModel.Choice.CodingKeys.logprobs) 157 | try container.encodeIfPresent(self.finishReason, forKey: AIResponseModel.Choice.CodingKeys.finishReason) 158 | } 159 | } 160 | 161 | public struct Usage: Codable { 162 | public var promptTokens: Int? = nil 163 | public var completionTokens: Int? 
= nil 164 | public var totalTokens: Int? = nil 165 | } 166 | 167 | public struct Logprobs: Codable { 168 | public var tokens: [String]? = nil 169 | public var tokenLogprobs: [Double]? = nil 170 | public var topLogprobs: [String: Double]? = nil 171 | public var textOffset: [Int]? = nil 172 | } 173 | 174 | public var id: String? = nil 175 | public let object: String 176 | public let created: TimeInterval 177 | public var model: AIModelType? = nil 178 | public let choices: [Choice] 179 | public var usage: Usage? = nil 180 | public var logprobs: Logprobs? = nil 181 | } 182 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/OpenAIKitRequests/Completions.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Completions.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 03.03.2023. 6 | // 7 | 8 | import Foundation 9 | 10 | @available(swift 5.5) 11 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 12 | public extension OpenAIKit { 13 | /// Creates a completion for the provided prompt and parameters 14 | /// 15 | /// - Parameters: 16 | /// - prompt: The prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 17 | /// - model: ID of the model to use. 18 | /// - maxTokens: The maximum number of tokens to generate in the completion. 19 | /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `topP` but not both. 20 | /// - n: How many completions to generate for each prompt. 21 | /// - topP: An alternative to sampling with `temperature`, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. 22 | /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 23 | /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 24 | /// - logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. 25 | /// - stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 26 | /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 27 | func sendCompletion(prompt: String, 28 | model: AIModelType, 29 | maxTokens: Int?, 30 | temperature: Double = 1, 31 | n: Int? = nil, 32 | topP: Double? = nil, 33 | frequencyPenalty: Double? = nil, 34 | presencePenalty: Double? = nil, 35 | logprobs: Int? = nil, 36 | stop: [String]? = nil, 37 | user: String? = nil, 38 | completion: @escaping (Result) -> Void) 39 | { 40 | let endpoint = OpenAIEndpoint.completions 41 | 42 | let requestBody = CompletionsRequest(model: model, prompt: prompt, temperature: temperature, n: n, maxTokens: maxTokens, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, stop: stop, user: user) 43 | 44 | let requestData = try? 
jsonEncoder.encode(requestBody) 45 | 46 | let headers = baseHeaders 47 | 48 | network.request(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers, completion: completion) 49 | } 50 | 51 | @available(swift 5.5) 52 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 53 | func sendCompletion(prompt: String, 54 | model: AIModelType, 55 | maxTokens: Int?, 56 | temperature: Double = 1, 57 | n: Int? = nil, 58 | topP: Double? = nil, 59 | frequencyPenalty: Double? = nil, 60 | presencePenalty: Double? = nil, 61 | logprobs: Int? = nil, 62 | stop: [String]? = nil, 63 | user: String? = nil) async -> Result 64 | { 65 | return await withCheckedContinuation { continuation in 66 | sendCompletion(prompt: prompt, model: model, maxTokens: maxTokens, temperature: temperature, n: n, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, stop: stop, user: user) { result in 67 | continuation.resume(returning: result) 68 | } 69 | } 70 | } 71 | } 72 | 73 | // MARK: - Stream methods 74 | 75 | @available(swift 5.5) 76 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 77 | public extension OpenAIKit { 78 | /// Creates a completion for the provided prompt and parameters 79 | /// 80 | /// - Parameters: 81 | /// - prompt: The prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 82 | /// - model: ID of the model to use. 83 | /// - maxTokens: The maximum number of tokens to generate in the completion. 84 | /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `topP` but not both. 85 | /// - n: How many completions to generate for each prompt. 
86 | /// - topP: An alternative to sampling with `temperature`, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 87 | /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 88 | /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 89 | /// - logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. 90 | /// - stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 91 | /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 92 | func sendStreamCompletion(prompt: String, 93 | model: AIModelType, 94 | maxTokens: Int?, 95 | temperature: Double = 1, 96 | n: Int? = nil, 97 | topP: Double? = nil, 98 | frequencyPenalty: Double? = nil, 99 | presencePenalty: Double? = nil, 100 | logprobs: Int? = nil, 101 | stop: [String]? = nil, 102 | user: String? 
= nil, 103 | completion: @escaping (Result, Error>) -> Void) 104 | { 105 | let endpoint = OpenAIEndpoint.completions 106 | 107 | let requestBody = CompletionsRequest(model: model, prompt: prompt, temperature: temperature, n: n, maxTokens: maxTokens, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, stop: stop, user: user, stream: true) 108 | 109 | let requestData = try? jsonEncoder.encode(requestBody) 110 | 111 | let headers = baseHeaders 112 | 113 | network.requestStream(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers) { (result: Result, Error>) in 114 | completion(result) 115 | } 116 | } 117 | 118 | @available(swift 5.5) 119 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 120 | func sendStreamCompletion(prompt: String, 121 | model: AIModelType, 122 | maxTokens: Int?, 123 | temperature: Double = 1, 124 | n: Int? = nil, 125 | topP: Double? = nil, 126 | frequencyPenalty: Double? = nil, 127 | presencePenalty: Double? = nil, 128 | logprobs: Int? = nil, 129 | stop: [String]? = nil, 130 | user: String? = nil) async throws -> AsyncThrowingStream, Error> 131 | { 132 | let endpoint = OpenAIEndpoint.completions 133 | 134 | let requestBody = CompletionsRequest(model: model, prompt: prompt, temperature: temperature, n: n, maxTokens: maxTokens, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, stop: stop, user: user, stream: true) 135 | 136 | let requestData = try? 
jsonEncoder.encode(requestBody) 137 | 138 | let headers = baseHeaders 139 | 140 | return try await network.requestStream(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /Sources/OpenAIKit/OpenAIKitRequests/Chat.swift: -------------------------------------------------------------------------------- 1 | // 2 | // Chat.swift 3 | // 4 | // 5 | // Created by Kyrylo Mukha on 10.03.2023. 6 | // 7 | 8 | import Combine 9 | import Foundation 10 | 11 | @available(swift 5.5) 12 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 13 | public extension OpenAIKit { 14 | /// Creates a completion for the chat message 15 | /// 16 | /// - Parameters: 17 | /// - newMessage: The main input is the `newMessage` parameter. Where each object has a `role` (either `system`, `user`, or `assistant`) and `content` (the content of the message). 18 | /// - previousMessages: Previous messages, an optional parameter, the assistant will communicate in the context of these messages. Must be an array of `AIMessage` objects. 19 | /// - model: ID of the model to use. 20 | /// - maxTokens: The maximum number of tokens to generate in the completion. 21 | /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `topP` but not both. 22 | /// - n: How many completions to generate for each prompt. 23 | /// - topP: An alternative to sampling with `temperature`, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 24 | /// - frequencyPenalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 25 | /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 26 | /// - logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. 27 | /// - stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 28 | /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 29 | func sendChatCompletion( 30 | newMessage: AIMessage, 31 | previousMessages: [AIMessage] = [], 32 | model: AIModelType, 33 | maxTokens: Int?, 34 | temperature: Double = 1, 35 | n: Int? = nil, 36 | topP: Double? = nil, 37 | frequencyPenalty: Double? = nil, 38 | presencePenalty: Double? = nil, 39 | logprobs: Int? = nil, 40 | stop: [String]? = nil, 41 | responseFormat: ChatCompletionsRequest.ResponseFormat? = nil, 42 | user: String? = nil, 43 | completion: @escaping (Result) -> Void 44 | ) { 45 | let endpoint = OpenAIEndpoint.chatCompletions 46 | 47 | var messages = previousMessages 48 | messages.append(newMessage) 49 | 50 | let requestBody = ChatCompletionsRequest(model: model, messages: messages, temperature: temperature, n: n, maxTokens: maxTokens, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, responseFormat: responseFormat, stop: stop, user: user) 51 | 52 | let requestData = try? 
jsonEncoder.encode(requestBody) 53 | 54 | let headers = baseHeaders 55 | 56 | network.request(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers, completion: completion) 57 | } 58 | 59 | @available(swift 5.5) 60 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 61 | func sendChatCompletion( 62 | newMessage: AIMessage, 63 | previousMessages: [AIMessage] = [], 64 | model: AIModelType, 65 | maxTokens: Int?, 66 | temperature: Double = 1, 67 | n: Int? = nil, 68 | topP: Double? = nil, 69 | frequencyPenalty: Double? = nil, 70 | presencePenalty: Double? = nil, 71 | logprobs: Int? = nil, 72 | stop: [String]? = nil, 73 | responseFormat: ChatCompletionsRequest.ResponseFormat? = nil, 74 | user: String? = nil 75 | ) async -> Result { 76 | return await withCheckedContinuation { continuation in 77 | sendChatCompletion( 78 | newMessage: newMessage, 79 | previousMessages: previousMessages, 80 | model: model, 81 | maxTokens: maxTokens, 82 | temperature: temperature, 83 | n: n, 84 | topP: topP, 85 | frequencyPenalty: frequencyPenalty, 86 | presencePenalty: presencePenalty, 87 | logprobs: logprobs, 88 | stop: stop, 89 | responseFormat: responseFormat, 90 | user: user 91 | ) { result in 92 | continuation.resume(returning: result) 93 | } 94 | } 95 | } 96 | } 97 | 98 | // MARK: - Stream methods 99 | 100 | @available(swift 5.5) 101 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 102 | public extension OpenAIKit { 103 | /// Creates a completion for the chat message 104 | /// 105 | /// - Parameters: 106 | /// - newMessage: The main input is the `newMessage` parameter. Where each object has a `role` (either `system`, `user`, or `assistant`) and `content` (the content of the message). 107 | /// - previousMessages: Previous messages, an optional parameter, the assistant will communicate in the context of these messages. Must be an array of `AIMessage` objects. 108 | /// - model: ID of the model to use. 
109 | /// - maxTokens: The maximum number of tokens to generate in the completion. 110 | /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `topP` but not both. 111 | /// - n: How many completions to generate for each prompt. 112 | /// - topP: An alternative to sampling with `temperature`, called nucleus sampling, where the model considers the results of the tokens with `topP` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 113 | /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 114 | /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 115 | /// - logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. 116 | /// - stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 117 | /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 118 | func sendStreamChatCompletion( 119 | newMessage: AIMessage, 120 | previousMessages: [AIMessage] = [], 121 | model: AIModelType, 122 | maxTokens: Int?, 123 | temperature: Double = 1, 124 | n: Int? 
= nil, 125 | topP: Double? = nil, 126 | frequencyPenalty: Double? = nil, 127 | presencePenalty: Double? = nil, 128 | logprobs: Int? = nil, 129 | stop: [String]? = nil, 130 | user: String? = nil, 131 | completion: @escaping (Result, Error>) -> Void 132 | ) { 133 | let endpoint = OpenAIEndpoint.chatCompletions 134 | 135 | var messages = previousMessages 136 | messages.append(newMessage) 137 | 138 | let requestBody = ChatCompletionsRequest(model: model, messages: messages, temperature: temperature, n: n, maxTokens: maxTokens, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, stop: stop, user: user, stream: true) 139 | 140 | let requestData = try? jsonEncoder.encode(requestBody) 141 | 142 | let headers = baseHeaders 143 | 144 | network.requestStream(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers) { (result: Result, Error>) in 145 | completion(result) 146 | } 147 | } 148 | 149 | @available(swift 5.5) 150 | @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) 151 | func sendStreamChatCompletion( 152 | newMessage: AIMessage, 153 | previousMessages: [AIMessage] = [], 154 | model: AIModelType, 155 | maxTokens: Int?, 156 | temperature: Double = 1, 157 | n: Int? = nil, 158 | topP: Double? = nil, 159 | frequencyPenalty: Double? = nil, 160 | presencePenalty: Double? = nil, 161 | logprobs: Int? = nil, 162 | stop: [String]? = nil, 163 | user: String? = nil 164 | ) async throws -> AsyncThrowingStream, Error> { 165 | let endpoint = OpenAIEndpoint.chatCompletions 166 | 167 | var messages = previousMessages 168 | messages.append(newMessage) 169 | 170 | let requestBody = ChatCompletionsRequest(model: model, messages: messages, temperature: temperature, n: n, maxTokens: maxTokens, topP: topP, frequencyPenalty: frequencyPenalty, presencePenalty: presencePenalty, logprobs: logprobs, stop: stop, user: user, stream: true) 171 | 172 | let requestData = try? 
jsonEncoder.encode(requestBody) 173 | 174 | let headers = baseHeaders 175 | 176 | return try await network.requestStream(endpoint.method, url: endpoint.urlPath(for: self), body: requestData, headers: headers) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenAI 2 |

3 | 4 |

5 | 6 | ![Swift Workflow](https://github.com/FuturraGroup/OpenAI/actions/workflows/swift.yml/badge.svg) 7 | [![Swift](https://img.shields.io/badge/Swift%20Compatibility-5.5%20%7C%205.6%20%7C%205.7-orange)](https://img.shields.io/badge/Swift%20Compatibility-5.5%20%7C%205.6%20%7C%205.7-orange) 8 | [![Platform](https://img.shields.io/badge/Platform%20Compatibility-iOS%20%7C%20macOS%20%7C%20tvOS%20%7C%20watchOS-blue)](https://img.shields.io/badge/Platform%20Compatibility-iOS%20%7C%20macOS%20%7C%20tvOS%20%7C%20watchOS-blue) 9 | [![licence](https://img.shields.io/badge/%20licence-MIT-green)](https://img.shields.io/badge/%20licence-MIT-green) 10 | 11 | OpenAI is a community-maintained repository containing Swift implementation over [OpenAI public API](https://platform.openai.com/docs/api-reference/). 12 | 13 | - [Overview](#overview) 14 | - [Installation](#installation) 15 | - [CocoaPods](#cocoapods) 16 | - [Swift Package Manager](#swift-package-manager) 17 | - [Usage](#usage) 18 | - [Initialization](#initialization) 19 | - [Additional Initialization parameters](#additional-initialization-parameters) 20 | - [SSL Handshake](#ssl-handshake) 21 | - [What is this and why you may use it?](#what-is-this-and-why-you-may-use-it) 22 | - [The advantages of this solution](#the-advantages-of-this-solution) 23 | - [How to use SSL Handshake](#how-to-use-ssl-handshake) 24 | - [Completions](#completions) 25 | - [Completions](#completions) 26 | - [Chat Completions](#chat-completions) 27 | - [Stream](#stream) 28 | - [JSON mode](#json-mode) 29 | - [Generate Image](#generate-image) 30 | - [Contribute](#contribute) 31 | - [License](#license) 32 | 33 | ## Overview 34 | The OpenAI API can be applied to virtually any task that involves understanding or generating natural language or code. We offer a spectrum of models with different levels of power suitable for different tasks, as well as the ability to fine-tune your own custom models. 
These models can be used for everything from content generation to semantic search and classification. 35 | 36 | ## Installation 37 | 38 | OpenAI is available with CocoaPods and Swift Package Manager. 39 | 40 | ### CocoaPods 41 | 42 | [CocoaPods](https://cocoapods.org) is a dependency manager for Cocoa projects. For usage and installation instructions, visit their website. To integrate OpenAI into your Xcode project using CocoaPods, specify it in your `Podfile`: 43 | 44 | ```ruby 45 | pod 'OpenAIKit' 46 | ``` 47 | ### Swift Package Manager 48 | 49 | The [Swift Package Manager](https://swift.org/package-manager/) is a tool for automating the distribution of Swift code and is integrated into the `swift` compiler. 50 | 51 | Once you have your Swift package set up, adding OpenAI as a dependency is as easy as adding it to the `dependencies` value of your `Package.swift`. 52 | 53 | ```swift 54 | dependencies: [ 55 | .package(url: "https://github.com/FuturraGroup/OpenAI.git", .branch("main")) 56 | ] 57 | ``` 58 | 59 | ## Usage 60 | 61 | ### Initialization 62 | 63 | It is encouraged to use environment variables to inject the [OpenAI API key](https://platform.openai.com/account/api-keys), instead of hardcoding it in the source code. This is shown in our [Example project](https://github.com/FuturraGroup/OpenAI/tree/main/Example). 64 | 65 | ```swift 66 | let apiToken: String = "" 67 | let organizationName: String = "" 68 | ``` 69 | Initialize `OpenAIKit` with your API Token wherever convenient in your project. Organization name is optional. 70 | 71 | ```swift 72 | import OpenAIKit 73 | 74 | public let openAI = OpenAIKit(apiToken: apiToken, organization: organizationName) 75 | ``` 76 | 77 | #### Additional Initialization parameters 78 | Additional optional initializations parameters list: 79 | 80 | - `timeoutInterval: TimeInterval` - timeout interval for OpenAI API response. Default value is `60` sec. 
81 | - `customOpenAIURL: String?` - custom endpoint to customize OpenAI API endpoint. Default value is `nil`. **Attention!** If you customize `customOpenAIURL` with your own instead of the default OpenAI URL (for example `customOpenAIURL: "https://openai.mysite.test"`) - the routes and methods **DON'T CHANGE**! All routes keep their implementation from [OpenAI public API](https://platform.openai.com/docs/api-reference/).
82 | - `sslCerificatePath: String?` - path to a `*.cer` SSL certificate file to validate against OpenAI's certificate, or your server's if you use a custom `customOpenAIURL`. Default value is `nil`.
83 | 
84 | ### SSL Handshake
85 | 
86 | As described in the [Additional Initialization parameters](#additional-initialization-parameters) section, we accept an additional parameter `sslCerificatePath` to establish an SSL handshake with OpenAI or your own server.
87 | 
88 | #### What is this and why you may use it?
89 | 
90 | First of all, we made this feature to protect against MITM attacks. We verify the certificate from the provided path `sslCerificatePath` against the SSL certificate of OpenAI's server (or your own). If the certificates don't match, we cancel the request.
91 | 
92 | #### The advantages of this solution
93 | 
94 | - Protecting your data from MITM attacks. If someone on the network where your app is running executes a MITM attack, your [OpenAI API key](https://platform.openai.com/account/api-keys) is safe. They can't access any private data from the request, and they'll get an error about `SSL Handshake Failure`.
95 | - Don't waste your tokens on hackers. After a request is cancelled with `SSL Handshake Failure`, it is not executed and no data is sent to OpenAI, so you don't waste your request tokens on them!
96 | - You can use this feature with your own server - just provide your SSL certificate instead of OpenAI's.
97 | 
98 | #### How to use SSL Handshake
99 | 
100 | **Attention! We _DON'T SAVE_ any certificates and _DON'T MANAGE_ them! 
101 | We use the data from the provided `*.cer` file _ONLY_ for comparison with the server certificate. 
102 | We are _not responsible_ for the relevance and integrity of the provided certificate.** 
103 | 
104 | **These actions are not within the competence of our library; you yourself must monitor the validity of the certificate, be responsible for downloading new versions, etc.** 
105 | 
106 | 
107 | **The procedure described below is an instruction on how to correctly obtain a public OpenAI SSL certificate. We are not responsible for any changes to this certificate and its loss of relevance after you have received it.** 
108 | 
109 | **How to use SSL Handshake:** 
110 | 
111 | 1. Get the SSL Certificate from OpenAI 
112 | - Open `https://api.openai.com/` from Safari. It's necessary to open and save it from Safari because Safari saves the certificate in binary format. 
113 | - Press the lock icon in the address bar 
114 | - In the popup that appears, press `Show Certificate` 
115 | - Select the bottom certificate in the list and drag-and-drop the certificate icon anywhere you need to save it. 
116 |

117 | 118 |

119 | 2. Add certificate to your project 120 | 3. Initialize our library providing path to this file 121 | 122 | Example with `Bundle` source path: 123 | 124 | ```Swift 125 | var openAI: OpenAIKit? 126 | 127 | if let filePath = Bundle.main.path(forResource: "MyCert", ofType: "cer") { 128 | openAI = OpenAIKit(apiToken: apiToken, organization: nil, sslCerificatePath: filePath) 129 | } 130 | ``` 131 | 132 | Example with `FileManager` source path: 133 | 134 | ```Swift 135 | var openAI: OpenAIKit? 136 | 137 | let fileManager = FileManager.default 138 | let documentDirectoryPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first as? NSString 139 | 140 | if let filePath = documentDirectoryPath?.appendingPathComponent("MyCert.cer") { 141 | openAI = OpenAIKit(apiToken: apiToken, organization: nil, sslCerificatePath: filePath) 142 | } 143 | ``` 144 | 145 | ### Completions 146 | Create a call to the completions API, passing in a text prompt. 147 | 148 | ```swift 149 | openAI.sendCompletion(prompt: "Hello!", model: .gptV3_5(.davinciText003), maxTokens: 2048) { [weak self] result in 150 | switch result { 151 | case .success(let aiResult): 152 | DispatchQueue.main.async { 153 | if let text = aiResult.choices.first?.text { 154 | print("response text: \(text)") //"\n\nHello there, how may I assist you today?" 155 | } 156 | } 157 | case .failure(let error): 158 | DispatchQueue.main.async { [weak self] in 159 | let alert = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert) 160 | alert.addAction(UIAlertAction(title: "Ok", style: .default)) 161 | self?.present(alert, animated: true) 162 | } 163 | } 164 | } 165 | ``` 166 | 167 | Also supports async/await usage for all methods. Here is example. 
168 | 169 | ```swift 170 | let result = await openAI.sendCompletion(prompt: "Hello!", model: .gptV3_5(.davinciText003), maxTokens: 2048) 171 | 172 | switch result { 173 | case .success(let aiResult): 174 | /// Hadle success response result 175 | if let text = aiResult.choices.first?.text { 176 | print("response text: \(text)") //"\n\nHello there, how may I assist you today?" 177 | } 178 | case .failure(let error): 179 | /// Hadle error actions 180 | print(error.localizedDescription) 181 | } 182 | ``` 183 | #### Chat Completions 184 | Chat completions almost the same as completions, there only few differences: 185 | 186 | - It can understand context with pathing previous chat messages ([read more](https://platform.openai.com/docs/guides/chat)). 187 | - Response text located in message field of retrieved completion 188 | - Supports **ONLY** *gpt-3.5-turbo* and *gpt-3.5-turbo-0301* models ([read more about models compatibility](https://platform.openai.com/docs/models/model-endpoint-compatability)). 189 | 190 | ```swift 191 | openAI.sendChatCompletion(newMessage: AIMessage(role: .user, content: prompt), previousMessages: [], model: .gptV3_5(.gptTurbo), maxTokens: 2048, n: 1, completion: { [weak self] result in 192 | DispatchQueue.main.async { self?.stopLoading() } 193 | 194 | switch result { 195 | case .success(let aiResult): 196 | // Handle result actions 197 | if let text = aiResult.choices.first?.message?.content { 198 | print(text) 199 | } 200 | case .failure(let error): 201 | // Handle error actions 202 | print(error.localizedDescription) 203 | } 204 | }) 205 | ``` 206 | #### Stream 207 | You can retrieve response from OpenAI for Completions and Chat Completions in stream partial progress. Don't need to wait time when whole completion response will complete. You can handle and present results in live. 
208 | 209 | Example call on Chat Completions: 210 | 211 | ```swift 212 | openAI.sendStreamChatCompletion(newMessage: AIMessage(role: .user, content: "Hello!"), model: .gptV3_5(.gptTurbo), maxTokens: 2048) { result in 213 | switch result { 214 | case .success(let streamResult): 215 | /// Hadle success response result 216 | if let streamMessage = streamResult.message?.choices.first?.message { 217 | print("Stream message: \(streamMessage)") //"\n\nHello there, how may I assist you today?" 218 | } 219 | case .failure(let error): 220 | // Handle error actions 221 | print(error.localizedDescription) 222 | } 223 | } 224 | ``` 225 | 226 | You can also stop stream manually like this: 227 | 228 | ```swift 229 | openAI.sendStreamChatCompletion(newMessage: AIMessage(role: .user, content: "Hello!"), model: .gptV3_5(.gptTurbo), maxTokens: 2048) { result in 230 | switch result { 231 | case .success(let streamResult): 232 | /// Hadle success response result 233 | 234 | streamResult.stream.stopStream() /// Manually stop stream 235 | case .failure(let error): 236 | // Handle error actions 237 | print(error.localizedDescription) 238 | } 239 | } 240 | ``` 241 | 242 | #### JSON mode 243 | 244 | A common way to use Chat Completions is to instruct the model to always return a JSON object that makes sense for your use case, by specifying this in the system message. 245 | To prevent these errors and improve model performance, when calling gpt-4-turbo-preview or gpt-3.5-turbo-1106, you can set response_format to { "type": "json_object" } to enable JSON mode. When JSON mode is enabled, the model is constrained to only generate strings that parse into valid JSON object. More details in [OpenAI JSON mode](https://platform.openai.com/docs/guides/text-generation/json-mode) documentation. 246 | 247 | You can use it in `sendChatCompletion` or `sendStreamChatCompletion ` methods. 
Pass the optional parameter `responseFormat` to any of these methods 248 | 
249 | ```swift 
250 | openAI.sendChatCompletion(newMessage: AIMessage(role: .user, content: prompt), previousMessages: [], model: .gptV3_5(.gptTurbo), maxTokens: 2048, n: 1, responseFormat: .json, completion: { [weak self] result in 
251 | DispatchQueue.main.async { self?.stopLoading() } 
252 | 
253 | switch result { 
254 | case .success(let aiResult): 
255 | // Handle result actions 
256 | if let text = aiResult.choices.first?.message?.content { 
257 | print(text) /// Printed JSON string 
258 | } 
259 | case .failure(let error): 
260 | // Handle error actions 
261 | print(error.localizedDescription) 
262 | } 
263 | }) 
264 | ``` 
265 | 
266 | ### Generate Image 
267 | 
268 | [DALL·E](https://platform.openai.com/docs/models/dall-e) is an AI system that can create realistic images and art from a description in natural language. We currently support the ability, given a prompt, to create a new image with a certain size, edit an existing image, or create variations of a user-provided image. 
269 | 
270 | The code below demonstrates how you can generate an image using DALL·E: 
271 | 
272 | ```swift 
273 | openAI.sendImagesRequest(prompt: "bird", size: .size512, n: 1) { [weak self] result in 
274 | 
275 | switch result { 
276 | case .success(let aiResult): 
277 | 
278 | DispatchQueue.main.async { 
279 | if let urlString = aiResult.data.first?.url { 
280 | print("generated image url: \(urlString)") 
281 | } 
282 | } 
283 | case .failure(let error): 
284 | DispatchQueue.main.async { [weak self] in 
285 | let alert = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert) 
286 | alert.addAction(UIAlertAction(title: "Ok", style: .default)) 
287 | self?.present(alert, animated: true) 
288 | } 
289 | } 
290 | } 
291 | ``` 
292 | ## Contribute 
293 | 
294 | Contributions for improvements are welcomed. Feel free to submit a pull request to help grow the library.
If you have any questions, feature suggestions, or bug reports, please send them to [Issues](https://github.com/FuturraGroup/OpenAI/issues). 295 | 296 | ## License 297 | 298 | ``` 299 | MIT License 300 | 301 | Copyright (c) 2023 Futurra Group 302 | 303 | Permission is hereby granted, free of charge, to any person obtaining a copy 304 | of this software and associated documentation files (the "Software"), to deal 305 | in the Software without restriction, including without limitation the rights 306 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 307 | copies of the Software, and to permit persons to whom the Software is 308 | furnished to do so, subject to the following conditions: 309 | 310 | The above copyright notice and this permission notice shall be included in all 311 | copies or substantial portions of the Software. 312 | 313 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 314 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 315 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 316 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 317 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 318 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 319 | SOFTWARE. 
320 | ``` 321 | -------------------------------------------------------------------------------- /Example/OpenAIKit/Base.lproj/Main.storyboard: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | -------------------------------------------------------------------------------- /Example/OpenAIKit.xcodeproj/project.pbxproj: -------------------------------------------------------------------------------- 1 | // !$*UTF8*$! 
2 | { 3 | archiveVersion = 1; 4 | classes = { 5 | }; 6 | objectVersion = 46; 7 | objects = { 8 | 9 | /* Begin PBXBuildFile section */ 10 | 607FACD61AFB9204008FA782 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 607FACD51AFB9204008FA782 /* AppDelegate.swift */; }; 11 | 607FACD81AFB9204008FA782 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 607FACD71AFB9204008FA782 /* ViewController.swift */; }; 12 | 607FACDB1AFB9204008FA782 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 607FACD91AFB9204008FA782 /* Main.storyboard */; }; 13 | 607FACDD1AFB9204008FA782 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 607FACDC1AFB9204008FA782 /* Images.xcassets */; }; 14 | 607FACE01AFB9204008FA782 /* LaunchScreen.xib in Resources */ = {isa = PBXBuildFile; fileRef = 607FACDE1AFB9204008FA782 /* LaunchScreen.xib */; }; 15 | 92178128F361F9174284E9A2 /* Pods_OpenAIKit_Example.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 7C773545CB6B572C312902B7 /* Pods_OpenAIKit_Example.framework */; }; 16 | A402FF1A29B5BF0B0008D484 /* ViewControllerImages.swift in Sources */ = {isa = PBXBuildFile; fileRef = A402FF1929B5BF0B0008D484 /* ViewControllerImages.swift */; }; 17 | /* End PBXBuildFile section */ 18 | 19 | /* Begin PBXFileReference section */ 20 | 353006538B3B7B1B4EC1C7BB /* LICENSE */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text; name = LICENSE; path = ../LICENSE; sourceTree = ""; }; 21 | 4404DA1264097869FD8F9A17 /* Pods-OpenAIKit_Tests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-OpenAIKit_Tests.debug.xcconfig"; path = "Target Support Files/Pods-OpenAIKit_Tests/Pods-OpenAIKit_Tests.debug.xcconfig"; sourceTree = ""; }; 22 | 607FACD01AFB9204008FA782 /* OpenAIKit_Example.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = OpenAIKit_Example.app; sourceTree = 
BUILT_PRODUCTS_DIR; }; 23 | 607FACD41AFB9204008FA782 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 24 | 607FACD51AFB9204008FA782 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; 25 | 607FACD71AFB9204008FA782 /* ViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewController.swift; sourceTree = ""; }; 26 | 607FACDA1AFB9204008FA782 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; 27 | 607FACDC1AFB9204008FA782 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; 28 | 607FACDF1AFB9204008FA782 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = Base.lproj/LaunchScreen.xib; sourceTree = ""; }; 29 | 7C773545CB6B572C312902B7 /* Pods_OpenAIKit_Example.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_OpenAIKit_Example.framework; sourceTree = BUILT_PRODUCTS_DIR; }; 30 | 7F5010F3DC25DB71104222F3 /* Pods_OpenAIKit_Tests.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_OpenAIKit_Tests.framework; sourceTree = BUILT_PRODUCTS_DIR; }; 31 | 89E7BCE4717AB352A90C79AC /* Pods-OpenAIKit_Tests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-OpenAIKit_Tests.release.xcconfig"; path = "Target Support Files/Pods-OpenAIKit_Tests/Pods-OpenAIKit_Tests.release.xcconfig"; sourceTree = ""; }; 32 | A06687B527A549049A02A1AE /* OpenAIKit.podspec */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text; name = OpenAIKit.podspec; path = ../OpenAIKit.podspec; sourceTree = ""; }; 33 | 
A402FF1929B5BF0B0008D484 /* ViewControllerImages.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewControllerImages.swift; sourceTree = ""; }; 34 | A9ACDFE51C218EAAF8993468 /* README.md */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = net.daringfireball.markdown; name = README.md; path = ../README.md; sourceTree = ""; }; 35 | B69B4F536C86F9D0E181D71C /* Pods-OpenAIKit_Example.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-OpenAIKit_Example.release.xcconfig"; path = "Target Support Files/Pods-OpenAIKit_Example/Pods-OpenAIKit_Example.release.xcconfig"; sourceTree = ""; }; 36 | E19438361E65893CDD328D9A /* Pods-OpenAIKit_Example.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-OpenAIKit_Example.debug.xcconfig"; path = "Target Support Files/Pods-OpenAIKit_Example/Pods-OpenAIKit_Example.debug.xcconfig"; sourceTree = ""; }; 37 | /* End PBXFileReference section */ 38 | 39 | /* Begin PBXFrameworksBuildPhase section */ 40 | 607FACCD1AFB9204008FA782 /* Frameworks */ = { 41 | isa = PBXFrameworksBuildPhase; 42 | buildActionMask = 2147483647; 43 | files = ( 44 | 92178128F361F9174284E9A2 /* Pods_OpenAIKit_Example.framework in Frameworks */, 45 | ); 46 | runOnlyForDeploymentPostprocessing = 0; 47 | }; 48 | /* End PBXFrameworksBuildPhase section */ 49 | 50 | /* Begin PBXGroup section */ 51 | 607FACC71AFB9204008FA782 = { 52 | isa = PBXGroup; 53 | children = ( 54 | 607FACF51AFB993E008FA782 /* Podspec Metadata */, 55 | 607FACD21AFB9204008FA782 /* Example for OpenAIKit */, 56 | 607FACD11AFB9204008FA782 /* Products */, 57 | ACCF50D69F2577FE6A3B5164 /* Pods */, 58 | B8F534E56F1EEDEF7B64A1B7 /* Frameworks */, 59 | ); 60 | sourceTree = ""; 61 | }; 62 | 607FACD11AFB9204008FA782 /* Products */ = { 63 | isa = PBXGroup; 64 | children = ( 65 | 607FACD01AFB9204008FA782 /* OpenAIKit_Example.app */, 66 | ); 67 | 
name = Products; 68 | sourceTree = ""; 69 | }; 70 | 607FACD21AFB9204008FA782 /* Example for OpenAIKit */ = { 71 | isa = PBXGroup; 72 | children = ( 73 | 607FACD51AFB9204008FA782 /* AppDelegate.swift */, 74 | 607FACD71AFB9204008FA782 /* ViewController.swift */, 75 | A402FF1929B5BF0B0008D484 /* ViewControllerImages.swift */, 76 | 607FACDC1AFB9204008FA782 /* Images.xcassets */, 77 | 607FACDE1AFB9204008FA782 /* LaunchScreen.xib */, 78 | 607FACD91AFB9204008FA782 /* Main.storyboard */, 79 | 607FACD31AFB9204008FA782 /* Supporting Files */, 80 | ); 81 | name = "Example for OpenAIKit"; 82 | path = OpenAIKit; 83 | sourceTree = ""; 84 | }; 85 | 607FACD31AFB9204008FA782 /* Supporting Files */ = { 86 | isa = PBXGroup; 87 | children = ( 88 | 607FACD41AFB9204008FA782 /* Info.plist */, 89 | ); 90 | name = "Supporting Files"; 91 | sourceTree = ""; 92 | }; 93 | 607FACF51AFB993E008FA782 /* Podspec Metadata */ = { 94 | isa = PBXGroup; 95 | children = ( 96 | A06687B527A549049A02A1AE /* OpenAIKit.podspec */, 97 | A9ACDFE51C218EAAF8993468 /* README.md */, 98 | 353006538B3B7B1B4EC1C7BB /* LICENSE */, 99 | ); 100 | name = "Podspec Metadata"; 101 | sourceTree = ""; 102 | }; 103 | ACCF50D69F2577FE6A3B5164 /* Pods */ = { 104 | isa = PBXGroup; 105 | children = ( 106 | E19438361E65893CDD328D9A /* Pods-OpenAIKit_Example.debug.xcconfig */, 107 | B69B4F536C86F9D0E181D71C /* Pods-OpenAIKit_Example.release.xcconfig */, 108 | 4404DA1264097869FD8F9A17 /* Pods-OpenAIKit_Tests.debug.xcconfig */, 109 | 89E7BCE4717AB352A90C79AC /* Pods-OpenAIKit_Tests.release.xcconfig */, 110 | ); 111 | path = Pods; 112 | sourceTree = ""; 113 | }; 114 | B8F534E56F1EEDEF7B64A1B7 /* Frameworks */ = { 115 | isa = PBXGroup; 116 | children = ( 117 | 7C773545CB6B572C312902B7 /* Pods_OpenAIKit_Example.framework */, 118 | 7F5010F3DC25DB71104222F3 /* Pods_OpenAIKit_Tests.framework */, 119 | ); 120 | name = Frameworks; 121 | sourceTree = ""; 122 | }; 123 | /* End PBXGroup section */ 124 | 125 | /* Begin PBXNativeTarget section */ 
126 | 607FACCF1AFB9204008FA782 /* OpenAIKit_Example */ = { 127 | isa = PBXNativeTarget; 128 | buildConfigurationList = 607FACEF1AFB9204008FA782 /* Build configuration list for PBXNativeTarget "OpenAIKit_Example" */; 129 | buildPhases = ( 130 | 9658B17C80C8C74359767F78 /* [CP] Check Pods Manifest.lock */, 131 | 607FACCC1AFB9204008FA782 /* Sources */, 132 | 607FACCD1AFB9204008FA782 /* Frameworks */, 133 | 607FACCE1AFB9204008FA782 /* Resources */, 134 | A629922A3A225228C93200AA /* [CP] Embed Pods Frameworks */, 135 | ); 136 | buildRules = ( 137 | ); 138 | dependencies = ( 139 | ); 140 | name = OpenAIKit_Example; 141 | productName = OpenAIKit; 142 | productReference = 607FACD01AFB9204008FA782 /* OpenAIKit_Example.app */; 143 | productType = "com.apple.product-type.application"; 144 | }; 145 | /* End PBXNativeTarget section */ 146 | 147 | /* Begin PBXProject section */ 148 | 607FACC81AFB9204008FA782 /* Project object */ = { 149 | isa = PBXProject; 150 | attributes = { 151 | LastSwiftUpdateCheck = 0830; 152 | LastUpgradeCheck = 1420; 153 | ORGANIZATIONNAME = "Futurra Group"; 154 | TargetAttributes = { 155 | 607FACCF1AFB9204008FA782 = { 156 | CreatedOnToolsVersion = 6.3.1; 157 | DevelopmentTeam = 358DXRMT22; 158 | LastSwiftMigration = ""; 159 | }; 160 | }; 161 | }; 162 | buildConfigurationList = 607FACCB1AFB9204008FA782 /* Build configuration list for PBXProject "OpenAIKit" */; 163 | compatibilityVersion = "Xcode 3.2"; 164 | developmentRegion = English; 165 | hasScannedForEncodings = 0; 166 | knownRegions = ( 167 | English, 168 | en, 169 | Base, 170 | ); 171 | mainGroup = 607FACC71AFB9204008FA782; 172 | productRefGroup = 607FACD11AFB9204008FA782 /* Products */; 173 | projectDirPath = ""; 174 | projectRoot = ""; 175 | targets = ( 176 | 607FACCF1AFB9204008FA782 /* OpenAIKit_Example */, 177 | ); 178 | }; 179 | /* End PBXProject section */ 180 | 181 | /* Begin PBXResourcesBuildPhase section */ 182 | 607FACCE1AFB9204008FA782 /* Resources */ = { 183 | isa = 
PBXResourcesBuildPhase; 184 | buildActionMask = 2147483647; 185 | files = ( 186 | 607FACDB1AFB9204008FA782 /* Main.storyboard in Resources */, 187 | 607FACE01AFB9204008FA782 /* LaunchScreen.xib in Resources */, 188 | 607FACDD1AFB9204008FA782 /* Images.xcassets in Resources */, 189 | ); 190 | runOnlyForDeploymentPostprocessing = 0; 191 | }; 192 | /* End PBXResourcesBuildPhase section */ 193 | 194 | /* Begin PBXShellScriptBuildPhase section */ 195 | 9658B17C80C8C74359767F78 /* [CP] Check Pods Manifest.lock */ = { 196 | isa = PBXShellScriptBuildPhase; 197 | buildActionMask = 2147483647; 198 | files = ( 199 | ); 200 | inputFileListPaths = ( 201 | ); 202 | inputPaths = ( 203 | "${PODS_PODFILE_DIR_PATH}/Podfile.lock", 204 | "${PODS_ROOT}/Manifest.lock", 205 | ); 206 | name = "[CP] Check Pods Manifest.lock"; 207 | outputFileListPaths = ( 208 | ); 209 | outputPaths = ( 210 | "$(DERIVED_FILE_DIR)/Pods-OpenAIKit_Example-checkManifestLockResult.txt", 211 | ); 212 | runOnlyForDeploymentPostprocessing = 0; 213 | shellPath = /bin/sh; 214 | shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n"; 215 | showEnvVarsInLog = 0; 216 | }; 217 | A629922A3A225228C93200AA /* [CP] Embed Pods Frameworks */ = { 218 | isa = PBXShellScriptBuildPhase; 219 | buildActionMask = 2147483647; 220 | files = ( 221 | ); 222 | inputPaths = ( 223 | "${PODS_ROOT}/Target Support Files/Pods-OpenAIKit_Example/Pods-OpenAIKit_Example-frameworks.sh", 224 | "${BUILT_PRODUCTS_DIR}/OpenAIKit/OpenAIKit.framework", 225 | "${BUILT_PRODUCTS_DIR}/SDWebImage/SDWebImage.framework", 226 | ); 227 | name = "[CP] Embed Pods Frameworks"; 228 | outputPaths = ( 229 | "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/OpenAIKit.framework", 230 | "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/SDWebImage.framework", 231 | ); 232 | runOnlyForDeploymentPostprocessing = 0; 233 | shellPath = /bin/sh; 234 | shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-OpenAIKit_Example/Pods-OpenAIKit_Example-frameworks.sh\"\n"; 235 | showEnvVarsInLog = 0; 236 | }; 237 | /* End PBXShellScriptBuildPhase section */ 238 | 239 | /* Begin PBXSourcesBuildPhase section */ 240 | 607FACCC1AFB9204008FA782 /* Sources */ = { 241 | isa = PBXSourcesBuildPhase; 242 | buildActionMask = 2147483647; 243 | files = ( 244 | 607FACD81AFB9204008FA782 /* ViewController.swift in Sources */, 245 | 607FACD61AFB9204008FA782 /* AppDelegate.swift in Sources */, 246 | A402FF1A29B5BF0B0008D484 /* ViewControllerImages.swift in Sources */, 247 | ); 248 | runOnlyForDeploymentPostprocessing = 0; 249 | }; 250 | /* End PBXSourcesBuildPhase section */ 251 | 252 | /* Begin PBXVariantGroup section */ 253 | 607FACD91AFB9204008FA782 /* Main.storyboard */ = { 254 | isa = PBXVariantGroup; 255 | children = ( 256 | 607FACDA1AFB9204008FA782 /* Base */, 257 | ); 258 | name = Main.storyboard; 259 | sourceTree = ""; 260 | }; 261 | 607FACDE1AFB9204008FA782 /* 
LaunchScreen.xib */ = { 262 | isa = PBXVariantGroup; 263 | children = ( 264 | 607FACDF1AFB9204008FA782 /* Base */, 265 | ); 266 | name = LaunchScreen.xib; 267 | sourceTree = ""; 268 | }; 269 | /* End PBXVariantGroup section */ 270 | 271 | /* Begin XCBuildConfiguration section */ 272 | 607FACED1AFB9204008FA782 /* Debug */ = { 273 | isa = XCBuildConfiguration; 274 | buildSettings = { 275 | ALWAYS_SEARCH_USER_PATHS = NO; 276 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; 277 | CLANG_CXX_LIBRARY = "libc++"; 278 | CLANG_ENABLE_MODULES = YES; 279 | CLANG_ENABLE_OBJC_ARC = YES; 280 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 281 | CLANG_WARN_BOOL_CONVERSION = YES; 282 | CLANG_WARN_COMMA = YES; 283 | CLANG_WARN_CONSTANT_CONVERSION = YES; 284 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; 285 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 286 | CLANG_WARN_EMPTY_BODY = YES; 287 | CLANG_WARN_ENUM_CONVERSION = YES; 288 | CLANG_WARN_INFINITE_RECURSION = YES; 289 | CLANG_WARN_INT_CONVERSION = YES; 290 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 291 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; 292 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 293 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 294 | CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; 295 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 296 | CLANG_WARN_STRICT_PROTOTYPES = YES; 297 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 298 | CLANG_WARN_UNREACHABLE_CODE = YES; 299 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 300 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; 301 | COPY_PHASE_STRIP = NO; 302 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 303 | ENABLE_STRICT_OBJC_MSGSEND = YES; 304 | ENABLE_TESTABILITY = YES; 305 | GCC_C_LANGUAGE_STANDARD = gnu99; 306 | GCC_DYNAMIC_NO_PIC = NO; 307 | GCC_NO_COMMON_BLOCKS = YES; 308 | GCC_OPTIMIZATION_LEVEL = 0; 309 | GCC_PREPROCESSOR_DEFINITIONS = ( 310 | "DEBUG=1", 311 | "$(inherited)", 312 | ); 313 | GCC_SYMBOLS_PRIVATE_EXTERN = NO; 314 | GCC_WARN_64_TO_32_BIT_CONVERSION = 
YES; 315 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 316 | GCC_WARN_UNDECLARED_SELECTOR = YES; 317 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 318 | GCC_WARN_UNUSED_FUNCTION = YES; 319 | GCC_WARN_UNUSED_VARIABLE = YES; 320 | IPHONEOS_DEPLOYMENT_TARGET = 12.0; 321 | MTL_ENABLE_DEBUG_INFO = YES; 322 | ONLY_ACTIVE_ARCH = YES; 323 | SDKROOT = iphoneos; 324 | SWIFT_OPTIMIZATION_LEVEL = "-Onone"; 325 | }; 326 | name = Debug; 327 | }; 328 | 607FACEE1AFB9204008FA782 /* Release */ = { 329 | isa = XCBuildConfiguration; 330 | buildSettings = { 331 | ALWAYS_SEARCH_USER_PATHS = NO; 332 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; 333 | CLANG_CXX_LIBRARY = "libc++"; 334 | CLANG_ENABLE_MODULES = YES; 335 | CLANG_ENABLE_OBJC_ARC = YES; 336 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 337 | CLANG_WARN_BOOL_CONVERSION = YES; 338 | CLANG_WARN_COMMA = YES; 339 | CLANG_WARN_CONSTANT_CONVERSION = YES; 340 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; 341 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 342 | CLANG_WARN_EMPTY_BODY = YES; 343 | CLANG_WARN_ENUM_CONVERSION = YES; 344 | CLANG_WARN_INFINITE_RECURSION = YES; 345 | CLANG_WARN_INT_CONVERSION = YES; 346 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 347 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; 348 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 349 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 350 | CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; 351 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 352 | CLANG_WARN_STRICT_PROTOTYPES = YES; 353 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 354 | CLANG_WARN_UNREACHABLE_CODE = YES; 355 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 356 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; 357 | COPY_PHASE_STRIP = NO; 358 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 359 | ENABLE_NS_ASSERTIONS = NO; 360 | ENABLE_STRICT_OBJC_MSGSEND = YES; 361 | GCC_C_LANGUAGE_STANDARD = gnu99; 362 | GCC_NO_COMMON_BLOCKS = YES; 363 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 364 | 
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 365 | GCC_WARN_UNDECLARED_SELECTOR = YES; 366 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 367 | GCC_WARN_UNUSED_FUNCTION = YES; 368 | GCC_WARN_UNUSED_VARIABLE = YES; 369 | IPHONEOS_DEPLOYMENT_TARGET = 12.0; 370 | MTL_ENABLE_DEBUG_INFO = NO; 371 | SDKROOT = iphoneos; 372 | SWIFT_OPTIMIZATION_LEVEL = "-Owholemodule"; 373 | VALIDATE_PRODUCT = YES; 374 | }; 375 | name = Release; 376 | }; 377 | 607FACF01AFB9204008FA782 /* Debug */ = { 378 | isa = XCBuildConfiguration; 379 | baseConfigurationReference = E19438361E65893CDD328D9A /* Pods-OpenAIKit_Example.debug.xcconfig */; 380 | buildSettings = { 381 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 382 | DEVELOPMENT_TEAM = 358DXRMT22; 383 | INFOPLIST_FILE = OpenAIKit/Info.plist; 384 | IPHONEOS_DEPLOYMENT_TARGET = 13.0; 385 | LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; 386 | MODULE_NAME = ExampleApp; 387 | PRODUCT_BUNDLE_IDENTIFIER = "org.cocoapods.demo.$(PRODUCT_NAME:rfc1034identifier)"; 388 | PRODUCT_NAME = "$(TARGET_NAME)"; 389 | SWIFT_SWIFT3_OBJC_INFERENCE = Default; 390 | SWIFT_VERSION = 5.0; 391 | }; 392 | name = Debug; 393 | }; 394 | 607FACF11AFB9204008FA782 /* Release */ = { 395 | isa = XCBuildConfiguration; 396 | baseConfigurationReference = B69B4F536C86F9D0E181D71C /* Pods-OpenAIKit_Example.release.xcconfig */; 397 | buildSettings = { 398 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 399 | DEVELOPMENT_TEAM = 358DXRMT22; 400 | INFOPLIST_FILE = OpenAIKit/Info.plist; 401 | IPHONEOS_DEPLOYMENT_TARGET = 13.0; 402 | LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; 403 | MODULE_NAME = ExampleApp; 404 | PRODUCT_BUNDLE_IDENTIFIER = "org.cocoapods.demo.$(PRODUCT_NAME:rfc1034identifier)"; 405 | PRODUCT_NAME = "$(TARGET_NAME)"; 406 | SWIFT_SWIFT3_OBJC_INFERENCE = Default; 407 | SWIFT_VERSION = 5.0; 408 | }; 409 | name = Release; 410 | }; 411 | /* End XCBuildConfiguration section */ 412 | 413 | /* Begin XCConfigurationList section */ 
414 | 607FACCB1AFB9204008FA782 /* Build configuration list for PBXProject "OpenAIKit" */ = { 415 | isa = XCConfigurationList; 416 | buildConfigurations = ( 417 | 607FACED1AFB9204008FA782 /* Debug */, 418 | 607FACEE1AFB9204008FA782 /* Release */, 419 | ); 420 | defaultConfigurationIsVisible = 0; 421 | defaultConfigurationName = Release; 422 | }; 423 | 607FACEF1AFB9204008FA782 /* Build configuration list for PBXNativeTarget "OpenAIKit_Example" */ = { 424 | isa = XCConfigurationList; 425 | buildConfigurations = ( 426 | 607FACF01AFB9204008FA782 /* Debug */, 427 | 607FACF11AFB9204008FA782 /* Release */, 428 | ); 429 | defaultConfigurationIsVisible = 0; 430 | defaultConfigurationName = Release; 431 | }; 432 | /* End XCConfigurationList section */ 433 | }; 434 | rootObject = 607FACC81AFB9204008FA782 /* Project object */; 435 | } 436 | --------------------------------------------------------------------------------