├── .gitignore
├── Package.resolved
├── CONTRIBUTING.md
├── Package.swift
├── LICENSE
├── README.md
└── Sources
    └── LlamaCLI
        └── main.swift

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.build/
llama.cpp/

--------------------------------------------------------------------------------
/Package.resolved:
--------------------------------------------------------------------------------
{
  "pins" : [
    {
      "identity" : "swift-argument-parser",
      "kind" : "remoteSourceControl",
      "location" : "https://github.com/apple/swift-argument-parser.git",
      "state" : {
        "revision" : "41982a3656a71c768319979febd796c6fd111d5c",
        "version" : "1.5.0"
      }
    }
  ],
  "version" : 2
}

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
# Contributing

We welcome contributions! Please follow these guidelines:

1. **Fork the repository.**
2. **Create a new branch:** `git checkout -b my-feature-branch`
3. **Make your changes and commit them:** `git commit -am 'Add some feature'`
4. **Push to the branch:** `git push origin my-feature-branch`
5. **Submit a pull request.**

## Code of Conduct

Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms.

--------------------------------------------------------------------------------
/Package.swift:
--------------------------------------------------------------------------------
// swift-tools-version:5.7
import PackageDescription

let package = Package(
    name: "LlamaCLI",
    platforms: [
        .macOS(.v10_15) // Or a newer version like .v12 or .v13
    ],
    dependencies: [
        .package(url: "https://github.com/apple/swift-argument-parser.git", from: "1.2.0"), // Use the latest appropriate version
    ],
    targets: [
        .executableTarget(
            name: "LlamaCLI",
            dependencies: [
                .product(name: "ArgumentParser", package: "swift-argument-parser"),
            ]
        ),
    ]
)

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2025 Vaibhav Srivastav

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Luigi

A Swift CLI wrapper around `llama-server`!

## Installation

1. **Clone the repository:**
   ```bash
   git clone https://github.com/vaibhavs10/luigi.git
   cd luigi
   ```
2. **Build the project:**
   ```bash
   swift build
   ```

### Building llama.cpp

`llama-server` is built alongside everything else from the root of the `llama.cpp` project.

If you need to build `llama.cpp`, follow these steps:

1. **Clone the `llama.cpp` repository:**
   ```bash
   git clone https://github.com/ggml-org/llama.cpp.git
   cd llama.cpp
   ```
2. **Build `llama-server` using CMake from the root of the `llama.cpp` project:**
   ```bash
   cmake -B build
   cmake --build build --config Release -t llama-server
   ```
   The binary will be located at `./build/bin/llama-server`.

For more details, refer to the [llama.cpp server tools documentation](https://github.com/ggml-org/llama.cpp/tree/master/tools/server).

## Usage

```bash
LLAMA_SERVER_PATH=./llama.cpp/build/bin/llama-server .build/debug/LlamaCLI --hf bartowski/Qwen_Qwen3-0.6B-GGUF:Q4_K_M --c 2048
```
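Once `llama-server` is running, it serves an HTTP API, by default on `http://127.0.0.1:8080` unless you forward different `--host`/`--port` flags to it. As a quick smoke test, something like the following should work (endpoint names follow the llama.cpp server documentation; adjust if your build differs):

```bash
# Check that the server is up and the model has finished loading
curl http://127.0.0.1:8080/health

# Send a request to the OpenAI-compatible chat endpoint
curl http://127.0.0.1:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      {"role": "user", "content": "Say hello in one sentence."}
    ]
  }'
```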
## Contributing

Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests to us.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

--------------------------------------------------------------------------------
/Sources/LlamaCLI/main.swift:
--------------------------------------------------------------------------------
import Foundation
import ArgumentParser

struct LlamaCLI: ParsableCommand {
    static let configuration = CommandConfiguration(
        commandName: "llama-cli",
        abstract: "A Swift CLI wrapper to run the llama-server binary.",
        discussion: """
        This tool wraps the 'llama-server' binary, allowing you to configure
        and run it with specified parameters. The 'llama-server' binary
        must be accessible by this wrapper.
        """
    )

    @Option(name: .customLong("hf"), help: "The Hugging Face model identifier (e.g., 'meta-llama/Llama-2-7b-chat-hf') or a local path to the model. This will be passed to llama-server.")
    var hfModel: String

    @Option(name: .customLong("c"), help: "A numeric value forwarded to llama-server; with the default configuration below it is passed as the context size (--ctx-size).")
    var cValue: Int

    // --- IMPORTANT: Configuration for llama-server ---
    // You will need to adjust these based on how your 'llama-server' binary
    // actually accepts its command-line arguments.
    // For example, if llama-server expects '--model-path' instead of '--model':
    private var modelArgName: String = "--hf-repo" // Placeholder: e.g., "--model", "--model-path"
    // And if '-c' corresponds to '--threads' or '--context-length':
    private var cValueArgName: String = "--ctx-size" // Placeholder: e.g., "--threads", "--context-size", "-n"

    private func findLlamaServerPath() throws -> String {
        // Priority for finding 'llama-server':
        // 1. LLAMA_SERVER_PATH environment variable
        if let envPath = ProcessInfo.processInfo.environment["LLAMA_SERVER_PATH"], !envPath.isEmpty {
            if FileManager.default.isExecutableFile(atPath: envPath) {
                print("Using llama-server from LLAMA_SERVER_PATH: \(envPath)")
                return envPath
            } else {
                print("Warning: LLAMA_SERVER_PATH ('\(envPath)') is set but the file is not executable or not found. Falling back...")
            }
        }

        // 2. Relative to the CLI executable's path (common for co-located/bundled binaries)
        if let executableFolderURL = Bundle.main.executableURL?.deletingLastPathComponent() {
            // Check in the same directory as the CLI executable
            let sameDirPath = executableFolderURL.appendingPathComponent("llama-server").path
            if FileManager.default.isExecutableFile(atPath: sameDirPath) {
                print("Found llama-server alongside the CLI executable: \(sameDirPath)")
                return sameDirPath
            }

            // Check in a 'Resources' subfolder relative to the executable (common for app bundles)
            // This path would be MainBundle/Contents/Resources/llama-server if CLI is in MainBundle/Contents/MacOS/
            let resourcesPath = executableFolderURL.deletingLastPathComponent().appendingPathComponent("Resources/llama-server").path
            if FileManager.default.isExecutableFile(atPath: resourcesPath) {
                print("Found llama-server in Resources folder: \(resourcesPath)")
                return resourcesPath
            }
        }

        // 3. Default path (e.g., current working directory)
        let defaultPath = "./llama-server"
        if FileManager.default.isExecutableFile(atPath: defaultPath) {
            print("Found llama-server in current directory: \(defaultPath)")
            return defaultPath
        }

        throw RuntimeError("Error: 'llama-server' binary not found.\nSearched LLAMA_SERVER_PATH, relative to executable (and ../Resources), and current directory ('./llama-server').\nPlease ensure 'llama-server' is executable and in one of these locations, or update LLAMA_SERVER_PATH.")
    }

    func run() throws {
        print("LlamaCLI preparing to launch llama-server...")
        print(" Model (--hf): \(hfModel)")
        print(" C Value (--c): \(cValue)")

        let llamaServerPath: String
        do {
            llamaServerPath = try findLlamaServerPath()
        } catch {
            print("\(error.localizedDescription)") // Error already includes "Error:"
            LlamaCLI.exit(withError: error)
        }

        // Construct arguments for llama-server.
        // !!! CRITICAL ASSUMPTION !!!
        // You MUST verify how 'llama-server' expects its arguments and update
        // `modelArgName`, `cValueArgName`, and the argument construction below.
        var serverArgs = [String]()
        serverArgs.append(modelArgName)
        serverArgs.append(hfModel)
        serverArgs.append(cValueArgName)
        serverArgs.append(String(cValue))

        // Add any other necessary default arguments for llama-server here.
        // For example:
        // serverArgs.append("--port")
        // serverArgs.append("8080")
        // serverArgs.append("--host")
        // serverArgs.append("127.0.0.1")

        print("Executing: \(llamaServerPath) \(serverArgs.joined(separator: " "))")

        let process = Process()
        process.executableURL = URL(fileURLWithPath: llamaServerPath)
        process.arguments = serverArgs

        // Pass through standard output, error, and input for interactive servers
        process.standardOutput = FileHandle.standardOutput
        process.standardError = FileHandle.standardError
        process.standardInput = FileHandle.standardInput // Important if llama-server is interactive

        do {
            try process.run()
            process.waitUntilExit() // Wait for the server process to complete

            if process.terminationStatus == 0 {
                print("\nllama-server exited successfully.")
            } else {
                print("\nllama-server exited with status: \(process.terminationStatus).")
                // Propagate the exit code from llama-server
                LlamaCLI.exit(withError: ExitCode(process.terminationStatus))
            }
        } catch {
            print("Error: Failed to start or run llama-server: \(error.localizedDescription)")
            LlamaCLI.exit(withError: error)
        }
    }
}

// Custom error for cleaner messages
struct RuntimeError: Error, CustomStringConvertible, LocalizedError {
    var message: String
    init(_ message: String) { self.message = message }
    var description: String { message }
    var errorDescription: String? { message }
}

// Entry point
LlamaCLI.main()

--------------------------------------------------------------------------------
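A note on the lookup order implemented in `findLlamaServerPath()` above: if `LLAMA_SERVER_PATH` is not set, the wrapper also looks for `llama-server` next to its own executable, and finally at `./llama-server` in the current directory. Assuming a debug build under `.build/debug/` (the paths below are illustrative and depend on where you built `llama.cpp`), copying the binary once means the environment variable is not needed on every run:

```bash
# Copy llama-server next to the built LlamaCLI executable (lookup step 2 in findLlamaServerPath)
cp ./llama.cpp/build/bin/llama-server .build/debug/llama-server

# The wrapper now finds it without LLAMA_SERVER_PATH
.build/debug/LlamaCLI --hf bartowski/Qwen_Qwen3-0.6B-GGUF:Q4_K_M --c 2048
```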