├── .gitignore
├── assets
│   ├── instant.gif
│   └── screen1.png
├── config.ini
├── scripts
│   ├── opk_alias.sh
│   └── opk.plist
├── setup.py
├── requirements.txt
├── BINDKEY.md
├── README.md
├── client
│   └── opk.py
└── server
    └── opk-server.py
/.gitignore:
--------------------------------------------------------------------------------
venv
--------------------------------------------------------------------------------
/assets/instant.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zerocorebeta/Option-K/HEAD/assets/instant.gif
--------------------------------------------------------------------------------
/assets/screen1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zerocorebeta/Option-K/HEAD/assets/screen1.png
--------------------------------------------------------------------------------
/config.ini:
--------------------------------------------------------------------------------
[optionk]
port = 8089

[vertexai]
enabled = false
project = my-project
location = asia-south1
model = gemini-1.5-flash-001

[google_ai_studio]
enabled = true
api_key = YOUR_API_KEY_HERE
model = gemini-1.5-flash
--------------------------------------------------------------------------------
/scripts/opk_alias.sh:
--------------------------------------------------------------------------------
# ZLE widget: send the current command line to Option-K and replace it
# with the quick suggestion. {INSTALL_PATH} is filled in during setup.
optionk() {
    local install_path="{INSTALL_PATH}"
    local query="$BUFFER"
    local result=$("$install_path/venv/bin/python" "$install_path/client/opk.py" "$query" --quick)
    BUFFER="$result"
    zle end-of-line
}

zle -N optionk
bindkey '˚' optionk
--------------------------------------------------------------------------------
/scripts/opk.plist:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.example.optionk</string>
    <key>ProgramArguments</key>
    <array>
        <string>#{opt_libexec}/bin/python</string>
        <string>#{opt_libexec}/option-k-server.py</string>
    </array>
    <key>EnvironmentVariables</key>
    <dict>
        <key>PYTHONPATH</key>
        <string>#{opt_libexec}/lib/python3.*/site-packages</string>
        <key>PATH</key>
        <string>#{opt_libexec}/bin:$PATH</string>
    </dict>
    <key>RunAtLoad</key>
    <true/>
    <key>KeepAlive</key>
    <true/>
</dict>
</plist>
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

# Read the contents of your README file for the long description
with open("README.md", "r") as fh:
    long_description = fh.read()

# Function to parse requirements.txt
def parse_requirements(filename):
    with open(filename, "r") as f:
        lines = f.readlines()
    return [line.strip() for line in lines if line.strip() and not line.startswith("#")]

# Parse the requirements
install_requires = parse_requirements("requirements.txt")

setup(
    name="optionk",
    version="1.0.0",
    description="Option-K CLI and server application",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Alex",
    author_email="coredrop@protonmail.com",
    license="MIT",
    packages=find_packages(include=["client", "server"]),
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'opk = client.opk:main',  # Replace `main` with the actual entry point
            'opk-server = server.opk_server:main',  # Replace `main` with the actual entry point
        ],
    },
    # Additional files to include
    package_data={
        'optionk': ['config.ini', 'scripts/*.plist', 'scripts/*.sh'],
    },
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.12',
)
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
aiofiles==24.1.0
aiohappyeyeballs==2.4.0
aiohttp==3.10.5
aiosignal==1.3.1
annotated-types==0.7.0
attrs==24.2.0
build==1.2.1
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.3.2
click==8.1.7
docstring_parser==0.16
frozenlist==1.4.1
fuzzywuzzy==0.18.0
getch==1.0
google-ai-generativelanguage==0.6.6
google-api-core==2.19.2
google-api-python-client==2.143.0
google-auth==2.34.0
google-auth-httplib2==0.2.0
google-cloud-aiplatform==1.65.0
google-cloud-bigquery==3.25.0
google-cloud-core==2.4.1
google-cloud-resource-manager==1.12.5
google-cloud-storage==2.18.2
google-crc32c==1.6.0
google-generativeai==0.7.2
google-resumable-media==2.7.2
googleapis-common-protos==1.65.0
grpc-google-iam-v1==0.13.1
grpcio==1.66.1
grpcio-status==1.62.3
httplib2==0.22.0
idna==3.8
Levenshtein==0.25.1
markdown-it-py==3.0.0
mdurl==0.1.2
multidict==6.0.5
numpy==2.0.2
packaging==24.1
pip-tools==7.4.1
prompt-toolkit==3.0.43
proto-plus==1.24.0
protobuf==4.25.4
pyasn1==0.6.0
pyasn1_modules==0.4.0
pydantic==2.8.2
pydantic_core==2.20.1
Pygments==2.18.0
pyparsing==3.1.4
pyproject_hooks==1.1.0
python-dateutil==2.9.0.post0
python-Levenshtein==0.25.1
rapidfuzz==3.9.7
requests==2.32.3
rich==13.7.0
rsa==4.9
setuptools==74.1.1
shapely==2.0.6
six==1.16.0
tqdm==4.66.5
typing_extensions==4.12.2
uritemplate==4.1.1
urllib3==2.2.2
wcwidth==0.2.13
wheel==0.44.0
yarl==1.9.11
--------------------------------------------------------------------------------
/BINDKEY.md:
--------------------------------------------------------------------------------
To get the complete list of key bindings in Zsh, including the `bindkey` commands, you can use the following methods:

### 1. **View Current Key Bindings**
You can list all the current key bindings in your Zsh session by running:
```zsh
bindkey
```
This will display all the key sequences and the functions or widgets they are bound to.
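
Each line of the output pairs a key sequence with the widget it invokes. A few illustrative lines (hypothetical; your session will differ):
```zsh
"^A" beginning-of-line
"^E" end-of-line
"˚" optionk
```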

### 2. **View Custom Key Bindings in Your Configuration Files**
If you've customized your key bindings (like this project's `bindkey '˚' optionk`), you can find them in your Zsh configuration files, typically `~/.zshrc` or `~/.zshenv`. You can search for `bindkey` commands using `grep`:
```zsh
grep bindkey ~/.zshrc
```

### 3. **List All Widgets**
You can list all Zsh widgets (functions bound to key sequences) using:
```zsh
zle -l
```
This will list the names of all widgets that you can bind to keys.

### 4. **Get Documentation for `bindkey`**
You can read the Zsh manual for detailed information on `bindkey` and key-binding syntax by running:
```zsh
man zshzle
```
In the manual, search for `bindkey` to get detailed usage instructions.

### 5. **Get Key Codes**
If you're unsure about the key code for a specific key (like `˚`, which `Option+K` produces on a macOS keyboard), press `Ctrl+v` followed by the key in your terminal. This will show the key sequence that you can use in the `bindkey` command.

For example:
- Press `Ctrl+v` and then `˚`, and you'll see the key code output.
- You can then use this key code in your `bindkey` command like so:
```zsh
bindkey 'key_code' optionk
```

By using these methods, you can view, customize, and manage key bindings in Zsh effectively.
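
As a final check, you can ask `bindkey` what a specific sequence is bound to, or filter the full listing for your widget — a quick sketch using the `optionk` widget from this repository:
```zsh
bindkey '˚'              # show what this sequence is bound to
bindkey | grep optionk   # find every sequence bound to the optionk widget
```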
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Option-K: AI-Powered Instant CLI

Option-K is an AI-powered CLI assistant that helps you generate and execute shell and git commands based on natural language queries.

## Multiple Command View

![Multiple command view](https://raw.githubusercontent.com/zerocorebeta/Option-K/HEAD/assets/screen1.png)

## Instant Command with `Option + K` key combo

![Instant command demo](https://raw.githubusercontent.com/zerocorebeta/Option-K/HEAD/assets/instant.gif)

## Features

- Generate shell and git commands from natural language queries
- Execute generated commands directly from the CLI
- Quick suggestion mode for instant command generation
- Supports both macOS and Linux environments
- Configurable AI backend (Google AI Studio or Vertex AI)

## Installation

### Using Homebrew (macOS)

You can install Option-K using Homebrew:

```bash
brew tap zerocorebeta/core
brew install optionk
```

This will install both the Option-K CLI (`opk`) and the server (`opk-server`).
After installation, follow the on-screen instructions to complete the setup.

## Usage

1. Type your query in the terminal, followed by the `Option+K` hotkey combo. (Configurable via alias.)

2. The AI will generate a command based on your query. You can then edit or execute the command.

3. For more options, use the CLI directly:
```
opk "your query here"
```

## Configuration

1. Request a free Google AI Studio API key [here](https://ai.google.dev/gemini-api/docs/getting-started)

2. Edit the `~/.config/optionk/config.ini` file to configure the AI backend:

```ini
[optionk]
port = 8089

[vertexai]
enabled = false
project = my-project
location = asia-south1
model = gemini-1.5-flash-001

[google_ai_studio]
enabled = true
api_key = YOUR_API_KEY_HERE
model = gemini-1.5-flash
```
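
If this file doesn't exist yet, `opk-server` writes a commented default on first start (see `create_default_config` in `server/opk-server.py`). You can also seed it by hand from the repository copy — a minimal sketch for macOS/Linux:

```zsh
mkdir -p ~/.config/optionk
cp config.ini ~/.config/optionk/config.ini
```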

Note: `enabled` must be set to `true` for exactly one of the two AI backends.

Request a free Google AI Studio API key at https://ai.google.dev/gemini-api

For Vertex AI, authentication is handled by the gcloud CLI:
https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstarts/quickstart-multimodal#set-up-your-environment

Running `gcloud auth application-default login` is usually all that's needed; no additional setup was required beyond that.

Note on region support:
- Vertex AI: supports custom regions, allowing you to specify a location of your choice.
- Google AI Studio: only supports the `us-central1` region. This cannot be changed.

When configuring your `config.ini`, keep this difference in regional flexibility between the two services in mind.

After editing the config file, restart the server: `brew services restart optionk`

Check the logs here: `/usr/local/var/log/opk-server.log`

Test by running `opk "show current time"`

If you want a one-line quick suggestion: `opk "show current time" --quick`
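
Under the hood, both commands POST your query to the local Option-K server, which exposes `/generate` (detailed list) and `/quick_suggest` (single best command) endpoints (see `server/opk-server.py`). If you want to exercise the server without the client, a sketch using `curl` and the default port:

```zsh
curl -s -X POST http://localhost:8089/quick_suggest \
  -H 'Content-Type: application/json' \
  -d '{"query": "show current time"}'
```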

Now all that is left is to map a hotkey combo to this `opk --quick` command.

Edit `~/.zshrc` and add these lines (the snippet uses `zle` and `bindkey`, so it is Zsh-specific; Bash users would need an equivalent `bind -x` setup):

```zsh
optionk() {
    local query="$BUFFER"
    local result=$(opk "$query" --quick)
    BUFFER="$result"
    zle end-of-line
}
zle -N optionk
bindkey '˚' optionk
```

Here we bind `Option+K` (which produces `˚` on a macOS US keyboard layout) to the custom widget `optionk`, which generates a command from the text you've typed in the terminal.

You can edit `bindkey '˚' optionk` to map it to any other key combo.
For more details, read [BINDKEY.md](./BINDKEY.md).
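
For example, if your terminal doesn't emit `˚` for `Option+K` (key handling varies by emulator and layout), you could bind the widget to a different sequence — a hypothetical `Ctrl+O` binding:

```zsh
bindkey '^O' optionk
```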

Reload: `source ~/.zshrc` or `source ~/.bashrc`

Type `show date` in the terminal, press the `Option+K` hotkey combo, and you'll see the command appear in place!

## Installation via Source

1. Clone the repository:
```
git clone https://github.com/zerocorebeta/Option-K.git
cd Option-K
```

2. Create a virtual environment and install dependencies:
```
python -m venv venv
source venv/bin/activate
pip install -r requirements.txt
```

3. Configure the AI backend:
- Create the `~/.config/optionk/config.ini` file (create the directory if it doesn't exist)
- Edit `~/.config/optionk/config.ini` to set up your preferred AI service (Google AI Studio or Vertex AI)

4. Run the server:
```
python server/opk-server.py
```

5. Test command completion using the detailed mode:
```
python client/opk.py "your query here"
```
or quick mode:
```
python client/opk.py "your query here" --quick
```

6. Set up `server/opk-server.py` as a service, as sketched below.
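
On macOS, the bundled `scripts/opk.plist` is a launchd template (the `#{opt_libexec}` placeholders are filled in by the Homebrew formula). A hypothetical manual setup, after replacing the placeholder paths with your install locations:

```zsh
cp scripts/opk.plist ~/Library/LaunchAgents/com.example.optionk.plist
# edit the ProgramArguments paths in the copied file, then:
launchctl load ~/Library/LaunchAgents/com.example.optionk.plist
```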

7. Add the alias to your shell configuration:

Edit `~/.zshrc` and add these lines (if `opk` is on your `PATH`; if not, specify the full path):

```zsh
optionk() {
    local query="$BUFFER"
    local result=$(opk "$query" --quick)
    BUFFER="$result"
    zle end-of-line
}
zle -N optionk
bindkey '˚' optionk
```
Reload: `source ~/.zshrc` or `source ~/.bashrc`

## Files

- `client/opk.py`: Main CLI interface
- `server/opk-server.py`: Backend server handling AI requests

## Contributing

Contributions are welcome! Please feel free to submit a Pull Request.

## License

[MIT License](LICENSE)
--------------------------------------------------------------------------------
/client/opk.py:
--------------------------------------------------------------------------------
import os
import asyncio
import aiohttp
import sys
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
import argparse
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory, InMemoryHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.application import Application
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout import Layout
from prompt_toolkit.widgets import TextArea
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from prompt_toolkit.buffer import Buffer
import aiofiles
import configparser
import platform

# Initialize the console
console = Console()

def get_config_path():
    if platform.system() == "Windows":
        config_path = os.path.join(os.environ.get('APPDATA'), 'optionk', 'config.ini')
    else:
        config_path = os.path.expanduser('~/.config/optionk/config.ini')
    return config_path

# Read the configuration
config = configparser.ConfigParser()
config.read(get_config_path())

PORT = config.get('optionk', 'port', fallback='8089')

# The history file lives alongside the config file
HISTORY_FILE = os.path.join(os.path.dirname(get_config_path()), 'history')

async def save_to_history(command):
    # aiofiles is a hard dependency (imported above), so no import-time
    # fallback is needed here; just make sure the directory exists.
    os.makedirs(os.path.dirname(HISTORY_FILE), exist_ok=True)
    async with aiofiles.open(HISTORY_FILE, 'a') as f:
        await f.write(f"{command}\n")

async def load_history():
    try:
        async with aiofiles.open(HISTORY_FILE, 'r') as f:
            return [line.strip() for line in await f.readlines()]
    except FileNotFoundError:
        return []

async def run_command(command):
    try:
        proc = await asyncio.create_subprocess_shell(
            command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await proc.communicate()
        if proc.returncode == 0:
            return stdout.decode()
        else:
            return f"Error: {stderr.decode()}"
    except Exception as e:
        return f"Error: {str(e)}"

def apply_color_scheme_html(command):
    # Wrap each token in a prompt_toolkit HTML tag matching the style
    # classes defined for the edit session below ('cmd', 'arg', 'param').
    parts = command.split()
    colored_parts = []
    for i, part in enumerate(parts):
        if i == 0:
            colored_parts.append(f'<cmd>{part}</cmd>')
        elif part.startswith('-'):
            colored_parts.append(f'<arg>{part}</arg>')
        else:
            colored_parts.append(f'<param>{part}</param>')
    return ' '.join(colored_parts)

async def main():
    parser = argparse.ArgumentParser(description="AI Coding Assistant CLI")
    parser.add_argument("query", nargs="*", help="The task or query to generate a command for")
    parser.add_argument("--quick", action="store_true", help="Get a single best result")
    args = parser.parse_args()

    user_input = " ".join(args.query)

    async with aiohttp.ClientSession() as session:
        if args.quick:
            async with session.post(f'http://localhost:{PORT}/quick_suggest', json={'query': user_input}) as response:
                data = await response.json()
                print(data['result'])
                return

        try:
            while True:
                if not user_input:
                    user_input = console.input(
                        "[bold green]Query?[/bold green] (or 'q' to quit) "
                    )
                if user_input.lower() in ['exit', 'quit', 'q']:
                    console.print("\n[bold yellow]Exiting...[/bold yellow]")
                    return

                with console.status("[bold blue]Generating response...[/bold blue]"):
                    async with session.post(f'http://localhost:{PORT}/generate', json={'query': user_input}) as response:
                        data = await response.json()
                        response_text = data['response']

                commands = []
                for line in response_text.splitlines():
                    # The server numbers its suggestions starting at 0
                    if line.startswith(tuple(f"{i}." for i in range(10))):
                        parts = line.split(' - ', 1)
                        if len(parts) == 2:
                            cmd, explanation = parts
                        else:
                            cmd, explanation = parts[0], ""
                        commands.append((cmd.strip(), explanation.strip()))

                commands = commands[:10]  # Limit to 10 commands (0-9)

                while commands:
                    table = Table(title="Generated Commands", show_header=True, header_style="bold magenta", expand=True)
                    table.add_column("#", style="dim", width=4, justify="center")
                    table.add_column("Command", style="cyan", no_wrap=True, ratio=30)
                    table.add_column("Explanation", style="green", ratio=70)

                    for i, (cmd, explanation) in enumerate(commands):
                        cmd_parts = cmd.split(None, 1)[1].strip('`*').split()

                        colored_cmd = Text()
                        for j, part in enumerate(cmd_parts):
                            if j == 0:
                                colored_cmd.append(part, style="bold cyan")
                            elif part.startswith('-'):
                                colored_cmd.append(f" {part}", style="yellow")
                            else:
                                colored_cmd.append(f" {part}", style="green")

                        table.add_row(str(i), colored_cmd, Text(explanation, style="green"))

                    console.print(table)

                    kb = KeyBindings()

                    def handle_input(event):
                        char = event.data
                        if char in [str(i) for i in range(10)] + ['n', 'q']:
                            event.app.exit(result=char)

                    for i in range(10):
                        kb.add(str(i))(handle_input)
                    kb.add('n')(handle_input)
                    kb.add('q')(handle_input)

                    buffer = Buffer()
                    text_area = TextArea()
                    application = Application(
                        layout=Layout(Window(BufferControl(buffer=buffer))),
                        key_bindings=kb,
                        full_screen=False,
                    )

                    console.print("Select a command number (0-9), 'n' for a new query, or 'q' to quit: ", end="")
                    choice = await asyncio.to_thread(application.run)

                    if choice == 'q':
                        console.print("\n[bold yellow]Exiting...[/bold yellow]")
                        return
                    elif choice == 'n':
                        console.print()
                        break
                    elif choice in [str(i) for i in range(10)]:
                        command_index = int(choice)
                        if 0 <= command_index < len(commands):
                            command_to_execute = commands[command_index][0].split(None, 1)[1].strip('`*')

                            console.print("[italic]Edit the command or press Enter to execute. Use Ctrl+C to cancel.[/italic]")

                            colored_command = apply_color_scheme_html(command_to_execute)

                            edit_session = PromptSession(
                                history=InMemoryHistory([command_to_execute]),
                                auto_suggest=AutoSuggestFromHistory(),
                                style=Style.from_dict({
                                    'prompt': '#00FFFF bold',
                                    'cmd': '#00FFFF bold',
                                    'arg': '#FFFF00',
                                    'param': '#00FF00',
                                })
                            )

                            try:
                                edited_command = await asyncio.to_thread(
                                    edit_session.prompt,
                                    HTML(f'> {colored_command}'),
                                )
                                command_to_execute = edited_command if edited_command.strip() else command_to_execute
                            except KeyboardInterrupt:
                                console.print("\n[bold yellow]Command execution cancelled.[/bold yellow]")
                                continue

                            with console.status("[bold green]Executing command...[/bold green]"):
                                output = await run_command(command_to_execute)

                            if output.strip():
                                console.print(Panel(output, title="Command Output", expand=False, border_style="green"))
                            else:
                                console.print("[yellow]Command executed successfully, but produced no output.[/yellow]")
                            await save_to_history(command_to_execute)
                        else:
                            console.print("[bold red]Invalid selection. Please try again.[/bold red]")
                    else:
                        console.print("[bold red]Invalid input. Please try again.[/bold red]")

                user_input = ""  # Reset user_input to prompt for a new query

        except Exception as e:
            console.print(f"[bold red]An error occurred:[/bold red] {str(e)}")
            if "project" in str(e).lower():
                console.print("[bold yellow]Please check your PROJECT_ID and ensure it's correctly set in your environment variables.[/bold yellow]")

if __name__ == "__main__":
    asyncio.run(main())
--------------------------------------------------------------------------------
/server/opk-server.py:
--------------------------------------------------------------------------------
import os
import subprocess
import platform
from functools import lru_cache
import asyncio
from aiohttp import web
from fuzzywuzzy import fuzz
import signal
import configparser
import google.generativeai as genai
from vertexai.generative_models import GenerativeModel, GenerationConfig
from vertexai.generative_models import HarmCategory as VertexHarmCategory
from vertexai.generative_models import HarmBlockThreshold as VertexHarmBlockThreshold
from google.generativeai.types import HarmCategory as GoogleHarmCategory
from google.generativeai.types import HarmBlockThreshold as GoogleHarmBlockThreshold
import argparse
import vertexai
import logging
import re

# Global configuration and model, shared by the request handlers below
config = configparser.ConfigParser()
model = None
vertex_safetysettings = {
    VertexHarmCategory.HARM_CATEGORY_HARASSMENT: VertexHarmBlockThreshold.BLOCK_ONLY_HIGH,
    VertexHarmCategory.HARM_CATEGORY_HATE_SPEECH: VertexHarmBlockThreshold.BLOCK_ONLY_HIGH,
    VertexHarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: VertexHarmBlockThreshold.BLOCK_ONLY_HIGH,
    VertexHarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: VertexHarmBlockThreshold.BLOCK_ONLY_HIGH
}

googleai_safetysettings = {
    GoogleHarmCategory.HARM_CATEGORY_HARASSMENT: GoogleHarmBlockThreshold.BLOCK_NONE,
    GoogleHarmCategory.HARM_CATEGORY_HATE_SPEECH: GoogleHarmBlockThreshold.BLOCK_NONE,
    GoogleHarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: GoogleHarmBlockThreshold.BLOCK_NONE,
    GoogleHarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: GoogleHarmBlockThreshold.BLOCK_NONE
}

def create_default_config(config_path):
    config = configparser.ConfigParser()
    config['optionk'] = {
        'port': '8089'
    }
    config['vertexai'] = {
        'enabled': 'false',
        'project': 'my-project',
        'location': 'asia-south1',
        'model': 'gemini-1.5-flash-001'
    }
    config['google_ai_studio'] = {
        'enabled': 'true',
        'api_key': 'YOUR_API_KEY_HERE',
        'model': 'gemini-1.5-flash'
    }

    os.makedirs(os.path.dirname(config_path), exist_ok=True)
    with open(config_path, 'w') as configfile:
        config.write(configfile)

    # Add helpful comments to the config file
    with open(config_path, 'r+') as f:
        content = f.read()
        f.seek(0, 0)
        f.write("# Option-K Configuration File\n\n")
        f.write("# [optionk]\n# port: The port number for the Option-K server\n\n")
        f.write("# [vertexai]\n# enabled: Set to true to use Vertex AI\n# project: Your Google Cloud project ID\n# location: The location of your Vertex AI resources\n# model: The Vertex AI model to use\n\n")
        f.write("# [google_ai_studio]\n# enabled: Set to true to use Google AI Studio\n# api_key: Your Google AI Studio API key\n# model: The Google AI Studio model to use\n\n")
        f.write(content)

def get_config_path(custom_path=None):
    if custom_path:
        return custom_path

    if platform.system() == "Windows":
        config_path = os.path.join(os.environ.get('APPDATA'), 'optionk', 'config.ini')
    else:
        config_path = os.path.expanduser('~/.config/optionk/config.ini')
    return config_path

@lru_cache(maxsize=1)
def get_system_info():
    system = platform.system()
    machine = platform.machine()
    if system == "Darwin":
        os_name = "macOS"
        version = platform.mac_ver()[0]
    elif system == "Linux":
        os_name = "Linux"
        try:
            distro = subprocess.check_output(["lsb_release", "-ds"]).decode().strip()
        except (subprocess.CalledProcessError, OSError):
            # lsb_release may be missing or fail on some distributions
            distro = "Unknown distribution"
        version = distro
    else:
        os_name = system
        version = platform.version()

    return f"{os_name} {version} ({machine})"

async def generate_response_stream(query, command_type, system_info):
    is_git_query = is_git_related_query(query)

    if is_git_query:
        system_query = """Machine-readable output. You are a Git expert providing git commands that match the query.
Provide git commands specific to this system.
Rank suggestions by relevance.
Explain what each git command does and how it works.
Output as a numbered list (starts with 0) in the format: <command> - <explanation>.
"""
    else:
        system_query = f"""Machine-readable output. You are a CLI expert providing {command_type} commands that match the query.
The user's system is: {system_info}
Provide commands specific to this system.
Rank suggestions by relevance.
Explain what each command does and how it works.
Output as a numbered list (starts with 0) in the format: <command> - <explanation>."""

    full_query = f"{system_query}\n\nquery: {query}\n\nProvide up to 9 commands."

    if config.getboolean('vertexai', 'enabled', fallback=False):
        response = model.generate_content(
            contents=full_query,
            generation_config=GenerationConfig(
                max_output_tokens=1024,
                temperature=0,
                top_p=1,
                top_k=1
            ),
            safety_settings=vertex_safetysettings,
            stream=True
        )

        full_response = ""
        for chunk in response:
            if chunk.text:
                full_response += chunk.text
    else:  # Google AI Studio
        response = model.generate_content(
            full_query,
            generation_config=genai.GenerationConfig(
                max_output_tokens=1024,
                temperature=0,
                top_p=1,
                top_k=1
            ),
            safety_settings=googleai_safetysettings,
            stream=True
        )

        full_response = ""
        for chunk in response:
            full_response += chunk.text

    return full_response

def is_git_related_query(query):
    query = query.lower()
    git_terms = ["git", "commit", "branch", "merge", "pull", "push", "rebase", "stash", "checkout", "clone"]
    url_pattern = re.compile(r'https?://\S+|www\.\S+')

    # Check if any git term is mentioned (allowing for misspellings)
    contains_git_term = any(fuzz.partial_ratio(term, query) > 85 for term in git_terms)

    # Check if 'git' is part of a URL
    urls = url_pattern.findall(query)
    git_in_url = any('git' in url.lower() for url in urls)

    # Check for common download commands
    download_commands = ["curl", "wget"]
    looks_like_download = any(command in query for command in download_commands)

    # Check for context (e.g., "How to use git")
    git_context = re.search(r'\b(use|using|with|in)\s+git\b', query) is not None

    return (contains_git_term or git_context) and not (git_in_url or looks_like_download)

async def get_single_best_result(query, command_type, system_info):
    is_git_query = is_git_related_query(query)
    is_commit_message = "commit" in query and "message" in query

    if is_git_query:
        system_query = """Machine-readable output.
Output with JUST the command to run directly in the command line.
You are a Git expert providing the single best git command that matches the query.
"""
        if is_commit_message:
            system_query += """
For commit messages:
1. Rewrite the commit message to meet Conventional Commits Specification.
2. Use the block-style commit message format with a single pair of quotes.
3. Separate the title from the body with a blank line.
4. Wrap the body text at approximately 72 characters.
5. Output just the git commit command.
"""
        else:
            system_query += "Output JUST the command to run directly in the command line."

    else:
        system_query = f"""Machine-readable output.
Output with JUST the command to run directly in the command line.
You are a CLI expert providing the single best {command_type} command that matches the query.
The user's system is: {system_info}
Provide a command specific to this system.
"""

    full_query = f"{system_query}\n\nquery: {query}\n\n"

    if config.getboolean('vertexai', 'enabled', fallback=False):
        response = model.generate_content(
            [full_query],
            generation_config=GenerationConfig(
                max_output_tokens=500,
                temperature=0,
                top_p=1,
                top_k=1
            ),
            safety_settings=vertex_safetysettings,
            stream=False,
        )
        return response.text.strip('` \t\n\r')
    else:  # Google AI Studio
        response = model.generate_content(
            full_query,
            generation_config=genai.GenerationConfig(
                max_output_tokens=100,
                temperature=0,
                top_p=1,
                top_k=1
            ),
            safety_settings=googleai_safetysettings,
        )
        return response.text.strip('` \t\n\r')

async def handle_generate(request):
    data = await request.json()
    query = data['query']
    system_info = get_system_info()
    response = await generate_response_stream(query, "CLI", system_info)
    return web.json_response({'response': response})

async def handle_quick_suggest(request):
    data = await request.json()
    query = data['query']
    system_info = get_system_info()
    result = await get_single_best_result(query, "CLI", system_info)
    return web.json_response({'result': result})

async def shutdown(app):
    print("Shutting down...")
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    [task.cancel() for task in tasks]
    await asyncio.gather(*tasks, return_exceptions=True)
    asyncio.get_event_loop().stop()

def signal_handler(signame):
    print(f"Received signal {signame}. Initiating shutdown...")
    asyncio.create_task(shutdown(None))

def parse_arguments():
    parser = argparse.ArgumentParser(description="Option-K Server")
    parser.add_argument('--config', help='Path to custom config file')
    return parser.parse_args()

def run_server():
    global config, model  # Populated here, read by the request handlers

    args = parse_arguments()

    config_path = get_config_path(args.config)
    if not os.path.exists(config_path):
        create_default_config(config_path)
        print(f"Created default configuration file at: {config_path}")
    else:
        print(f"Using configuration file: {config_path}")

    config.read(config_path)

    # Set up logging
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

    # Check configuration and initialize AI model
    try:
        if config.getboolean('vertexai', 'enabled', fallback=False):
            vertexai.init(project=config.get('vertexai', 'project'), location=config.get('vertexai', 'location'))
            model = GenerativeModel(config.get('vertexai', 'model'))
            logging.info("Initialized Vertex AI model")
        elif config.getboolean('google_ai_studio', 'enabled', fallback=False):
            genai.configure(api_key=config.get('google_ai_studio', 'api_key'))
            model = genai.GenerativeModel(config.get('google_ai_studio', 'model'))
            logging.info("Initialized Google AI Studio model")
        else:
            raise ValueError("No AI service is enabled in the configuration")

        # Test API
        test_query = "Hello, world!"
        test_result = asyncio.run(get_single_best_result(test_query, "CLI", get_system_info()))
        logging.info(f"API test successful. Response: {test_result}")
    except Exception as e:
        logging.error(f"Error initializing AI model: {str(e)}")
        return

    app = web.Application()
    app.router.add_post('/generate', handle_generate)
    app.router.add_post('/quick_suggest', handle_quick_suggest)

    # Set up signal handlers
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    for signame in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(
            getattr(signal, signame),
            # Bind signame at definition time to avoid the late-binding
            # closure pitfall inside the loop
            lambda signame=signame: signal_handler(signame)
        )

    port = int(config.get('optionk', 'port'))
    host = 'localhost'

    if platform.system() == "Darwin":
        # macOS specific configuration
        logging.info(f"Starting server on http://{host}:{port}")
        web.run_app(app, port=port, host=host)
    elif platform.system() == "Linux":
        # Linux (systemd) specific configuration
        socket_path = '/run/zerocoretwo/server.sock'
        logging.info(f"Starting server on Unix socket: {socket_path}")
        web.run_app(app, port=port, host=host, path=socket_path)
    else:
        # Default configuration
        logging.info(f"Starting server on http://{host}:{port}")
        web.run_app(app, port=port, host=host)

if __name__ == '__main__':
    run_server()
--------------------------------------------------------------------------------