" if role else "")
95 | self.elements.append("")
96 | for header in headers:
97 | self.elements.append(f"| {header} | ")
98 | self.elements.append("
")
99 |
100 | def add_row(self, cells):
101 | self.elements.append("")
102 | for cell in cells:
103 | self.elements.append(f"| {cell} | ")
104 | self.elements.append("
")
105 |
106 | def end_table(self):
107 | self.elements.append("
")
108 |
109 | def render(self):
110 | html_content = "\n".join(self.elements)
111 | full_html = f"""
112 | <!DOCTYPE html>
113 | <html lang="en">
114 | <head>
115 | <meta charset="UTF-8">
116 | <meta name="viewport" content="width=device-width, initial-scale=1.0">
117 | <title>{self.doc_title}</title>
118 | {get_style()}
119 | </head>
120 | <body>
121 | <main>
122 | {html_content}
123 | </main>
124 | </body>
125 | </html>
126 | """
127 |
128 | # Browsers installed as snap packages on Linux are sandboxed and cannot read
129 | # most directories, so default to the Downloads folder, which they can access
130 | dir = None
131 | if platform.system() == "Linux":
132 | default = os.path.join(os.path.expanduser("~"), "Downloads")
133 | if os.path.exists(default):
134 | dir = default
135 |
136 | with tempfile.NamedTemporaryFile(
137 | mode="w+", suffix=".html", delete=False, encoding="utf-8", dir=dir
138 | ) as temp_file:
139 | temp_file.write(full_html)
140 | temp_file_path = temp_file.name
141 | webbrowser.open("file://" + os.path.abspath(temp_file_path))
142 |
143 |
144 | # API Demo
145 | # builder = Builder()
146 | # builder.title("Generated Help Page from Talon")
147 | # builder.h1("Banner Heading", role=ARIARole.BANNER)
148 | # builder.h1("Header 1 for the page")
149 | # builder.h2("Header 2")
150 | # builder.h3("Smaller Header 3")
151 | # builder.ul("Bullet 1", "Bullet number two")
152 | # builder.p("This is a paragraph within the article", role=ARIARole.MAIN)
153 | # builder.h2("Navigation Heading", role=ARIARole.NAV)
154 | # builder.p("This is a paragraph within the article")
155 | # builder.ol("First element: Hello", "Second one: World")
156 | # builder.p("This is labeled as an aria footer within the article", role=ARIARole.FOOTER)
157 | # builder.render()
158 |
--------------------------------------------------------------------------------
/GPT/lists/staticPrompt.talon-list:
--------------------------------------------------------------------------------
1 | list: user.staticPrompt
2 | -
3 |
4 | # Use static prompts as aliases for detailed instructions
5 | # Reduce verbosity and prevent the need to say the entire prompt each time
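# For example, saying "model fix grammar" (assuming the default "model" command
# prefix) applies the full "fix grammar" prompt below to the selected text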
6 |
7 | ## FIXES
8 | fix grammar formally: Fix any mistakes or irregularities in grammar, spelling, or formatting. Use a professional business tone. The text was created using voice dictation. Thus, there are likely to be issues regarding homophones and other misrecognitions. Do not change the original structure of the text.
9 | fix grammar: Fix any mistakes or irregularities in grammar, spelling, or formatting. The text was created using voice dictation. Thus, there are likely to be issues regarding homophones and other misrecognitions. Do not change the tone. Do not change the original structure of the text.
10 | fix syntax: Fix any syntax errors in this code selection. Do not change any behavior.
11 |
12 | ## FORMATTING
13 | format table: The following markdown text is raw data. There is no index. Return the text in a markdown table format. Each row has a new line in the original data.
14 | format bullets: Convert each paragraph into a heading with a series of bullet points underneath it. Each paragraph is separated by a new line. Separate paragraphs should not have combined bullet points. This should all be done in markdown syntax. If it is a small paragraph, then you can just leave it as a heading and not add bullet points. Do not reduce content, only reduce things that would be redundant. These bullet points should be in a useful format for notes for those who want to quickly look at it. If there is a citation in the markdown original, then keep the citation just at the top and not within every individual bullet point.
15 | format mermaid: Convert the following plain text into the text syntax for a mermaid diagram.
16 | format comment: Format the following text as a comment for the current programming language. Use the proper comment syntax for the current language. Split the comment into multiple lines if the lines are too long.
17 | group: Act as an organizer. The following text consists of various topics all put together. Please group these items into categories and label each category. Return just the results.
18 | join: Act as an editor. The following text is separated into multiple parts. Please group them together into one part maintaining the flow and meaning. Reorder in whatever way makes sense. Remove any redundant information. The result should be only one part with no additional structure. Return just the modified text.
19 |
20 | ## TEXT GENERATION
21 | explain: Explain this text in a way that is easier to understand for a layman without technical knowledge.
22 | summarize: Summarize this text into a format suitable for project notes.
23 | add context: Add additional text to the selected text that would be appropriate to the situation and add useful information.
24 | fit schema: The given text has a series of responses that need to be categorized. Each response has a key that needs to be mapped to a value. Infer the schema from the text unless it is given at the top of the text with prior examples. Return the key-value pairs in a JSON format unless you infer a different format.
25 | answer: Generate text that satisfies the question or request given in the input.
26 | shell: Generate a shell script that performs the following actions. Output only the command. Do not output any comments or explanations. Default to the bash shell unless otherwise specified.
27 | add emoji: Return the same exact text verbatim with the same formatting, but add emoji when appropriate in order to make the text fun and easier to understand.
28 | make softer: Act as an editor. I want you to make the following text softer in tone. Return just the modified text.
29 | make stronger: Act as an editor. I want you to make the following text stronger in tone. Return just the modified text.
30 |
31 | ## FILE CONVERSIONS
32 | convert to jason: Convert the following data into a JSON format.
33 | convert to markdown: Convert the following text into a markdown format.
34 | convert to python: Convert the following key-value pairs into the syntax for a Python dictionary. So you should serialize the key-value pairs into a native Python format.
35 | convert to sheet: Convert the following data into a CSV format.
36 | convert to yam: Convert the following data into a YAML format.
37 |
38 | ## CHECKERS
39 | describe code: Explain what the following code does in natural language at a high level without getting into the specifics of the syntax.
40 | check grammar: Check the grammar and formatting of the following text. Return a list of all potential errors.
41 | check spelling: Check the spelling of the following text. Return a list of all potential errors.
42 | check structure: Skim the structure and layout of the following text. Tell me if the structure and order of my writing are correct. If it is not correct or flows poorly, then tell me what might be wrong with it. If it is all correct, then say it looks good.
43 |
44 | ## TRANSLATIONS
45 | translate to english: Translate the following text into English.
46 |
47 | ## CODE GENERATION
48 | generate code: The following plaintext describes a process in code in the language that is specified by the system prompt. Please output the code necessary to do this. Return just code and not any natural language explanations.
49 | update comments: Act as a software engineer. The following code may be missing comments or the comments could be out of date. Please update the comments. If you are unsure how to comment something, ask a question in a comment instead. Return just the code and not any explanations.
50 | clean code: Act as a software engineer. Reduce any duplication in the selected code and improve it to be more idiomatic and clear for other users. However, do not change the behavior or functionality. Return just the code and not any explanations.
51 | improve semantics: The following is an HTML document. Keep the same structure and layout but if it is needed, change any elements to use proper semantic HTML and make sure it is implementing best practices for user accessibility. Output just the HTML and not any extra explanations.
52 |
53 | ## WRITING HELPERS
54 | add questions: Help me explore this question from multiple perspectives. For each perspective, ask follow-up questions and indicate what perspective is being taken.
55 | format outline: Create an outline that encapsulates the text below. Keep the number of sections between three and five to optimize for human working memory. Return just the outline.
56 | format prose: As an editor, format the following outline or summarization as prose. You can have headings and paragraphs. Avoid using bullet points. Reorder and add transitions as necessary to make the document flow. Return just the text.
57 |
--------------------------------------------------------------------------------
/GPT/gpt.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Any, Optional
3 |
4 | from talon import Module, actions, clip, settings
5 |
6 | from ..lib.HTMLBuilder import Builder
7 | from ..lib.modelConfirmationGUI import confirmation_gui
8 | from ..lib.modelHelpers import (
9 | extract_message,
10 | format_clipboard,
11 | format_message,
12 | messages_to_string,
13 | notify,
14 | send_request,
15 | )
16 | from ..lib.modelState import GPTState
17 | from ..lib.modelTypes import GPTMessageItem
18 |
19 | mod = Module()
20 | mod.tag(
21 | "model_window_open",
22 | desc="Tag for enabling the model window commands when the window is open",
23 | )
24 |
25 |
26 | def gpt_query(
27 | prompt: GPTMessageItem,
28 | text_to_process: Optional[GPTMessageItem],
29 | model: str,
30 | thread: str,
31 | destination: str = "",
32 | ):
33 | """Send a prompt to the GPT API and return the response"""
34 |
35 | # Reset state before pasting
36 | GPTState.last_was_pasted = False
37 |
38 | response = send_request(prompt, text_to_process, model, thread, destination)
39 | GPTState.last_response = extract_message(response)
40 | return response
41 |
42 |
43 | @mod.action_class
44 | class UserActions:
45 | def gpt_generate_shell(text_to_process: str, model: str, thread: str) -> str:
46 | """Generate a shell command from a spoken instruction"""
47 | shell_name = settings.get("user.model_shell_default")
48 | if shell_name is None:
49 | raise Exception("GPT Error: Shell name is not set. Set it in the settings.")
50 |
51 | prompt = f"""
52 | Generate a {shell_name} shell command that will perform the given task.
53 | Only include the code. Do not include any comments, backticks, or natural language explanations. Do not output the shell name, only the code that is valid {shell_name}.
54 | Condense the code into a single line such that it can be run in the terminal.
55 | """
56 |
57 | result = gpt_query(
58 | format_message(prompt), format_message(text_to_process), model, thread
59 | )
60 | return extract_message(result)
61 |
62 | def gpt_generate_sql(text_to_process: str, model: str, thread: str) -> str:
63 | """Generate a SQL query from a spoken instruction"""
64 |
65 | prompt = """
66 | Generate SQL to complete a given request.
67 | Output only the SQL in one line without newlines.
68 | Do not output comments, backticks, or natural language explanations.
69 | Prioritize SQL queries that are database agnostic.
70 | """
71 | return gpt_query(
72 | format_message(prompt), format_message(text_to_process), model, thread
73 | ).get("text", "")
74 |
75 | def gpt_start_debug():
76 | """Enable debug logging"""
77 | GPTState.start_debug()
78 |
79 | def gpt_stop_debug():
80 | """Disable debug logging"""
81 | GPTState.stop_debug()
82 |
83 | def gpt_clear_context():
84 | """Reset the stored context"""
85 | GPTState.clear_context()
86 |
87 | def gpt_push_context(context: str | list[str]):
88 | """Add the selected text to the stored context"""
89 | if isinstance(context, list):
90 | context = "\n".join(context)
91 | GPTState.push_context(format_message(context))
92 |
93 | def gpt_additional_user_context() -> list[str]:
94 | """This is an override function that can be used to add additional context to the prompt"""
95 | return []
96 |
97 | def gpt_select_last() -> None:
98 | """select all the text in the last GPT output"""
99 | if not GPTState.last_was_pasted:
100 | notify("Tried to select GPT output, but it was not pasted in an editor")
101 | return
102 |
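# The cursor sits at the end of the pasted text, so grow the selection upward
# one line at a time, then extend left across the first line's characters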
103 | lines = GPTState.last_response.split("\n")
104 | for _ in lines[:-1]:
105 | actions.edit.extend_up()
106 | actions.edit.extend_line_end()
107 | for _ in lines[0]:
108 | actions.edit.extend_left()
109 |
110 | def gpt_apply_prompt(
111 | prompt: str,
112 | model: str,
113 | thread: str,
114 | source: str = "",
115 | destination: str = "",
116 | ):
117 | """Apply an arbitrary prompt to arbitrary text"""
118 |
119 | text_to_process: GPTMessageItem = actions.user.gpt_get_source_text(source)
120 | if not text_to_process.get("text") and not text_to_process.get("image_url"):
121 | text_to_process = None # type: ignore
122 |
123 | # Handle special cases in the prompt
124 | ### Ask is a special case, where the text to process is the prompted question, not selected text
125 | if prompt.startswith("ask"):
126 | text_to_process = format_message(prompt.removeprefix("ask"))
127 | prompt = "Generate text that satisfies the question or request given in the input."
128 |
129 | response = gpt_query(
130 | format_message(prompt), text_to_process, model, thread, destination
131 | )
132 |
133 | actions.user.gpt_insert_response(response, destination)
134 | return response
135 |
136 | def gpt_apply_prompt_for_cursorless(
137 | prompt: str,
138 | model: str,
139 | thread: str,
140 | source: list[str],
141 | ) -> str:
142 | """Apply a prompt to text from Cursorless and return a string result.
143 | This function is specifically designed for Cursorless integration
144 | and does not trigger insertion actions."""
145 |
146 | # Join the list into a single string
147 | source_text = "\n".join(source)
148 | text_to_process = format_message(source_text)
149 |
150 | # Send the request but don't insert the response (Cursorless will handle insertion)
151 | response = gpt_query(format_message(prompt), text_to_process, model, thread, "")
152 |
153 | # Return just the text string
154 | return extract_message(response)
155 |
156 | def gpt_pass(source: str = "", destination: str = "") -> None:
157 | """Passes a response from source to destination"""
158 | actions.user.gpt_insert_response(
159 | actions.user.gpt_get_source_text(source), destination
160 | )
161 |
162 | def gpt_help() -> None:
163 | """Open the GPT help file in the web browser"""
164 | # get the text from the file and open it in the web browser
165 | current_dir = os.path.dirname(__file__)
166 | file_path = os.path.join(current_dir, "lists", "staticPrompt.talon-list")
167 | with open(file_path, "r") as f:
168 | lines = f.readlines()[2:]
169 |
170 | builder = Builder()
171 | builder.h1("Talon GPT Prompt List")
172 | for line in lines:
173 | if "##" in line:
174 | builder.h2(line)
175 | else:
176 | builder.p(line)
177 |
178 | builder.render()
179 |
180 | def gpt_reformat_last(how_to_reformat: str, model: str, thread: str) -> str:
181 | """Reformat the last model output"""
182 | PROMPT = f"""The last phrase was written using voice dictation. It has an error with spelling, grammar, or just general misrecognition due to a lack of context. Please reformat the following text to correct the error with the context that it was {how_to_reformat}."""
183 | last_output = actions.user.get_last_phrase()
184 | if last_output:
185 | actions.user.clear_last_phrase()
186 | return extract_message(
187 | gpt_query(
188 | format_message(PROMPT), format_message(last_output), model, thread
189 | )
190 | )
191 | else:
192 | notify("No text to reformat")
193 | raise Exception("No text to reformat")
194 |
195 | def gpt_insert_response(
196 | gpt_message: GPTMessageItem,
197 | method: str = "",
198 | cursorless_destination: Any = None,
199 | ) -> None:
200 | """Insert a GPT result in a specified way"""
201 | # Use a custom default if nothing is provided and the user has set
202 | # a different default destination
203 | if method == "":
204 | method = settings.get("user.model_default_destination")
205 |
206 | if gpt_message.get("type") != "text":
207 | actions.app.notify(
208 | f"Tried to insert an image to {method}, but that is not currently supported. To insert an image to this destination use a prompt to convert it to text."
209 | )
210 | return
211 |
212 | message_text_no_images = extract_message(gpt_message)
213 | match method:
214 | case "above":
215 | actions.key("left")
216 | actions.edit.line_insert_up()
217 | GPTState.last_was_pasted = True
218 | actions.user.paste(message_text_no_images)
219 | case "below":
220 | actions.key("right")
221 | actions.edit.line_insert_down()
222 | GPTState.last_was_pasted = True
223 | actions.user.paste(message_text_no_images)
224 | case "clipboard":
225 | clip.set_text(message_text_no_images)
226 | case "snip":
227 | actions.user.insert_snippet(message_text_no_images)
228 | case "context":
229 | GPTState.push_context(gpt_message)
230 | case "newContext":
231 | GPTState.clear_context()
232 | GPTState.push_context(gpt_message)
233 | case "appendClipboard":
234 | if clip.text() is not None:
235 | clip.set_text(clip.text() + "\n" + message_text_no_images) # type: ignore Unclear why this is throwing a type error in pylance
236 | else:
237 | clip.set_text(message_text_no_images)
238 | case "browser":
239 | builder = Builder()
240 | builder.h1("Talon GPT Result")
241 | for line in message_text_no_images.split("\n"):
242 | builder.p(line)
243 | builder.render()
244 | case "textToSpeech":
245 | try:
246 | actions.user.tts(message_text_no_images)
247 | except KeyError:
248 | notify("GPT Failure: text to speech is not installed")
249 |
250 | # Although we can insert to a cursorless destination, the cursorless_target capture
251 | # greatly increases DFA compilation times and should be avoided if possible
252 | case "cursorless":
253 | actions.user.cursorless_insert(
254 | cursorless_destination, message_text_no_images
255 | )
256 | # Don't add to the window twice if the thread is enabled
257 | case "window":
258 | # If there was prior text in the confirmation GUI and the user
259 | # explicitly passed new text to the gui, clear the old result
260 | GPTState.text_to_confirm = message_text_no_images
261 | actions.user.confirmation_gui_append(message_text_no_images)
262 | case "chain":
263 | GPTState.last_was_pasted = True
264 | actions.user.paste(message_text_no_images)
265 | actions.user.gpt_select_last()
266 |
267 | case "paste":
268 | GPTState.last_was_pasted = True
269 | actions.user.paste(message_text_no_images)
270 | # If the user doesn't specify a method assume they want to paste.
271 | # However if they didn't specify a method when the confirmation gui
272 | # is showing, assume they don't want anything to be inserted
273 | case _ if not confirmation_gui.showing:
274 | GPTState.last_was_pasted = True
275 | actions.user.paste(message_text_no_images)
276 | # Don't do anything if none of the previous conditions were valid
277 | case _:
278 | pass
279 |
280 | def gpt_get_source_text(spoken_text: str) -> GPTMessageItem:
281 | """Get the source text that is will have the prompt applied to it"""
282 | match spoken_text:
283 | case "clipboard":
284 | return format_clipboard()
285 | case "context":
286 | if GPTState.context == []:
287 | notify("GPT Failure: Context is empty")
288 | raise Exception(
289 | "GPT Failure: User applied a prompt to the phrase context, but there was no context stored"
290 | )
291 | return format_message(messages_to_string(GPTState.context))
292 | case "gptResponse":
293 | if GPTState.last_response == "":
294 | raise Exception(
295 | "GPT Failure: User applied a prompt to the phrase GPT response, but there was no GPT response stored"
296 | )
297 | return format_message(GPTState.last_response)
298 |
299 | case "lastTalonDictation":
300 | last_output = actions.user.get_last_phrase()
301 | if last_output:
302 | actions.user.clear_last_phrase()
303 | return format_message(last_output)
304 | else:
305 | notify("GPT Failure: No last dictation to reformat")
306 | raise Exception(
307 | "GPT Failure: User applied a prompt to the phrase last Talon Dictation, but there was no text to reformat"
308 | )
309 | case "this" | _:
310 | return format_message(actions.edit.selected_text())
311 |
--------------------------------------------------------------------------------
/lib/modelHelpers.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import json
3 | import logging
4 | import os
5 | import platform
6 | import subprocess
7 | from pathlib import Path
8 | from typing import IO, Any, Literal, NotRequired, Optional, TypedDict
9 |
10 | import requests
11 | from talon import actions, app, clip, resource, settings
12 |
13 | from ..lib.pureHelpers import strip_markdown
14 | from .modelState import GPTState
15 | from .modelTypes import GPTMessage, GPTMessageItem
16 |
17 | """"
18 | All functions in this this file have impure dependencies on either the model or the talon APIs
19 | """
20 |
21 |
22 | # TypedDict definition for model configuration
23 | class ModelConfig(TypedDict):
24 | name: str
25 | model_id: NotRequired[str]
26 | system_prompt: NotRequired[str]
27 | llm_options: NotRequired[dict[str, Any]]
28 | api_options: NotRequired[dict[str, Any]]
29 |
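# An illustrative models.json entry matching this schema (values are examples,
# not shipped defaults):
# [
#   {
#     "name": "mini",
#     "model_id": "gpt-4o-mini",
#     "system_prompt": "Answer concisely.",
#     "llm_options": {"temperature": 0.2},
#     "api_options": {"max_tokens": 1024}
#   }
# ]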
30 |
31 | # Path to the models.json file
32 | MODELS_PATH = Path(__file__).parent.parent / "models.json"
33 |
34 | # Store loaded model configurations
35 | model_configs: dict[str, ModelConfig] = {}
36 |
37 |
38 | def load_model_config(f: IO) -> None:
39 | """
40 | Load model configurations from models.json
41 | """
42 | global model_configs
43 | try:
44 | content = f.read()
45 | configs = json.loads(content)
46 | # Convert list to dictionary with name as key
47 | model_configs = {config["name"]: config for config in configs}
48 | except Exception as e:
49 | notify(f"Failed to load models.json: {e!r}")
50 | model_configs = {}
51 |
52 |
53 | def ensure_models_file_exists():
54 | if not MODELS_PATH.exists():
55 | with open(MODELS_PATH, "w") as f:
56 | f.write("[]")
57 |
58 |
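# Create an empty models.json on first load so the resource watcher below
# always has a file to watch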
59 | ensure_models_file_exists()
60 |
61 |
62 | # Set up file watcher to reload configuration when models.json changes
63 | @resource.watch(str(MODELS_PATH))
64 | def on_update(f: IO):
65 | load_model_config(f)
66 |
67 |
68 | def resolve_model_name(model: str) -> str:
69 | """
70 | Get the actual model name from the model list value.
71 | """
72 | if model == "model":
73 | # Check for deprecated setting first for backward compatibility
74 | openai_model: str = settings.get("user.openai_model") # type: ignore
75 | if openai_model != "do_not_use":
76 | logging.warning(
77 | "The setting 'user.openai_model' is deprecated. Please use 'user.model_default' instead."
78 | )
79 | model = openai_model
80 | else:
81 | model = settings.get("user.model_default") # type: ignore
82 | return model
83 |
84 |
85 | def get_model_config(model_name: str) -> Optional[ModelConfig]:
86 | """
87 | Get the configuration for a specific model from the loaded configs
88 | """
89 | return model_configs.get(model_name)
90 |
91 |
92 | def messages_to_string(messages: list[GPTMessageItem]) -> str:
93 | """Format messages as a string"""
94 | formatted_messages = []
95 | for message in messages:
96 | if message.get("type") == "image_url":
97 | formatted_messages.append("image")
98 | else:
99 | formatted_messages.append(message.get("text", ""))
100 | return "\n\n".join(formatted_messages)
101 |
102 |
103 | def notify(message: str):
104 | """Send a notification to the user. Defaults the Andreas' notification system if you have it installed"""
105 | try:
106 | actions.user.notify(message)
107 | except Exception:
108 | app.notify(message)
109 | # Log in case notifications are disabled
110 | print(message)
111 |
112 |
113 | def get_token() -> str:
114 | """Get the OpenAI API key from the environment"""
115 | try:
116 | return os.environ["OPENAI_API_KEY"]
117 | except KeyError:
118 | message = "GPT Failure: env var OPENAI_API_KEY is not set."
119 | notify(message)
120 | raise Exception(message)
121 |
122 |
123 | def format_messages(
124 | role: Literal["user", "system", "assistant"], messages: list[GPTMessageItem]
125 | ) -> GPTMessage:
126 | return {
127 | "role": role,
128 | "content": messages,
129 | }
130 |
131 |
132 | def format_message(content: str) -> GPTMessageItem:
133 | return {"type": "text", "text": content}
134 |
135 |
136 | def extract_message(content: GPTMessageItem) -> str:
137 | return content.get("text", "")
138 |
139 |
140 | def format_clipboard() -> GPTMessageItem:
141 | clipped_image = clip.image()
142 | if clipped_image:
143 | data = clipped_image.encode().data()
144 | base64_image = base64.b64encode(data).decode("utf-8")
145 | return {
146 | "type": "image_url",
147 | "image_url": {"url": f"data:image/;base64,{base64_image}"},
148 | }
149 | else:
150 | if not clip.text():
151 | raise RuntimeError(
152 | "User requested info from the clipboard but there is nothing in it"
153 | )
154 |
155 | return format_message(clip.text()) # type: ignore Unclear why this is not narrowing the type
156 |
157 |
158 | def send_request(
159 | prompt: GPTMessageItem,
160 | content_to_process: Optional[GPTMessageItem],
161 | model: str,
162 | thread: str,
163 | destination: str = "",
164 | ) -> GPTMessageItem:
165 | """Generate run a GPT request and return the response"""
166 | model = resolve_model_name(model)
167 |
168 | continue_thread = thread == "continueLast"
169 |
170 | notification = "GPT Task Started"
171 | if len(GPTState.context) > 0:
172 | notification += ": Reusing Stored Context"
173 |
174 | # Use specified model if provided
175 | if model:
176 | notification += f", Using model: {model}"
177 |
178 | if settings.get("user.model_verbose_notifications"):
179 | notify(notification)
180 |
181 | # Get model configuration if available
182 | config = get_model_config(model)
183 |
184 | language = actions.code.language()
185 | language_context = (
186 | f"The user is currently in a code editor for the programming language: {language}."
187 | if language != ""
188 | else None
189 | )
190 | application_context = f"The following describes the currently focused application:\n\n{actions.user.talon_get_active_context()}"
191 | snippet_context = (
192 | "\n\nPlease return the response as a snippet with placeholders. A snippet can control cursors and text insertion using constructs like tabstops ($1, $2, etc., with $0 as the final position). Linked tabstops update together. Placeholders, such as ${1:foo}, allow easy changes and can be nested (${1:another ${2:}}). Choices, using ${1|one,two,three|}, prompt user selection."
193 | if destination == "snip"
194 | else None
195 | )
196 |
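# Build the system message from every non-empty context source: the configured
# system prompt, editor language, focused application, snippet instructions,
# user-supplied additions, and stored context items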
197 | system_message = "\n\n".join(
198 | [
199 | item
200 | for item in [
201 | (
202 | config["system_prompt"]
203 | if config and "system_prompt" in config
204 | else settings.get("user.model_system_prompt")
205 | ),
206 | language_context,
207 | application_context,
208 | snippet_context,
209 | ]
210 | + actions.user.gpt_additional_user_context()
211 | + [context.get("text") for context in GPTState.context]
212 | if item
213 | ]
214 | )
215 |
216 | content: list[GPTMessageItem] = [prompt]
217 | if content_to_process is not None:
218 | if content_to_process["type"] == "image_url":
219 | image = content_to_process
220 | # If we are processing an image, we have
221 | # to add it as a second message
222 | content = [prompt, image]
223 | elif content_to_process["type"] == "text":
224 | # If we are processing text content, just
225 | # add the text on to the same message instead
226 | # of splitting it into multiple messages
227 | prompt["text"] = (
228 | prompt["text"] + '\n\n"""' + content_to_process["text"] + '"""' # type: ignore a Prompt has to be of type text
229 | )
230 | content = [prompt]
231 |
232 | request = GPTMessage(
233 | role="user",
234 | content=content,
235 | )
236 |
237 | model_endpoint: str = settings.get("user.model_endpoint") # type: ignore
238 | if model_endpoint == "llm":
239 | response = send_request_to_llm_cli(
240 | prompt, content_to_process, system_message, model, continue_thread
241 | )
242 | else:
243 | if continue_thread:
244 | notify(
245 | "Warning: Thread continuation is only supported when using setting user.model_endpoint = 'llm'"
246 | )
247 | response = send_request_to_api(request, system_message, model)
248 |
249 | return response
250 |
251 |
252 | def send_request_to_api(
253 | request: GPTMessage, system_message: str, model: str
254 | ) -> GPTMessageItem:
255 | """Send a request to the model API endpoint and return the response"""
256 | # Get model configuration if available
257 | config = get_model_config(model)
258 |
259 | # Use model_id from configuration if available
260 | model_id = config["model_id"] if config and "model_id" in config else model
261 |
262 | data = {
263 | "messages": (
264 | [
265 | format_messages(
266 | "system",
267 | [GPTMessageItem(type="text", text=system_message)],
268 | ),
269 | ]
270 | if system_message
271 | else []
272 | )
273 | + [request],
274 | "max_tokens": 2024,
275 | "n": 1,
276 | "model": model_id,
277 | }
278 |
279 | # Check for deprecated temperature setting
280 | temperature: float = settings.get("user.model_temperature") # type: ignore
281 | if temperature != -1.0:
282 | logging.warning(
283 | "The setting 'user.model_temperature' is deprecated. Please configure temperature in models.json instead."
284 | )
285 | data["temperature"] = temperature
286 |
287 | # Apply API options from configuration if available
288 | if config and "api_options" in config:
289 | data.update(config["api_options"])
290 |
291 | if GPTState.debug_enabled:
292 | print(data)
293 |
294 | url: str = settings.get("user.model_endpoint") # type: ignore
295 | headers = {"Content-Type": "application/json"}
296 | token = get_token()
297 | # If the model endpoint is Azure, we need to use a different header
298 | if "azure.com" in url:
299 | headers["api-key"] = token
300 | else:
301 | headers["Authorization"] = f"Bearer {token}"
302 |
303 | raw_response = requests.post(url, headers=headers, data=json.dumps(data))
304 |
305 | match raw_response.status_code:
306 | case 200:
307 | if settings.get("user.model_verbose_notifications"):
308 | notify("GPT Task Completed")
309 | resp = raw_response.json()["choices"][0]["message"]["content"].strip()
310 | formatted_resp = strip_markdown(resp)
311 | return format_message(formatted_resp)
312 | case _:
313 | notify("GPT Failure: Check the Talon Log")
314 | raise Exception(raw_response.json())
315 |
316 |
317 | def send_request_to_llm_cli(
318 | prompt: GPTMessageItem,
319 | content_to_process: Optional[GPTMessageItem],
320 | system_message: str,
321 | model: str,
322 | continue_thread: bool,
323 | ) -> GPTMessageItem:
324 | """Send a request to the LLM CLI tool and return the response"""
325 | # Get model configuration if available
326 | config = get_model_config(model)
327 |
328 | # Use model_id from configuration if available
329 | model_id = config["model_id"] if config and "model_id" in config else model
330 |
331 | # Build command
332 | command: list[str] = [settings.get("user.model_llm_path")] # type: ignore
333 | if continue_thread:
334 | command.append("-c")
335 | command.append(prompt["text"]) # type: ignore
336 | cmd_input: bytes | None = None
337 | if content_to_process and content_to_process["type"] == "image_url":
338 | img_url: str = content_to_process["image_url"]["url"] # type: ignore
339 | if img_url.startswith("data:"):
340 | command.extend(["-a", "-"])
341 | base64_data: str = img_url.split(",", 1)[1]
342 | cmd_input = base64.b64decode(base64_data)
343 | else:
344 | command.extend(["-a", img_url])
345 |
346 | # Add model option
347 | command.extend(["-m", model_id])
348 |
349 | # Check for deprecated temperature setting
350 | temperature: float = settings.get("user.model_temperature") # type: ignore
351 | if temperature != -1.0:
352 | logging.warning(
353 | "The setting 'user.model_temperature' is deprecated. Please configure temperature in models.json instead."
354 | )
355 | command.extend(["-o", "temperature", str(temperature)])
356 |
357 | # Apply llm_options from configuration if available
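# e.g. llm_options of {"temperature": 0.2, "no_stream": true} becomes
# "-o temperature 0.2 -o no_stream true" (option names here are illustrative)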
358 | if config and "llm_options" in config:
359 | for key, value in config["llm_options"].items():
360 | if isinstance(value, bool):
361 | if value:
362 | command.extend(["-o", key, "true"])
363 | else:
364 | command.extend(["-o", key, "false"])
365 | else:
366 | command.extend(["-o", key, str(value)])
367 |
368 | # Add system message if available
369 | if system_message:
370 | command.extend(["-s", system_message])
371 |
372 | if GPTState.debug_enabled:
373 | print(command)
374 |
375 | # Configure output encoding
376 | process_env = os.environ.copy()
377 | if platform.system() == "Windows":
378 | process_env["PYTHONUTF8"] = "1" # For Python 3.7+ to enable UTF-8 mode
379 | # On other platforms, UTF-8 is also the common/expected encoding.
380 | output_encoding = "utf-8"
381 |
382 | # Execute command and capture output.
383 | try:
384 | result = subprocess.run(
385 | command,
386 | input=cmd_input,
387 | capture_output=True,
388 | check=True,
389 | creationflags=(
390 | subprocess.CREATE_NO_WINDOW if platform.system() == "Windows" else 0 # type: ignore
391 | ),
392 | env=process_env if platform.system() == "Windows" else None,
393 | )
394 | if settings.get("user.model_verbose_notifications"):
395 | notify("GPT Task Completed")
396 | resp = result.stdout.decode(output_encoding).strip()
397 | formatted_resp = strip_markdown(resp)
398 | return format_message(formatted_resp)
399 | except subprocess.CalledProcessError as e:
400 | error_msg = e.stderr.decode(output_encoding).strip() if e.stderr else str(e)
401 | notify(f"GPT Failure: {error_msg}")
402 | raise e
403 | except Exception as e:
404 | notify("GPT Failure: Check the Talon Log")
405 | raise e
406 |
--------------------------------------------------------------------------------