├── .env_sample ├── .gitignore ├── Grompt.py ├── README.md ├── prompt_canvas.py ├── requirements.txt ├── src ├── Grompt.md └── custom_instructions.txt └── streamlit_app.py /.env_sample: -------------------------------------------------------------------------------- 1 | # Groq API Key (required) 2 | GROQ_API_KEY=your_api_key_here 3 | 4 | # Default model to use (optional, uncomment to change; must match a currently available Groq model) 5 | # GROMPT_DEFAULT_MODEL=llama-3.3-70b-versatile 6 | 7 | # Default temperature for text generation (optional, uncomment to change) 8 | # GROMPT_DEFAULT_TEMPERATURE=0.5 9 | 10 | # Default maximum number of tokens to generate (optional, uncomment to change) 11 | # GROMPT_DEFAULT_MAX_TOKENS=1024 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Virtual Environment 7 | venv/ 8 | env/ 9 | .env 10 | 11 | # IDE files 12 | .vscode/ 13 | .idea/ 14 | 15 | # Logs 16 | *.log 17 | 18 | # OS generated files 19 | .DS_Store 20 | .DS_Store? 
21 | ._* 22 | .Spotlight-V100 23 | .Trashes 24 | ehthumbs.db 25 | Thumbs.db 26 | 27 | # Grompt specific 28 | grompt_output/ -------------------------------------------------------------------------------- /Grompt.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from dotenv import load_dotenv 4 | from pocketgroq import GroqProvider 5 | from typing import Optional 6 | from prompt_canvas import PromptCanvas 7 | 8 | load_dotenv() 9 | 10 | DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile') 11 | DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5')) 12 | DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024')) 13 | 14 | def craft_system_message(canvas: Optional[PromptCanvas] = None, prompt: str = "") -> str: 15 | if canvas: 16 | return f"""You are a {canvas.persona} focused on delivering results for {canvas.audience}. 17 | 18 | Task: {canvas.task} 19 | 20 | Step-by-Step Approach: 21 | {chr(10).join(f'- {step}' for step in canvas.steps)} 22 | 23 | Context: {canvas.context} 24 | 25 | References: {', '.join(canvas.references)} 26 | 27 | Output Requirements: 28 | - Format: {canvas.output_format} 29 | - Tone: {canvas.tonality}""" 30 | else: 31 | return get_rephrased_user_prompt(prompt) 32 | 33 | def get_rephrased_user_prompt(prompt: str) -> str: 34 | return f"""You are a professional prompt engineer. Optimize this prompt by making it clearer, more concise, and more effective. 
35 | User request: "{prompt}" 36 | Rephrased:""" 37 | 38 | def rephrase_prompt(prompt: str, 39 | model: str = DEFAULT_MODEL, 40 | temperature: float = DEFAULT_TEMPERATURE, 41 | max_tokens: int = DEFAULT_MAX_TOKENS, 42 | canvas: Optional[PromptCanvas] = None) -> str: 43 | try: 44 | groq = GroqProvider() 45 | system_message = craft_system_message(canvas, prompt) 46 | 47 | response = groq.generate( 48 | prompt=system_message, 49 | model=model, 50 | temperature=temperature, 51 | max_tokens=max_tokens, 52 | ) 53 | 54 | return response.strip() 55 | except Exception as e: 56 | raise Exception(f"Prompt engineering error: {str(e)}") 57 | 58 | def main(): 59 | parser = argparse.ArgumentParser(description="Rephrase prompts using Groq LLM.") 60 | parser.add_argument("prompt", help="The prompt to rephrase") 61 | parser.add_argument("--model", default=DEFAULT_MODEL) 62 | parser.add_argument("--temperature", type=float, default=DEFAULT_TEMPERATURE) 63 | parser.add_argument("--max_tokens", type=int, default=DEFAULT_MAX_TOKENS) 64 | 65 | args = parser.parse_args() 66 | 67 | try: 68 | rephrased = rephrase_prompt(args.prompt, args.model, args.temperature, args.max_tokens) 69 | print("Rephrased prompt:") 70 | print(rephrased) 71 | except Exception as e: 72 | print(f"Error: {str(e)}") 73 | 74 | if __name__ == "__main__": 75 | main() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Grompt Utility 2 | 3 | Grompt is a Python utility that uses the Groq LLM provider service to instantly refactor amazingly detailed and effective prompts. It's designed to optimize user prompts for better results when working with large language models. 
4 | 5 | ![image](https://github.com/user-attachments/assets/42eb1007-0e43-445d-b763-c3aca0da43fd) 6 | 7 | 8 | ## Features 9 | 10 | - Rephrase and optimize user prompts using Groq's LLM services 11 | - Configurable via environment variables or .env file 12 | - Can be used as a module in other Python scripts or run from the command line 13 | - Supports various Groq models and customizable parameters 14 | - Includes a separate Streamlit web app for easy demonstration and testing 15 | - Streamlit app supports API key input for use in hosted environments 16 | 17 | ## Prerequisites 18 | 19 | - Python 3.7 or higher (the bundled `prompt_canvas.py` uses dataclasses, introduced in Python 3.7) 20 | - A Groq API key 21 | 22 | ## Installation 23 | 24 | 1. Clone this repository: 25 | ``` 26 | git clone https://github.com/jgravelle/Grompt.git 27 | cd Grompt 28 | ``` 29 | 30 | 2. Install the required dependencies: 31 | ``` 32 | pip install -r requirements.txt 33 | ``` 34 | 35 | 3. Create a `.env` file in the project root directory and add your Groq API key: 36 | ``` 37 | GROQ_API_KEY=your_api_key_here 38 | ``` 39 | 40 | ## Adding Grompt to Your Project 41 | 42 | To use Grompt in your project, you need to include both the `Grompt.py` and `prompt_canvas.py` files (`Grompt.py` imports `PromptCanvas` from `prompt_canvas`). Follow these steps: 43 | 44 | 1. Copy the `Grompt.py` and `prompt_canvas.py` files into your project directory. 45 | 2. Install the required dependencies: 46 | ``` 47 | pip install pocketgroq python-dotenv 48 | ``` 49 | 3. 
Import and use the `rephrase_prompt` function in your Python scripts: 50 | ```python 51 | from Grompt import rephrase_prompt 52 | 53 | original_prompt = "Write a story about a robot" 54 | rephrased_prompt = rephrase_prompt(original_prompt) 55 | 56 | print(rephrased_prompt) 57 | ``` 58 | 59 | ## File Structure 60 | 61 | - `Grompt.py`: The main Grompt utility file 62 | - `streamlit_app.py`: A separate Streamlit app for demonstrating Grompt's capabilities 63 | - `.env`: Configuration file for environment variables 64 | - `requirements.txt`: List of Python dependencies 65 | - `README.md`: This file 66 | 67 | ## Configuration 68 | 69 | You can configure Grompt using environment variables or a `.env` file. Here are the available configuration options: 70 | 71 | - `GROQ_API_KEY`: Your Groq API key (required) 72 | - `GROMPT_DEFAULT_MODEL`: The default Groq model to use (optional, default is 'llama-3.3-70b-versatile') 73 | - `GROMPT_DEFAULT_TEMPERATURE`: The default temperature for text generation (optional, default is 0.5) 74 | - `GROMPT_DEFAULT_MAX_TOKENS`: The default maximum number of tokens to generate (optional, default is 1024) 75 | 76 | Example `.env` file: 77 | 78 | ``` 79 | GROQ_API_KEY=your_api_key_here 80 | GROMPT_DEFAULT_MODEL=llama-3.3-70b-versatile 81 | GROMPT_DEFAULT_TEMPERATURE=0.7 82 | GROMPT_DEFAULT_MAX_TOKENS=2048 83 | ``` 84 | 85 | ## Usage 86 | 87 | ### Streamlit Web App 88 | 89 | To run the Streamlit web app for an interactive demo: 90 | 91 | ``` 92 | streamlit run streamlit_app.py 93 | ``` 94 | 95 | This will start a local web server and open the Grompt demo in your default web browser. You can enter prompts, adjust parameters, and see the optimized results in real-time. 96 | 97 | When using the Streamlit app in a hosted environment: 98 | 99 | 1. Look for the sidebar on the left side of the app. 100 | 2. Enter your Groq API key in the "Enter your GROQ API Key:" field. 101 | 3. Your API key will be used only for the current session and is not stored. 
102 | 103 | Note: Always keep your API keys confidential and do not share them publicly. 104 | 105 | ### As a Command-Line Tool 106 | 107 | Run Grompt from the command line: 108 | 109 | ``` 110 | python Grompt.py "Your prompt here" [--model MODEL] [--temperature TEMP] [--max_tokens MAX_TOKENS] 111 | ``` 112 | 113 | Options: 114 | - `--model`: Specify the Groq model to use (overrides the default) 115 | - `--temperature`: Set the temperature for text generation (overrides the default) 116 | - `--max_tokens`: Set the maximum number of tokens to generate (overrides the default) 117 | 118 | Example: 119 | ``` 120 | python Grompt.py "Write a poem about AI" --model llama3-groq-8b-8192-tool-use-preview --temperature 0.8 --max_tokens 500 121 | ``` 122 | 123 | ### Practical Example 124 | 125 | Here's an example of Grompt in action: 126 | 127 | ``` 128 | C:\ai\Grompt> python Grompt.py "Write an 11th grade level report on quantum physics" 129 | Rephrased prompt: 130 | "Compose a comprehensive report on quantum physics, tailored to an 11th-grade reading level, that includes clear explanations of key concepts, historical background, and real-world applications. Ensure the report is engaging, informative, and easy to understand for students at this level. Include relevant examples and diagrams to illustrate complex ideas. The report should be well-structured, with logical flow between sections, and should not exceed 2000 words. Please adhere to academic writing standards and provide a list of credible sources used in the research." 131 | ``` 132 | 133 | This example demonstrates how Grompt takes a simple, open-ended prompt and transforms it into a detailed, structured prompt that is likely to produce a high-quality response from an LLM. 134 | 135 | ## Contributing 136 | 137 | Contributions are welcome! Please feel free to submit a Pull Request. 138 | 139 | ## License 140 | 141 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
142 | Include the name 'J. Gravelle' somewhere in your code and docs if you use this. He's full of himself. 143 | 144 | ## Acknowledgments 145 | 146 | - Thanks to Groq for providing the LLM services used in this utility. 147 | - This project was inspired by the need for better prompt engineering in AI applications. 148 | - Created by J. Gravelle, who is indeed full of himself. 149 | -------------------------------------------------------------------------------- /prompt_canvas.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List 3 | 4 | @dataclass 5 | class PromptCanvas: 6 | persona: str 7 | audience: str 8 | task: str 9 | steps: List[str] 10 | context: str 11 | references: List[str] 12 | output_format: str 13 | tonality: str -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pocketgroq 2 | groq 3 | python-dotenv 4 | streamlit 5 | aiohttp -------------------------------------------------------------------------------- /src/Grompt.md: -------------------------------------------------------------------------------- 1 | # Grompt.py 2 | 3 | ```python 4 | import argparse 5 | import os 6 | from dotenv import load_dotenv 7 | from pocketgroq import GroqProvider 8 | from pocketgroq.exceptions import GroqAPIKeyMissingError, GroqAPIError 9 | 10 | # Load environment variables from .env file 11 | load_dotenv() 12 | 13 | # Get configuration from environment variables or use defaults 14 | DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile') 15 | DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5')) 16 | DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024')) 17 | 18 | def get_rephrased_user_prompt(user_request: str) -> str: 19 | """ 20 | Generate a system message for prompt rephrasing. 
21 | 22 | Args: 23 | user_request (str): The original user request. 24 | 25 | Returns: 26 | str: A system message for prompt rephrasing. 27 | """ 28 | return f"""You are a professional prompt engineer. Your task is to optimize the user's prompt by making it clearer, more concise, and more effective. Only output the improved prompt without adding any commentary or labels. If the original prompt is already optimized, return it unchanged. 29 | User request: "{user_request}" 30 | Rephrased: 31 | """ 32 | 33 | def rephrase_prompt(prompt: str, model: str = DEFAULT_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: int = DEFAULT_MAX_TOKENS) -> str: 34 | """ 35 | Rephrase the given prompt using the GroqProvider. 36 | 37 | Args: 38 | prompt (str): The original prompt to rephrase. 39 | model (str): The model to use for generation. 40 | temperature (float): The temperature for text generation. 41 | max_tokens (int): The maximum number of tokens to generate. 42 | 43 | Returns: 44 | str: The rephrased prompt. 45 | 46 | Raises: 47 | GroqAPIKeyMissingError: If the GROQ_API_KEY is not set. 48 | GroqAPIError: If an error occurs during the API call. 
49 | """ 50 | try: 51 | groq = GroqProvider() 52 | 53 | system_message = get_rephrased_user_prompt(prompt) 54 | 55 | response = groq.generate( 56 | prompt=system_message, 57 | model=model, 58 | temperature=temperature, 59 | max_tokens=max_tokens, 60 | ) 61 | 62 | return response.strip() 63 | except GroqAPIKeyMissingError: 64 | raise GroqAPIKeyMissingError("GROQ_API_KEY must be set in the environment or in a .env file") 65 | except GroqAPIError as e: 66 | raise GroqAPIError(f"Error calling Groq API: {str(e)}") 67 | 68 | def main(): 69 | parser = argparse.ArgumentParser(description="Rephrase a user prompt using Groq LLM.") 70 | parser.add_argument("prompt", help="The user prompt to rephrase.") 71 | parser.add_argument("--model", default=DEFAULT_MODEL, help="The Groq model to use.") 72 | parser.add_argument("--temperature", type=float, default=DEFAULT_TEMPERATURE, help="The temperature for text generation.") 73 | parser.add_argument("--max_tokens", type=int, default=DEFAULT_MAX_TOKENS, help="The maximum number of tokens to generate.") 74 | 75 | args = parser.parse_args() 76 | 77 | try: 78 | rephrased = rephrase_prompt(args.prompt, args.model, args.temperature, args.max_tokens) 79 | print("Rephrased prompt:") 80 | print(rephrased) 81 | except (GroqAPIKeyMissingError, GroqAPIError) as e: 82 | print(f"Error: {str(e)}") 83 | except Exception as e: 84 | print(f"An unexpected error occurred: {str(e)}") 85 | 86 | def test_function(): 87 | return "Grompt module imported successfully!" 
88 | 89 | if __name__ == "__main__": 90 | main() 91 | ``` 92 | 93 | # streamlit_app.py 94 | 95 | ```python 96 | import streamlit as st 97 | import os 98 | import sys 99 | import importlib.util 100 | from dotenv import load_dotenv 101 | 102 | # Load environment variables from .env file 103 | load_dotenv() 104 | 105 | # Diagnostic information 106 | # st.write("Current working directory:", os.getcwd()) 107 | # st.write("Contents of current directory:", os.listdir()) 108 | # st.write("Python path:", sys.path) 109 | 110 | # Function to import a module from a file path 111 | def import_module_from_path(module_name, file_path): 112 | spec = importlib.util.spec_from_file_location(module_name, file_path) 113 | module = importlib.util.module_from_spec(spec) 114 | spec.loader.exec_module(module) 115 | return module 116 | 117 | # Get configuration from environment variables or use defaults 118 | DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile') 119 | DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5')) 120 | DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024')) 121 | 122 | # Sidebar for API key input and GitHub link 123 | st.sidebar.title("Configuration") 124 | GROQ_API_KEY = st.sidebar.text_input("Enter your GROQ API Key:", type="password") 125 | 126 | if not GROQ_API_KEY: 127 | st.sidebar.warning("Please enter your GROQ API Key to use the app.") 128 | 129 | # Main app 130 | st.title("Grompt - Prompt Optimizer") 131 | 132 | st.write(""" 133 | Grompt is a utility that uses Groq's LLM services to instantly optimize and rephrase prompts. 134 | Enter your prompt below and see how Grompt can improve it! Add it to YOUR project in seconds: 135 | """) 136 | st.write("""
rephrased = rephrase_prompt("[YOUR PROMPT HERE]")

""", unsafe_allow_html=True) 137 | 138 | user_prompt = st.text_area("Enter your prompt:", height=100) 139 | 140 | col1, col2, col3 = st.columns(3) 141 | with col1: 142 | model = st.selectbox("Select Model", [ 143 | "llama-3.3-70b-versatile", 144 | "llama3-groq-8b-8192-tool-use-preview", 145 | "llama3-70b-8192", 146 | "llama3-8b-8192" 147 | ], index=0) 148 | with col2: 149 | temperature = st.slider("Temperature", 0.0, 1.0, DEFAULT_TEMPERATURE, 0.1) 150 | with col3: 151 | max_tokens = st.number_input("Max Tokens", 1, 32768, DEFAULT_MAX_TOKENS) 152 | 153 | if st.button("Optimize Prompt"): 154 | if not GROQ_API_KEY: 155 | st.error("Please enter your GROQ API Key in the sidebar to use the app.") 156 | elif user_prompt: 157 | # Set the API key in the environment for the rephrase_prompt function 158 | os.environ['GROQ_API_KEY'] = GROQ_API_KEY 159 | 160 | # Now import Grompt after setting the API key 161 | try: 162 | Grompt = import_module_from_path("Grompt", "Grompt.py") 163 | # st.write("Successfully imported Grompt") 164 | except Exception as e: 165 | st.error(f"Unable to import 'Grompt': {str(e)}") 166 | st.stop() 167 | 168 | with st.spinner("Optimizing your prompt..."): 169 | optimized_prompt = Grompt.rephrase_prompt(user_prompt, model, temperature, max_tokens) 170 | if optimized_prompt: 171 | st.subheader("Optimized Prompt:") 172 | st.write(optimized_prompt) 173 | else: 174 | st.warning("Please enter a prompt to optimize.") 175 | 176 | st.markdown("---") 177 | st.write("Powered by Groq LLM services.") 178 | 179 | # Add a note about API key security 180 | st.sidebar.markdown("---") 181 | st.sidebar.info( 182 | "Note: Your API key is used only for this session and is not stored. " 183 | "Always keep your API keys confidential and do not share them publicly." 184 | ) 185 | 186 | # Add GitHub link to sidebar 187 | st.sidebar.markdown("---") 188 | st.sidebar.markdown("[View on GitHub](https://github.com/jgravelle/Grompt)") 189 | 190 | # Add credit to J. 
Gravelle 191 | st.sidebar.markdown("Created by J. Gravelle") 192 | 193 | ``` 194 | 195 | -------------------------------------------------------------------------------- /src/custom_instructions.txt: -------------------------------------------------------------------------------- 1 | Grompt is a Python utility that uses the Groq LLM provider service to instantly refactor amazingly detailed and effective prompts. It's designed to optimize user prompts for better results when working with large language models. 2 | 3 | Please act as an expert Python programmer and software engineer. The attached Grompt.md file contains the complete and up-to-date codebase for our application. Your task is to thoroughly analyze the codebase, understand its programming flow and logic, and provide detailed insights, suggestions, and solutions to enhance the application's performance, efficiency, readability, and maintainability. 4 | 5 | We highly value responses that demonstrate a deep understanding of the code. Please ensure your recommendations are thoughtful, well-analyzed, and contribute positively to the project's success. Your expertise is crucial in helping us improve and upgrade our application. 
6 | -------------------------------------------------------------------------------- /streamlit_app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | import sys 4 | import importlib.util 5 | from dotenv import load_dotenv 6 | from dataclasses import dataclass 7 | from typing import List 8 | 9 | load_dotenv() 10 | 11 | @dataclass 12 | class PromptCanvas: 13 | persona: str = "" 14 | audience: str = "" 15 | task: str = "" 16 | steps: List[str] = None 17 | context: str = "" 18 | references: List[str] = None 19 | output_format: str = "" 20 | tonality: str = "" 21 | 22 | def import_module_from_path(module_name, file_path): 23 | spec = importlib.util.spec_from_file_location(module_name, file_path) 24 | module = importlib.util.module_from_spec(spec) 25 | spec.loader.exec_module(module) 26 | return module 27 | 28 | DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile') 29 | DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5')) 30 | DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024')) 31 | 32 | st.sidebar.title("Configuration") 33 | GROQ_API_KEY = st.sidebar.text_input("Enter your GROQ API Key:", type="password") 34 | 35 | if not GROQ_API_KEY: 36 | st.sidebar.warning("Please enter your GROQ API Key to use the app.") 37 | 38 | st.title("Grompt - Prompt Optimizer") 39 | st.write("Grompt uses Groq's LLM services to instantly optimize prompts.") 40 | 41 | # Add tabs for Basic and Advanced modes 42 | tab1, tab2 = st.tabs(["Basic", "Advanced (Prompt Canvas)"]) 43 | 44 | with tab1: 45 | user_prompt = st.text_area("Enter your prompt:", height=100) 46 | 47 | with tab2: 48 | with st.expander("Persona & Audience", expanded=True): 49 | col1, col2 = st.columns(2) 50 | with col1: 51 | persona = st.text_input("Persona/Role", placeholder="e.g., expert technical writer") 52 | with col2: 53 | audience = st.text_input("Target Audience", 
placeholder="e.g., software developers") 54 | 55 | task = st.text_area("Task/Intent", placeholder="Describe the specific task...") 56 | steps = st.text_area("Steps", placeholder="Enter steps, one per line...") 57 | context = st.text_area("Context", placeholder="Provide relevant background...") 58 | references = st.text_area("References", placeholder="Enter references, one per line...") 59 | 60 | with st.expander("Output Format & Tone", expanded=True): 61 | col1, col2 = st.columns(2) 62 | with col1: 63 | output_format = st.selectbox("Output Format", 64 | ["Natural Text", "Technical Documentation", "Code", "Markdown"]) 65 | with col2: 66 | tonality = st.text_input("Tone", placeholder="e.g., professional, technical") 67 | 68 | canvas_prompt = st.text_area("Your Prompt:", height=100) 69 | 70 | # Shared model settings 71 | col1, col2, col3 = st.columns(3) 72 | with col1: 73 | model = st.selectbox("Select Model", [ 74 | "llama-3.3-70b-versatile", 75 | "llama3-groq-8b-8192-tool-use-preview", 76 | "llama3-70b-8192", 77 | "llama3-8b-8192" 78 | ], index=0) 79 | with col2: 80 | temperature = st.slider("Temperature", 0.0, 1.0, DEFAULT_TEMPERATURE, 0.1) 81 | with col3: 82 | max_tokens = st.number_input("Max Tokens", 1, 32768, DEFAULT_MAX_TOKENS) 83 | 84 | if st.button("Optimize Prompt"): 85 | if not GROQ_API_KEY: 86 | st.error("Please enter your GROQ API Key in the sidebar.") 87 | elif user_prompt or canvas_prompt: 88 | os.environ['GROQ_API_KEY'] = GROQ_API_KEY 89 | 90 | try: 91 | Grompt = import_module_from_path("Grompt", "Grompt.py") 92 | except Exception as e: 93 | st.error(f"Unable to import 'Grompt': {str(e)}") 94 | st.stop() 95 | 96 | with st.spinner("Optimizing prompt..."): 97 | if canvas_prompt: # Advanced mode 98 | canvas = PromptCanvas( 99 | persona=persona, 100 | audience=audience, 101 | task=task, 102 | steps=[s.strip() for s in steps.split('\n') if s.strip()], 103 | context=context, 104 | references=[r.strip() for r in references.split('\n') if r.strip()], 105 | 
output_format=output_format, 106 | tonality=tonality 107 | ) 108 | optimized_prompt = Grompt.rephrase_prompt( 109 | canvas_prompt, model, temperature, max_tokens, canvas=canvas 110 | ) 111 | else: # Basic mode 112 | optimized_prompt = Grompt.rephrase_prompt( 113 | user_prompt, model, temperature, max_tokens 114 | ) 115 | 116 | if optimized_prompt: 117 | st.subheader("Optimized Prompt:") 118 | st.write(optimized_prompt) 119 | else: 120 | st.warning("Please enter a prompt to optimize.") 121 | 122 | st.markdown("---") 123 | st.write("Powered by Groq LLM services.") 124 | 125 | st.sidebar.markdown("---") 126 | st.sidebar.info( 127 | "Note: Your API key is used only for this session and is not stored. " 128 | "Always keep your API keys confidential." 129 | ) 130 | 131 | st.sidebar.markdown("---") 132 | st.sidebar.markdown("[View on GitHub](https://github.com/jgravelle/Grompt)") 133 | st.sidebar.markdown("Created by J. Gravelle") --------------------------------------------------------------------------------