├── .env_sample
├── .gitignore
├── Grompt.py
├── README.md
├── prompt_canvas.py
├── requirements.txt
├── src
│   ├── Grompt.md
│   └── custom_instructions.txt
└── streamlit_app.py
/.env_sample:
--------------------------------------------------------------------------------
# Groq API Key (required)
GROQ_API_KEY=your_api_key_here

# Default model to use (optional, uncomment to change)
# GROMPT_DEFAULT_MODEL=llama3-groq-70b-8192-tool-use-preview

# Default temperature for text generation (optional, uncomment to change)
# GROMPT_DEFAULT_TEMPERATURE=0.5

# Default maximum number of tokens to generate (optional, uncomment to change)
# GROMPT_DEFAULT_MAX_TOKENS=1024
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Python
__pycache__/
*.py[cod]
*$py.class

# Virtual Environment
venv/
env/
.env

# IDE files
.vscode/
.idea/

# Logs
*.log

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Grompt specific
grompt_output/
--------------------------------------------------------------------------------
/Grompt.py:
--------------------------------------------------------------------------------
import argparse
import os
from dotenv import load_dotenv
from pocketgroq import GroqProvider
from typing import Optional
from prompt_canvas import PromptCanvas

# Load environment variables from .env file
load_dotenv()

# Get configuration from environment variables or use defaults
DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile')
DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5'))
DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024'))

def craft_system_message(canvas: Optional[PromptCanvas] = None, prompt: str = "") -> str:
    # Build a structured system message from a PromptCanvas, or fall back to the generic rephrasing prompt.
    if canvas:
        return f"""You are a {canvas.persona} focused on delivering results for {canvas.audience}.

Task: {canvas.task}

Step-by-Step Approach:
{chr(10).join(f'- {step}' for step in canvas.steps)}

Context: {canvas.context}

References: {', '.join(canvas.references)}

Output Requirements:
- Format: {canvas.output_format}
- Tone: {canvas.tonality}"""
    else:
        return get_rephrased_user_prompt(prompt)

def get_rephrased_user_prompt(prompt: str) -> str:
    return f"""You are a professional prompt engineer. Optimize this prompt by making it clearer, more concise, and more effective.
User request: "{prompt}"
Rephrased:"""

def rephrase_prompt(prompt: str,
                    model: str = DEFAULT_MODEL,
                    temperature: float = DEFAULT_TEMPERATURE,
                    max_tokens: int = DEFAULT_MAX_TOKENS,
                    canvas: Optional[PromptCanvas] = None) -> str:
    try:
        groq = GroqProvider()
        system_message = craft_system_message(canvas, prompt)

        response = groq.generate(
            prompt=system_message,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
        )

        return response.strip()
    except Exception as e:
        raise Exception(f"Prompt engineering error: {str(e)}")

def main():
    parser = argparse.ArgumentParser(description="Rephrase prompts using Groq LLM.")
    parser.add_argument("prompt", help="The prompt to rephrase")
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--temperature", type=float, default=DEFAULT_TEMPERATURE)
    parser.add_argument("--max_tokens", type=int, default=DEFAULT_MAX_TOKENS)

    args = parser.parse_args()

    try:
        rephrased = rephrase_prompt(args.prompt, args.model, args.temperature, args.max_tokens)
        print("Rephrased prompt:")
        print(rephrased)
    except Exception as e:
        print(f"Error: {str(e)}")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Grompt Utility

Grompt is a Python utility that uses the Groq LLM provider service to instantly refactor rough prompts into amazingly detailed and effective ones. It's designed to optimize user prompts for better results when working with large language models.

## Features

- Rephrase and optimize user prompts using Groq's LLM services
- Configurable via environment variables or .env file
- Can be used as a module in other Python scripts or run from the command line
- Supports various Groq models and customizable parameters
- Includes a separate Streamlit web app for easy demonstration and testing
- Streamlit app supports API key input for use in hosted environments

## Prerequisites

- Python 3.7 or higher
- A Groq API key

## Installation

1. Clone this repository:
```
git clone https://github.com/jgravelle/Grompt.git
cd Grompt
```

2. Install the required dependencies:
```
pip install -r requirements.txt
```

3. Create a `.env` file in the project root directory and add your Groq API key:
```
GROQ_API_KEY=your_api_key_here
```

## Adding Grompt to Your Project

To use Grompt in your project, you only need to include the `Grompt.py` and `prompt_canvas.py` files. Follow these steps:

1. Copy `Grompt.py` and `prompt_canvas.py` into your project directory.
2. Install the required dependencies:
```
pip install pocketgroq python-dotenv
```
3. Import and use the `rephrase_prompt` function in your Python scripts:
```python
from Grompt import rephrase_prompt

original_prompt = "Write a story about a robot"
rephrased_prompt = rephrase_prompt(original_prompt)

print(rephrased_prompt)
```
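
`rephrase_prompt` also accepts an optional `PromptCanvas` (defined in `prompt_canvas.py`), which builds a structured system message from a persona, audience, task, steps, references, and output requirements instead of the generic rephrasing instruction. A minimal sketch (the field values below are illustrative, not part of the repository):
```python
from Grompt import rephrase_prompt
from prompt_canvas import PromptCanvas

# Illustrative values; fill in whatever fits your use case.
canvas = PromptCanvas(
    persona="senior technical writer",
    audience="first-year engineering students",
    task="Explain how a hash table works",
    steps=["Define the data structure", "Walk through insertion and lookup", "Explain collision handling"],
    context="Introductory data structures course",
    references=["CLRS, Chapter 11"],
    output_format="Markdown article of roughly 800 words",
    tonality="Friendly and precise",
)

# The prompt argument is still required; when a canvas is supplied it drives the system message.
structured_prompt = rephrase_prompt("Explain hash tables", canvas=canvas)
print(structured_prompt)
```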

## File Structure

- `Grompt.py`: The main Grompt utility file
- `prompt_canvas.py`: Defines the `PromptCanvas` dataclass used for structured prompts
- `streamlit_app.py`: A separate Streamlit app for demonstrating Grompt's capabilities
- `.env`: Configuration file for environment variables
- `requirements.txt`: List of Python dependencies
- `README.md`: This file

## Configuration

You can configure Grompt using environment variables or a `.env` file. Here are the available configuration options:

- `GROQ_API_KEY`: Your Groq API key (required)
- `GROMPT_DEFAULT_MODEL`: The default Groq model to use (optional, default is 'llama-3.3-70b-versatile')
- `GROMPT_DEFAULT_TEMPERATURE`: The default temperature for text generation (optional, default is 0.5)
- `GROMPT_DEFAULT_MAX_TOKENS`: The default maximum number of tokens to generate (optional, default is 1024)

Example `.env` file:

```
GROQ_API_KEY=your_api_key_here
GROMPT_DEFAULT_MODEL=llama-3.3-70b-versatile
GROMPT_DEFAULT_TEMPERATURE=0.7
GROMPT_DEFAULT_MAX_TOKENS=2048
```
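
Because `Grompt.py` reads these variables once at import time, they can also be set programmatically, as long as that happens before the module is imported (`load_dotenv()` does not override variables that are already set). A minimal sketch, assuming `GROQ_API_KEY` is already available from your environment or `.env` file:

```python
import os

# Grompt.py reads these once at import time, so set them before the import.
os.environ["GROMPT_DEFAULT_MODEL"] = "llama-3.3-70b-versatile"
os.environ["GROMPT_DEFAULT_TEMPERATURE"] = "0.7"
os.environ["GROMPT_DEFAULT_MAX_TOKENS"] = "2048"

from Grompt import rephrase_prompt

print(rephrase_prompt("Summarize the key ideas of special relativity"))
```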

## Usage

### Streamlit Web App

To run the Streamlit web app for an interactive demo:

```
streamlit run streamlit_app.py
```

This will start a local web server and open the Grompt demo in your default web browser. You can enter prompts, adjust parameters, and see the optimized results in real time.

When using the Streamlit app in a hosted environment:

1. Look for the sidebar on the left side of the app.
2. Enter your Groq API key in the "Enter your GROQ API Key:" field.
3. Your API key will be used only for the current session and is not stored.

Note: Always keep your API keys confidential and do not share them publicly.

### As a Command-Line Tool

Run Grompt from the command line:

```
python Grompt.py "Your prompt here" [--model MODEL] [--temperature TEMP] [--max_tokens MAX_TOKENS]
```

Options:
- `--model`: Specify the Groq model to use (overrides the default)
- `--temperature`: Set the temperature for text generation (overrides the default)
- `--max_tokens`: Set the maximum number of tokens to generate (overrides the default)

Example:
```
python Grompt.py "Write a poem about AI" --model llama3-groq-8b-8192-tool-use-preview --temperature 0.8 --max_tokens 500
```

### Practical Example

Here's an example of Grompt in action:

```
C:\ai\Grompt> python Grompt.py "Write an 11th grade level report on quantum physics"
Rephrased prompt:
"Compose a comprehensive report on quantum physics, tailored to an 11th-grade reading level, that includes clear explanations of key concepts, historical background, and real-world applications. Ensure the report is engaging, informative, and easy to understand for students at this level. Include relevant examples and diagrams to illustrate complex ideas. The report should be well-structured, with logical flow between sections, and should not exceed 2000 words. Please adhere to academic writing standards and provide a list of credible sources used in the research."
```

This example demonstrates how Grompt takes a simple, open-ended prompt and transforms it into a detailed, structured prompt that is likely to produce a high-quality response from an LLM.
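
A natural next step is to feed the optimized prompt back into the same provider to generate the final answer. Here is a sketch using the `pocketgroq` `GroqProvider` that `Grompt.py` itself wraps; the model name and token limit are illustrative:

```python
from pocketgroq import GroqProvider
from Grompt import rephrase_prompt

# Step 1: turn the rough request into an engineered prompt.
improved_prompt = rephrase_prompt("Write an 11th grade level report on quantum physics")

# Step 2: run the engineered prompt through the same Groq provider.
groq = GroqProvider()
report = groq.generate(
    prompt=improved_prompt,
    model="llama-3.3-70b-versatile",
    temperature=0.5,
    max_tokens=2048,
)
print(report)
```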

## Contributing

Contributions are welcome! Please feel free to submit a Pull Request.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
Include the name 'J. Gravelle' somewhere in your code and docs if you use this. He's full of himself.

## Acknowledgments

- Thanks to Groq for providing the LLM services used in this utility.
- This project was inspired by the need for better prompt engineering in AI applications.
- Created by J. Gravelle, who is indeed full of himself.

--------------------------------------------------------------------------------
/prompt_canvas.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass
from typing import List

@dataclass
class PromptCanvas:
    persona: str
    audience: str
    task: str
    steps: List[str]
    context: str
    references: List[str]
    output_format: str
    tonality: str
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
pocketgroq
groq
python-dotenv
streamlit
aiohttp
--------------------------------------------------------------------------------
/src/Grompt.md:
--------------------------------------------------------------------------------
# Grompt.py

```python
import argparse
import os
from dotenv import load_dotenv
from pocketgroq import GroqProvider
from pocketgroq.exceptions import GroqAPIKeyMissingError, GroqAPIError

# Load environment variables from .env file
load_dotenv()

# Get configuration from environment variables or use defaults
DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile')
DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5'))
DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024'))

def get_rephrased_user_prompt(user_request: str) -> str:
    """
    Generate a system message for prompt rephrasing.

    Args:
        user_request (str): The original user request.

    Returns:
        str: A system message for prompt rephrasing.
    """
    return f"""You are a professional prompt engineer. Your task is to optimize the user's prompt by making it clearer, more concise, and more effective. Only output the improved prompt without adding any commentary or labels. If the original prompt is already optimized, return it unchanged.
User request: "{user_request}"
Rephrased:
"""

def rephrase_prompt(prompt: str, model: str = DEFAULT_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: int = DEFAULT_MAX_TOKENS) -> str:
    """
    Rephrase the given prompt using the GroqProvider.

    Args:
        prompt (str): The original prompt to rephrase.
        model (str): The model to use for generation.
        temperature (float): The temperature for text generation.
        max_tokens (int): The maximum number of tokens to generate.

    Returns:
        str: The rephrased prompt.

    Raises:
        GroqAPIKeyMissingError: If the GROQ_API_KEY is not set.
        GroqAPIError: If an error occurs during the API call.
    """
    try:
        groq = GroqProvider()

        system_message = get_rephrased_user_prompt(prompt)

        response = groq.generate(
            prompt=system_message,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
        )

        return response.strip()
    except GroqAPIKeyMissingError:
        raise GroqAPIKeyMissingError("GROQ_API_KEY must be set in the environment or in a .env file")
    except GroqAPIError as e:
        raise GroqAPIError(f"Error calling Groq API: {str(e)}")

def main():
    parser = argparse.ArgumentParser(description="Rephrase a user prompt using Groq LLM.")
    parser.add_argument("prompt", help="The user prompt to rephrase.")
    parser.add_argument("--model", default=DEFAULT_MODEL, help="The Groq model to use.")
    parser.add_argument("--temperature", type=float, default=DEFAULT_TEMPERATURE, help="The temperature for text generation.")
    parser.add_argument("--max_tokens", type=int, default=DEFAULT_MAX_TOKENS, help="The maximum number of tokens to generate.")

    args = parser.parse_args()

    try:
        rephrased = rephrase_prompt(args.prompt, args.model, args.temperature, args.max_tokens)
        print("Rephrased prompt:")
        print(rephrased)
    except (GroqAPIKeyMissingError, GroqAPIError) as e:
        print(f"Error: {str(e)}")
    except Exception as e:
        print(f"An unexpected error occurred: {str(e)}")

def test_function():
    return "Grompt module imported successfully!"

if __name__ == "__main__":
    main()
```
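
Since this version raises `pocketgroq`'s typed exceptions, callers can distinguish a missing API key from a failed API call. A brief usage sketch (the prompt text is arbitrary):

```python
from Grompt import rephrase_prompt
from pocketgroq.exceptions import GroqAPIKeyMissingError, GroqAPIError

try:
    print(rephrase_prompt("Draft a polite follow-up email after a job interview"))
except GroqAPIKeyMissingError:
    print("Set GROQ_API_KEY in your environment or .env file first.")
except GroqAPIError as e:
    print(f"Groq API call failed: {e}")
```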

# streamlit_app.py

```python
import streamlit as st
import os
import sys
import importlib.util
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Diagnostic information
# st.write("Current working directory:", os.getcwd())
# st.write("Contents of current directory:", os.listdir())
# st.write("Python path:", sys.path)

# Function to import a module from a file path
def import_module_from_path(module_name, file_path):
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# Get configuration from environment variables or use defaults
DEFAULT_MODEL = os.getenv('GROMPT_DEFAULT_MODEL', 'llama-3.3-70b-versatile')
DEFAULT_TEMPERATURE = float(os.getenv('GROMPT_DEFAULT_TEMPERATURE', '0.5'))
DEFAULT_MAX_TOKENS = int(os.getenv('GROMPT_DEFAULT_MAX_TOKENS', '1024'))

# Sidebar for API key input and GitHub link
st.sidebar.title("Configuration")
GROQ_API_KEY = st.sidebar.text_input("Enter your GROQ API Key:", type="password")

if not GROQ_API_KEY:
    st.sidebar.warning("Please enter your GROQ API Key to use the app.")

# Main app
st.title("Grompt - Prompt Optimizer")

st.write("""
Grompt is a utility that uses Groq's LLM services to instantly optimize and rephrase prompts.
Enter your prompt below and see how Grompt can improve it! Add it to YOUR project in seconds:
""")
st.write("""