├── example.jpeg
├── requirements.txt
├── LICENSE
├── README.md
└── prompt_generator_v5.py

/example.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fstech-digital/gpt_prompt_generator/HEAD/example.jpeg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
langchain==0.0.144
PyPDF2==3.0.1
streamlit==1.21.0
openai==0.27.0
faiss-cpu==1.7.3
tiktoken==0.3.1
tokenizers==0.13.3
scikit-learn==1.2.2
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Felipe Silva

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## OpenAI Prompt Generator: Expert Simulation with Prompt Filter

This Python script uses OpenAI's chat completion API (configured for the gpt-3.5-turbo model) to simulate a variety of experts in different scenarios. The user selects an option from a menu, and the AI generates a response according to the chosen role and prompt. The script also features a critical mode, in which the AI critiques and improves its own generated content. A distinguishing aspect of the script is its prompt filter: a short guided menu that helps you build a more refined prompt before it is sent to the model.

## Features

* Simulate Expert: The AI simulates an expert in a chosen field.

* Challenge Narrative: The AI provides examples contradicting a given dominant narrative.

* Write in Different Styles: The AI generates content in various writing styles and tones.

* Critical Mode (Optional): After displaying the generated text, the program asks whether you want to enter "critical mode." If you answer 'y' (yes), the program builds a new prompt that asks the model to act as a critic of the previously generated response and provide a new perspective, then displays the critique. A minimal sketch of this step follows this list.
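To make the critical-mode flow concrete, here is a rough, self-contained sketch of what the script does in that step. It assumes openai==0.27.0 (as pinned in requirements.txt) and an API key in the OPENAI_API_KEY environment variable; the example prompt is arbitrary, and the real script builds the first message from the menu choices instead.

```python
import os

import openai

# Assumes openai==0.27.0 (as pinned in requirements.txt) and an API key
# exported as OPENAI_API_KEY.
openai.api_key = os.getenv("OPENAI_API_KEY", "")

# First pass: ask the model for an answer (the prompt here is just an example).
conversation = [{"role": "user", "content": "Explain photosynthesis in two sentences."}]
first = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=conversation)
answer = first.choices[0].message.content

# Critical mode: append the first answer as the assistant's turn, then ask
# the model to critique it and offer a new perspective.
conversation.append({"role": "assistant", "content": answer})
conversation.append({
    "role": "user",
    "content": f"Act as a good critic. Criticize this answer: '{answer}'. "
               "Convince me why it can be improved. Let's think step by step "
               "and provide a new perspective.",
})
critique = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=conversation)
print(critique.choices[0].message.content)
```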
## Usage

Ensure you have Python 3.x installed.

Install the openai package using pip:

```
pip install openai
```

Provide your OpenAI API key, either by setting the OPENAI_API_KEY environment variable or by pasting it into the script in place of the empty default.

Run the script:

```
python prompt_generator_v5.py
```

Follow the prompts to choose an option, provide the required information, and receive the AI-generated response.
Optionally, enter critical mode to have the AI critique and improve its own generated content.
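For example, a run that picks option 1 (simulate an expert) looks roughly like this. The menu text comes from prompt_generator_v5.py; the answers typed after each prompt are arbitrary examples, and the model's replies (shown as placeholders) will differ from run to run:

```
$ python prompt_generator_v5.py
Please choose an option:
1. Simulate an expert
2. Challenge the conventional narrative
3. Write in different styles or tones, such as satire or irony
Enter the option number: 1
Enter the prompt: Explain how vaccines work
Choose a role:
1. a teacher
2. a scientist
3. Enter your own role
Enter the role number: 2
Any additional information or context? Keep it brief
<AI-generated response is printed here>
Do you want to enter critical mode? (y/n): y
<critique of the first response is printed here>
```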
## Dependencies

* Python 3.x
* openai

## License

This project is licensed under the MIT License.

Please note that this project uses the OpenAI API, and you will need an API key to run the script. The script is configured to use the gpt-3.5-turbo model via the engine_name variable; if you switch it to GPT-4, make sure your account has access to that model first.
--------------------------------------------------------------------------------
/prompt_generator_v5.py:
--------------------------------------------------------------------------------
import openai
import os
import traceback

# Set the OpenAI API key: read it from the OPENAI_API_KEY environment
# variable, or paste your key in place of the empty default below.
openai.api_key = os.getenv("OPENAI_API_KEY", "")

# Set the model name
engine_name = "gpt-3.5-turbo"


def get_user_input():
    try:
        options = [
            "Simulate an expert",
            "Challenge the conventional narrative",
            "Write in different styles or tones, such as satire or irony"
        ]
        print("Please choose an option:")
        for i, option in enumerate(options, 1):
            print(f"{i}. {option}")

        choice = int(input("Enter the option number: "))
        while choice not in range(1, len(options) + 1):
            choice = int(input("Invalid choice. Enter a valid option number: "))

        if choice == 1:
            # Simulate an expert: the chosen role is prepended to the prompt
            desired_prompt = input("Enter the prompt: ")
            roles = ["a teacher", "a scientist", "Enter your own role"]
            print("Choose a role:")
            for i, role in enumerate(roles, 1):
                print(f"{i}. {role}")

            role_choice = int(input("Enter the role number: "))
            while role_choice not in range(1, len(roles) + 1):
                role_choice = int(input("Invalid choice. Enter a valid role number: "))

            role = roles[role_choice - 1] if role_choice != len(roles) else input("Enter the role: ")
            additional_info = input("Any additional information or context? ")
            user_message = f"Act as {role}. {desired_prompt}."
            if additional_info:
                user_message += " " + additional_info

        elif choice == 2:
            # Challenge the dominant narrative on a given topic
            topic = input("Enter a Topic: ")
            dominant_narrative = input("What is the dominant narrative? ")
            desired_prompt = (
                f"For the topic about {topic}, give examples that contradict "
                f"the dominant narrative that says {dominant_narrative}."
            )
            role = "a thought-provoking writer"
            user_message = f"Act as {role}. {desired_prompt}"

        elif choice == 3:
            # Write in a chosen style or tone
            desired_prompt = input("Enter the prompt: ")
            styles = ["satire", "irony", "Enter your own style or tone"]
            print("Choose a style or tone:")
            for i, style in enumerate(styles, 1):
                print(f"{i}. {style}")

            style_choice = int(input("Enter the style number: "))
            while style_choice not in range(1, len(styles) + 1):
                style_choice = int(input("Invalid choice. Enter a valid style number: "))

            style = styles[style_choice - 1] if style_choice != len(styles) else input("Enter the style or tone: ")
            role = f"a {style} writer"
            user_message = f"Act as {role}. {desired_prompt}"

        # Send the constructed message as the opening user turn of the conversation
        conversation = [{"role": "user", "content": user_message}]
        response = openai.ChatCompletion.create(
            model=engine_name,
            messages=conversation,
            temperature=0.7,
            max_tokens=150,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        print(response.choices[0].message.content)

        critical_mode = input("Do you want to enter critical mode? (y/n): ")
        if critical_mode.lower() == 'y':
            # Add the AI-generated response to the conversation as the assistant's turn
            ai_response = response.choices[0].message.content
            conversation.append({"role": "assistant", "content": ai_response})

            # Ask the model to act as a critic of its previous response
            critic_message = (
                f"Act as a good critic. Criticize this answer: '{ai_response}'. "
                "Convince me why it can be improved. Let's think step by step "
                "and provide a new perspective."
            )
            conversation.append({"role": "user", "content": critic_message})

            response = openai.ChatCompletion.create(
                model=engine_name,
                messages=conversation,
                temperature=0.7,
                max_tokens=150,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0
            )
            print(response.choices[0].message.content)

    except Exception as e:
        print(f"An error occurred: {e}")
        traceback.print_exc()


# Call the get_user_input function
get_user_input()
--------------------------------------------------------------------------------