├── .DS_Store
├── .gitignore
├── README.MD
├── __pycache__
│   └── docx.cpython-311.pyc
├── agents
│   ├── .DS_Store
│   ├── environment
│   │   └── aqi
│   │       └── main.py
│   ├── game
│   │   └── main.py
│   ├── gemini-agents
│   │   ├── comparison.py
│   │   └── main.py
│   ├── health
│   │   └── main.py
│   ├── jobs
│   │   ├── README.md
│   │   ├── job-hunt-agent-new.py
│   │   ├── job-hunt-agent.py
│   │   └── requirements.txt
│   ├── legal
│   │   └── legal_team.py
│   ├── marketing
│   │   ├── competitor-study
│   │   │   └── ai-agent.py
│   │   ├── customer-support
│   │   │   └── customer-agent.py
│   │   └── lead-generation
│   │       └── agents.py
│   ├── newsletter
│   │   └── main.py
│   ├── openai
│   │   ├── README.md
│   │   └── basic-agent.py
│   ├── real-estate
│   │   └── main.py
│   ├── research
│   │   └── main.py
│   ├── sales
│   │   ├── __pycache__
│   │   │   └── with_st.cpython-311.pyc
│   │   ├── email_preview.py
│   │   ├── main.py
│   │   ├── run_email_preview.py
│   │   ├── st-ui.py
│   │   └── with_st.py
│   ├── social_media
│   │   ├── main.py
│   │   └── thinking-ant-social-media-calendar.py
│   ├── startup-ideas
│   │   └── main.py
│   └── thinking
│       ├── model-comparison.py
│       ├── o3-agent.py
│       └── streamlit-based.py
├── bg.png
├── bird.png
├── content_calendar_20250225_122502.json
├── db
│   ├── 6dc7ffbe-9fe7-40a1-a369-1b7124802880
│   │   ├── data_level0.bin
│   │   ├── header.bin
│   │   ├── length.bin
│   │   └── link_lists.bin
│   └── chroma.sqlite3
├── docx.py
├── dynamic_newsletter.py
├── dynamic_research.py
├── flappy_bird.py
├── newsletter.py
├── pipe.png
├── requirements.txt
├── research.py
├── research_results_20250212_105658.json
├── research_results_20250212_105935.json
├── research_results_20250212_110115.json
├── research_results_20250212_110507.json
├── research_results_20250212_110607.json
├── research_results_20250212_110628.json
├── research_results_20250212_110633.json
├── research_results_20250212_110656.json
├── research_results_20250212_112527.json
├── steps_to_run
├── teleprompter.py
└── templates
    └── voice_chat.html

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | 
3 | myenv/
4 | 
5 | agents/call-agent
6 | agents/simple-call-agent
7 | agents/agents
--------------------------------------------------------------------------------
/README.MD:
--------------------------------------------------------------------------------
1 | # Project Setup
2 | 
3 | Follow these steps to set up the project environment and install the required dependencies.
4 | 
5 | ## Set Up the Virtual Environment
6 | 
7 | 1. Create a new virtual environment using Python 3.11:
8 | 
9 | ```
10 | python3.11 -m venv myenv
11 | ```
12 | 
13 | 2. Activate the virtual environment:
14 | 
15 | ```
16 | source myenv/bin/activate
17 | ```
18 | 
19 | ## Install Dependencies
20 | 
21 | 3. Install the main project dependencies:
22 | 
23 | ```
24 | pip install crewai python-dotenv langchain_openai langchain_community
25 | ```
26 | 
27 | 4. Install additional tools for crewai:
28 | 
29 | ```
30 | pip install 'crewai[tools]'
31 | ```
32 | 
33 | ## Next Steps
34 | 
35 | After completing these steps, your environment will be set up and ready for development. Make sure to run all project-related commands within this activated virtual environment.
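36 | 
37 | ## Verify the Setup (Optional)
38 | 
39 | 5. Run a quick smoke test to confirm the core packages import and an agent can execute. The snippet below is a minimal sketch rather than part of the project: the file name `sanity_check.py` is hypothetical, and it assumes an `OPENAI_API_KEY` entry in a `.env` file at the project root (several agents in this repo also expect a `SERPER_API_KEY` there).
40 | 
41 | ```
42 | # sanity_check.py (hypothetical) -- smoke test for the crewai environment
43 | import os
44 | from dotenv import load_dotenv
45 | from crewai import Agent, Task, Crew
46 | 
47 | load_dotenv()  # reads OPENAI_API_KEY from .env
48 | assert os.getenv("OPENAI_API_KEY"), "Add OPENAI_API_KEY to your .env file first"
49 | 
50 | greeter = Agent(
51 |     role="Greeter",
52 |     goal="Reply with a one-line greeting",
53 |     backstory="A throwaway agent used only to verify the installation.",
54 | )
55 | 
56 | hello = Task(
57 |     description="Say hello in one short sentence.",
58 |     expected_output="A one-line greeting",
59 |     agent=greeter,
60 | )
61 | 
62 | print(Crew(agents=[greeter], tasks=[hello]).kickoff())
63 | ```
64 | 
65 | If this prints a greeting, the environment is ready, and the agents under `agents/` (for example, `python agents/newsletter/main.py`) should run inside the same virtual environment.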
--------------------------------------------------------------------------------
/__pycache__/docx.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/__pycache__/docx.cpython-311.pyc
--------------------------------------------------------------------------------
/agents/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/agents/.DS_Store
--------------------------------------------------------------------------------
/agents/game/main.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from openai import OpenAI
3 | from agno.agent import Agent as AgnoAgent
4 | from agno.models.openai import OpenAIChat as AgnoOpenAIChat
5 | from langchain_openai import ChatOpenAI
6 | import asyncio
7 | from browser_use import Browser
8 | 
9 | st.set_page_config(page_title="PyGame Code Generator", layout="wide")
10 | 
11 | # Initialize session state
12 | if "api_keys" not in st.session_state:
13 |     st.session_state.api_keys = {
14 |         "deepseek": "",
15 |         "openai": ""
16 |     }
17 | 
18 | # Streamlit sidebar for API keys
19 | with st.sidebar:
20 |     st.title("API Keys Configuration")
21 |     st.session_state.api_keys["deepseek"] = st.text_input(
22 |         "DeepSeek API Key",
23 |         type="password",
24 |         value=st.session_state.api_keys["deepseek"]
25 |     )
26 |     st.session_state.api_keys["openai"] = st.text_input(
27 |         "OpenAI API Key",
28 |         type="password",
29 |         value=st.session_state.api_keys["openai"]
30 |     )
31 | 
32 |     st.markdown("---")
33 |     st.info("""
34 |     📝 How to use:
35 |     1. Enter your API keys above
36 |     2. Write your PyGame visualization query
37 |     3. Click 'Generate Code' to get the code
38 |     4. Click 'Generate Visualization' to:
39 |        - Open Trinket.io PyGame editor
40 |        - Copy and paste the generated code
41 |        - Watch it run automatically
42 |     """)
43 | 
44 | # Main UI
45 | st.title("🎮 AI 3D Visualizer with DeepSeek R1")
46 | example_query = "Create a particle system simulation where 100 particles emit from the mouse position and respond to keyboard-controlled wind forces"
47 | query = st.text_area(
48 |     "Enter your PyGame query:",
49 |     height=70,
50 |     placeholder=f"e.g.: {example_query}"
51 | )
52 | 
53 | # Split the buttons into columns
54 | col1, col2 = st.columns(2)
55 | generate_code_btn = col1.button("Generate Code")
56 | generate_vis_btn = col2.button("Generate Visualization")
57 | 
58 | if generate_code_btn and query:
59 |     if not st.session_state.api_keys["deepseek"] or not st.session_state.api_keys["openai"]:
60 |         st.error("Please provide both API keys in the sidebar")
61 |         st.stop()
62 | 
63 |     # Initialize Deepseek client
64 |     deepseek_client = OpenAI(
65 |         api_key=st.session_state.api_keys["deepseek"],
66 |         base_url="https://api.deepseek.com"
67 |     )
68 | 
69 |     system_prompt = """You are a Pygame and Python Expert that specializes in making games and visualisation through pygame and python programming.
70 |     During your reasoning and thinking, include clear, concise, and well-formatted Python code in your reasoning. 
71 |     Always include explanations for the code you provide."""
72 | 
73 |     try:
74 |         # Get reasoning from Deepseek
75 |         with st.spinner("Generating solution..."):
76 |             deepseek_response = deepseek_client.chat.completions.create(
77 |                 model="deepseek-reasoner",
78 |                 messages=[
79 |                     {"role": "system", "content": system_prompt},
80 |                     {"role": "user", "content": query}
81 |                 ],
82 |                 max_tokens=1  # only reasoning_content is used below, so the visible answer is capped at a single token
83 |             )
84 | 
85 |         reasoning_content = deepseek_response.choices[0].message.reasoning_content
86 |         print("\nDeepseek Reasoning:\n", reasoning_content)
87 |         with st.expander("R1's Reasoning"):
88 |             st.write(reasoning_content)
89 | 
90 |         # Initialize the OpenAI code-extraction agent (via Agno)
91 |         openai_agent = AgnoAgent(
92 |             model=AgnoOpenAIChat(
93 |                 id="gpt-4o",
94 |                 api_key=st.session_state.api_keys["openai"]
95 |             ),
96 |             show_tool_calls=True,
97 |             markdown=True
98 |         )
99 | 
100 |         # Extract code
101 |         extraction_prompt = f"""Extract ONLY the Python code from the following content, which is the reasoning for a PyGame script request.
102 |         Return nothing but the raw code without any explanations, or markdown backticks:
103 |         {reasoning_content}"""
104 | 
105 |         with st.spinner("Extracting code..."):
106 |             code_response = openai_agent.run(extraction_prompt)
107 |             extracted_code = code_response.content
108 | 
109 |         # Store the generated code in session state
110 |         st.session_state.generated_code = extracted_code
111 | 
112 |         # Display the code
113 |         with st.expander("Generated PyGame Code", expanded=True):
114 |             st.code(extracted_code, language="python")
115 | 
116 |         st.success("Code generated successfully! Click 'Generate Visualization' to run it.")
117 | 
118 |     except Exception as e:
119 |         st.error(f"An error occurred: {str(e)}")
120 | 
121 | elif generate_vis_btn:
122 |     if "generated_code" not in st.session_state:
123 |         st.warning("Please generate code first before visualization")
124 |     else:
125 |         async def run_pygame_on_trinket(code: str) -> None:
126 |             browser = Browser()
127 |             from browser_use import Agent
128 |             async with await browser.new_context() as context:
129 |                 model = ChatOpenAI(
130 |                     model="gpt-4o",
131 |                     api_key=st.session_state.api_keys["openai"]
132 |                 )
133 | 
134 |                 agent1 = Agent(
135 |                     task="Go to https://trinket.io/features/pygame, that's your only job.",
136 |                     llm=model,
137 |                     browser_context=context,
138 |                 )
139 | 
140 |                 executor = Agent(
141 |                     task='Executor. Execute the code written by the user by clicking on the run button on the right.',
142 |                     llm=model,
143 |                     browser_context=context
144 |                 )
145 | 
146 |                 coder = Agent(
147 |                     task='Coder. Your job is to wait 10 seconds for the user to write the code in the code editor.',
148 |                     llm=model,
149 |                     browser_context=context
150 |                 )
151 | 
152 |                 viewer = Agent(
153 |                     task='Viewer. 
Your job is to just view the pygame window for 10 seconds.', 154 | llm=model, 155 | browser_context=context, 156 | ) 157 | 158 | with st.spinner("Running code on Trinket..."): 159 | try: 160 | await agent1.run() 161 | await coder.run() 162 | await executor.run() 163 | await viewer.run() 164 | st.success("Code is running on Trinket!") 165 | except Exception as e: 166 | st.error(f"Error running code on Trinket: {str(e)}") 167 | st.info("You can still copy the code above and run it manually on Trinket") 168 | 169 | # Run the async function with the stored code 170 | asyncio.run(run_pygame_on_trinket(st.session_state.generated_code)) 171 | 172 | elif generate_code_btn and not query: 173 | st.warning("Please enter a query before generating code") -------------------------------------------------------------------------------- /agents/gemini-agents/comparison.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from crewai import Agent, Task, Crew, LLM 4 | from crewai_tools import SerperDevTool, WebsiteSearchTool 5 | import streamlit as st 6 | import concurrent.futures 7 | from datetime import datetime 8 | import pandas as pd 9 | import json 10 | # Load environment variables 11 | load_dotenv() 12 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 13 | os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY") 14 | 15 | # Initialize enhanced search tools 16 | search_tool = SerperDevTool() 17 | website_tool = WebsiteSearchTool() 18 | 19 | def get_llm(model_choice='gemini'): 20 | """Get the specified language model""" 21 | if model_choice == 'openai': 22 | return LLM( 23 | model="openai/o1-mini", 24 | api_key=os.getenv("OPENAI_API_KEY"), 25 | verbose=True 26 | ) 27 | elif model_choice == 'gemini': 28 | return LLM( 29 | model="gemini/gemini-2.0-flash", 30 | temperature=0.7, 31 | google_api_key=os.getenv("GEMINI_API_KEY"), 32 | verbose=True 33 | ) 34 | else: # ollama 35 | return LLM( 36 | model="ollama/deepseek-r1:latest", 37 | base_url="http://localhost:11434", 38 | ) 39 | 40 | def create_agents(model_choice='gemini'): 41 | """Create specialized research and analysis agents""" 42 | llm = get_llm(model_choice) 43 | 44 | deep_researcher = Agent( 45 | role='Deep Research Specialist', 46 | goal='Conduct comprehensive internet research and data gathering', 47 | backstory="""Expert at conducting deep, thorough research across multiple sources. 48 | Skilled at finding hard-to-locate information and connecting disparate data points. 49 | Specializes in complex research tasks that would typically take hours or days.""", 50 | tools=[search_tool, website_tool], 51 | llm=llm, 52 | verbose=True, 53 | max_iter=100, 54 | allow_delegation=False, 55 | max_rpm=50, 56 | max_retry_limit=3 57 | ) 58 | 59 | analyst = Agent( 60 | role='Research Analyst', 61 | goal='Analyze and synthesize complex research findings', 62 | backstory="""Expert analyst skilled at processing large amounts of information, 63 | identifying patterns, and drawing meaningful conclusions. Specializes in turning 64 | raw research into actionable insights.""", 65 | tools=[search_tool], 66 | llm=llm, 67 | verbose=True, 68 | max_iter=75, 69 | allow_delegation=False, 70 | max_rpm=30, 71 | max_retry_limit=2 72 | ) 73 | 74 | report_writer = Agent( 75 | role='Research Report Writer', 76 | goal='Create comprehensive, well-structured research reports', 77 | backstory="""Expert at transforming complex research and analysis into 78 | clear, actionable reports. 
Skilled at maintaining detail while ensuring 79 | accessibility and practical value.""", 80 | llm=llm, 81 | verbose=True, 82 | max_iter=50, 83 | allow_delegation=False, 84 | max_rpm=20, 85 | max_retry_limit=2 86 | ) 87 | 88 | return deep_researcher, analyst, report_writer 89 | 90 | def create_tasks(researcher, analyst, writer, research_query): 91 | """Create research tasks with clear objectives""" 92 | deep_research_task = Task( 93 | description=f"""Conduct focused research on: {research_query} 94 | 95 | Step-by-step approach: 96 | 1. Initial broad search to identify key sources 97 | 2. Deep dive into most relevant sources 98 | 3. Extract specific details and evidence 99 | 4. Verify key findings across sources 100 | 5. Document sources and findings clearly 101 | 102 | Keep focused on specific, verified information.""", 103 | agent=researcher, 104 | expected_output="Detailed research findings with verified sources" 105 | ) 106 | 107 | analysis_task = Task( 108 | description=f"""Analyze the research findings about {research_query}: 109 | 110 | Follow these steps: 111 | 1. Review and categorize all findings 112 | 2. Identify main themes and patterns 113 | 3. Evaluate source credibility 114 | 4. Note any inconsistencies 115 | 5. Summarize key insights 116 | 117 | Focus on clear, actionable analysis.""", 118 | agent=analyst, 119 | context=[deep_research_task], 120 | expected_output="Clear analysis of findings with key insights" 121 | ) 122 | 123 | report_task = Task( 124 | description=f"""Create a structured report about {research_query}: 125 | 126 | Include: 127 | 1. Executive summary (2-3 paragraphs) 128 | 2. Key findings (bullet points) 129 | 3. Supporting evidence 130 | 4. Conclusions 131 | 5. References 132 | 133 | Keep it clear and focused.""", 134 | agent=writer, 135 | context=[deep_research_task, analysis_task], 136 | expected_output="Concise, well-structured report" 137 | ) 138 | 139 | return [deep_research_task, analysis_task, report_task] 140 | 141 | def create_crew(agents, tasks): 142 | """Create a crew with optimal settings""" 143 | return Crew( 144 | agents=agents, 145 | tasks=tasks, 146 | verbose=True, 147 | max_rpm=100, 148 | process="sequential" 149 | ) 150 | 151 | def run_research(model_choice, query): 152 | """Run research with specified model and return results""" 153 | try: 154 | start_time = datetime.now() 155 | researcher, analyst, writer = create_agents(model_choice) 156 | tasks = create_tasks(researcher, analyst, writer, query) 157 | crew = create_crew([researcher, analyst, writer], tasks) 158 | result = crew.kickoff() 159 | execution_time = (datetime.now() - start_time).total_seconds() 160 | return {'result': result, 'execution_time': execution_time} 161 | except Exception as e: 162 | return f"Error with {model_choice}: {str(e)}" 163 | 164 | def save_results(query, results): 165 | """Save research results to JSON file""" 166 | timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 167 | filename = f"research_results_{timestamp}.json" 168 | 169 | data = { 170 | "query": query, 171 | "timestamp": timestamp, 172 | "results": results 173 | } 174 | 175 | with open(filename, "w") as f: 176 | json.dump(data, f, indent=4) 177 | 178 | return filename 179 | 180 | def main(): 181 | st.set_page_config(page_title="Research Model Comparison", layout="wide") 182 | 183 | st.title("🔍 Deep Research Model Comparison") 184 | 185 | # Sidebar configuration 186 | st.sidebar.header("Configuration") 187 | selected_models = st.sidebar.multiselect( 188 | "Select Models to Compare", 189 | 
["Gemini", "OpenAI", "Ollama"], 190 | default=["Gemini"] 191 | ) 192 | 193 | # Convert display names to internal names 194 | model_mapping = { 195 | "Gemini": "gemini", 196 | "OpenAI": "openai", 197 | "Ollama": "ollama" 198 | } 199 | 200 | # Main query input 201 | query = st.text_area("Research Query", height=100, placeholder="Enter your research query here...") 202 | 203 | if st.button("Start Research", type="primary"): 204 | if not query: 205 | st.error("Please enter a research query") 206 | return 207 | 208 | if not selected_models: 209 | st.error("Please select at least one model") 210 | return 211 | 212 | # Create progress containers 213 | progress_bars = {model: st.progress(0) for model in selected_models} 214 | status_containers = {model: st.empty() for model in selected_models} 215 | timer_containers = {model: st.empty() for model in selected_models} 216 | 217 | # Initialize results dictionary 218 | results = {} 219 | 220 | # Create columns for results 221 | cols = st.columns(len(selected_models)) 222 | result_containers = {model: cols[i].container() for i, model in enumerate(selected_models)} 223 | 224 | start_times = {model: None for model in selected_models} 225 | 226 | # Run research for each selected model 227 | with concurrent.futures.ThreadPoolExecutor() as executor: 228 | future_to_model = { 229 | executor.submit(run_research, model_mapping[model], query): model 230 | for model in selected_models 231 | } 232 | 233 | # Start times for each model 234 | for model in selected_models: 235 | start_times[model] = datetime.now() 236 | 237 | while future_to_model: 238 | done, _ = concurrent.futures.wait(future_to_model.keys(), timeout=0.1) 239 | 240 | # Update running timers 241 | for model in selected_models: 242 | if model in results: # Skip if already completed 243 | continue 244 | current_time = (datetime.now() - start_times[model]).total_seconds() 245 | timer_containers[model].text(f"⏱️ Running Time: {current_time:.1f}s") 246 | 247 | for future in done: 248 | model = future_to_model[future] 249 | try: 250 | result_data = future.result() 251 | if isinstance(result_data, dict): 252 | results[model] = result_data['result'] 253 | execution_time = result_data['execution_time'] 254 | 255 | progress_bars[model].progress(100) 256 | status_containers[model].success(f"{model} Research Complete") 257 | timer_containers[model].text(f"⏱️ Final Time: {execution_time:.2f}s") 258 | 259 | with result_containers[model]: 260 | st.subheader(f"{model} Results") 261 | st.write(results[model]) 262 | else: 263 | progress_bars[model].progress(100) 264 | status_containers[model].error(f"Error with {model}: {result_data}") 265 | timer_containers[model].empty() 266 | except Exception as e: 267 | progress_bars[model].progress(100) 268 | status_containers[model].error(f"Error with {model}: {str(e)}") 269 | timer_containers[model].empty() 270 | 271 | del future_to_model[future] 272 | 273 | # Save results if any were generated 274 | if results: 275 | filename = save_results(query, results) 276 | st.sidebar.success(f"Results saved to {filename}") 277 | 278 | # Create comparison table 279 | st.subheader("Quick Comparison") 280 | comparison_data = { 281 | "Model": list(results.keys()), 282 | "Response Length": [len(str(r)) for r in results.values()], 283 | "Contains References": ["References" in str(r) for r in results.values()], 284 | "Contains Analysis": ["Analysis" in str(r) for r in results.values()] 285 | } 286 | comparison_df = pd.DataFrame(comparison_data) 287 | st.dataframe(comparison_df) 288 | 289 | if 
__name__ == "__main__": 290 | main() -------------------------------------------------------------------------------- /agents/gemini-agents/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from crewai import Agent, Task, Crew, LLM 4 | from crewai_tools import SerperDevTool, WebsiteSearchTool 5 | from langchain_openai import ChatOpenAI 6 | from langchain_community.llms import Ollama 7 | from langchain_google_genai import ChatGoogleGenerativeAI 8 | 9 | # Load environment variables 10 | load_dotenv() 11 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 12 | os.environ["GOOGLE_API_KEY"] = os.getenv("GOOGLE_API_KEY") 13 | 14 | # Initialize enhanced search tools 15 | search_tool = SerperDevTool() 16 | website_tool = WebsiteSearchTool() 17 | 18 | def get_llm(model_choice='gemini'): 19 | """Get the specified language model""" 20 | if model_choice == 'openai': 21 | return ChatOpenAI( 22 | model_name="o3-mini", 23 | ) 24 | elif model_choice == 'gemini': 25 | return LLM( 26 | model="gemini/gemini-2.0-flash", 27 | temperature=0.7, 28 | google_api_key=os.getenv("GOOGLE_API_KEY"), 29 | verbose=True 30 | ) 31 | else: # ollama 32 | return Ollama( 33 | model="deepseek-r1:latest", 34 | base_url="http://localhost:11434", 35 | temperature=0.7 36 | ) 37 | 38 | def create_agents(model_choice='gemini'): 39 | """Create specialized research and analysis agents""" 40 | llm = get_llm(model_choice) 41 | 42 | deep_researcher = Agent( 43 | role='Deep Research Specialist', 44 | goal='Conduct comprehensive internet research and data gathering', 45 | backstory="""Expert at conducting deep, thorough research across multiple sources. 46 | Skilled at finding hard-to-locate information and connecting disparate data points. 47 | Specializes in complex research tasks that would typically take hours or days.""", 48 | tools=[search_tool, website_tool], 49 | llm=llm, 50 | verbose=True, 51 | max_iter=100, 52 | allow_delegation=False, 53 | max_rpm=50, 54 | max_retry_limit=3 55 | ) 56 | 57 | analyst = Agent( 58 | role='Research Analyst', 59 | goal='Analyze and synthesize complex research findings', 60 | backstory="""Expert analyst skilled at processing large amounts of information, 61 | identifying patterns, and drawing meaningful conclusions. Specializes in turning 62 | raw research into actionable insights.""", 63 | tools=[search_tool], 64 | llm=llm, 65 | verbose=True, 66 | max_iter=75, 67 | allow_delegation=False, 68 | max_rpm=30, 69 | max_retry_limit=2 70 | ) 71 | 72 | report_writer = Agent( 73 | role='Research Report Writer', 74 | goal='Create comprehensive, well-structured research reports', 75 | backstory="""Expert at transforming complex research and analysis into 76 | clear, actionable reports. Skilled at maintaining detail while ensuring 77 | accessibility and practical value.""", 78 | llm=llm, 79 | verbose=True, 80 | max_iter=50, 81 | allow_delegation=False, 82 | max_rpm=20, 83 | max_retry_limit=2 84 | ) 85 | 86 | return deep_researcher, analyst, report_writer 87 | 88 | def create_tasks(researcher, analyst, writer, research_query): 89 | """Create research tasks with clear objectives""" 90 | deep_research_task = Task( 91 | description=f"""Conduct focused research on: {research_query} 92 | 93 | Step-by-step approach: 94 | 1. Initial broad search to identify key sources 95 | 2. Deep dive into most relevant sources 96 | 3. Extract specific details and evidence 97 | 4. Verify key findings across sources 98 | 5. 
Document sources and findings clearly
99 | 
100 |         Keep focused on specific, verified information.""",
101 |         agent=researcher,
102 |         expected_output="Detailed research findings with verified sources"
103 |     )
104 | 
105 |     analysis_task = Task(
106 |         description=f"""Analyze the research findings about {research_query}:
107 | 
108 |         Follow these steps:
109 |         1. Review and categorize all findings
110 |         2. Identify main themes and patterns
111 |         3. Evaluate source credibility
112 |         4. Note any inconsistencies
113 |         5. Summarize key insights
114 | 
115 |         Focus on clear, actionable analysis.""",
116 |         agent=analyst,
117 |         context=[deep_research_task],
118 |         expected_output="Clear analysis of findings with key insights"
119 |     )
120 | 
121 |     report_task = Task(
122 |         description=f"""Create a structured report about {research_query}:
123 | 
124 |         Include:
125 |         1. Executive summary (2-3 paragraphs)
126 |         2. Key findings (bullet points)
127 |         3. Supporting evidence
128 |         4. Conclusions
129 |         5. References
130 | 
131 |         Keep it clear and focused.""",
132 |         agent=writer,
133 |         context=[deep_research_task, analysis_task],
134 |         expected_output="Concise, well-structured report"
135 |     )
136 | 
137 |     return [deep_research_task, analysis_task, report_task]
138 | 
139 | def create_crew(agents, tasks):
140 |     """Create a crew with optimal settings"""
141 |     return Crew(
142 |         agents=agents,
143 |         tasks=tasks,
144 |         verbose=True,
145 |         max_rpm=100,
146 |         process="sequential"
147 |     )
148 | 
149 | def main():
150 |     print("\n🔍 Welcome to Deep Research Crew!")
151 |     print("\nAvailable Models:")
152 |     print("1. Google Gemini 2.0 Flash")
153 |     print("2. OpenAI o3-mini (Requires API key)")
154 |     print("3. Local DeepSeek-r1 (Requires Ollama)")
155 | 
156 |     choice = input("\nSelect model (1-3): ").strip()
157 |     model_choice = {
158 |         '1': 'gemini',
159 |         '2': 'openai',
160 |         '3': 'ollama'
161 |     }.get(choice, 'gemini')
162 | 
163 |     if model_choice == 'ollama':
164 |         print("\nUsing Ollama with DeepSeek-r1")
165 |         print("Ensure Ollama is running: ollama run deepseek-r1:latest")
166 | 
167 |     query = input("\nWhat would you like researched? 
(Be specific): ") 168 | 169 | try: 170 | researcher, analyst, writer = create_agents(model_choice) 171 | tasks = create_tasks(researcher, analyst, writer, query) 172 | crew = create_crew([researcher, analyst, writer], tasks) 173 | 174 | print("\n🔍 Starting deep research process...") 175 | result = crew.kickoff() 176 | 177 | print("\n📊 Research Report:") 178 | print("==================") 179 | print(result) 180 | 181 | except Exception as e: 182 | print(f"\n❌ Error: {str(e)}") 183 | if model_choice == 'openai': 184 | print("\nTip: Check your OpenAI API key") 185 | elif model_choice == 'gemini': 186 | print("\nTip: Check your Google API key") 187 | else: 188 | print("\nTip: Ensure Ollama is running with deepseek-r1:latest") 189 | print("Run: ollama run deepseek-r1:latest") 190 | 191 | if __name__ == "__main__": 192 | main() -------------------------------------------------------------------------------- /agents/health/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import streamlit as st 3 | from dotenv import load_dotenv 4 | from crewai import Agent, Task, Crew, LLM 5 | from crewai_tools import SerperDevTool 6 | from langchain_openai import ChatOpenAI 7 | 8 | # Load environment variables 9 | load_dotenv() 10 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 11 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") 12 | 13 | # Initialize the search tool 14 | search_tool = SerperDevTool() 15 | 16 | def get_llm(): 17 | return LLM( 18 | model="openai/o1-mini", 19 | api_key=os.getenv("OPENAI_API_KEY"), 20 | verbose=True 21 | ) 22 | 23 | def create_agents(): 24 | """Create the specialized nutrition agents.""" 25 | llm = get_llm() 26 | 27 | # Nutrition Researcher 28 | nutritionist = Agent( 29 | role='Nutrition Specialist', 30 | goal='Research and develop personalized nutritional recommendations based on scientific evidence', 31 | backstory='''You are a highly qualified nutritionist with expertise in therapeutic diets, 32 | nutrient interactions, and dietary requirements across different health conditions. 33 | Your recommendations are always backed by peer-reviewed research.''', 34 | tools=[search_tool], 35 | llm=llm, 36 | verbose=True 37 | ) 38 | 39 | # Medical Nutrition Specialist 40 | medical_specialist = Agent( 41 | role='Medical Nutrition Therapist', 42 | goal='Analyze medical conditions and provide appropriate dietary modifications', 43 | backstory='''With dual training in medicine and nutrition, you specialize in managing 44 | nutrition-related aspects of various medical conditions. You understand 45 | medication-food interactions and how to optimize nutrition within medical constraints.''', 46 | tools=[search_tool], 47 | llm=llm, 48 | verbose=True 49 | ) 50 | 51 | # Diet Plan Creator 52 | diet_planner = Agent( 53 | role='Therapeutic Diet Planner', 54 | goal='Create detailed, practical and enjoyable meal plans tailored to individual needs', 55 | backstory='''You excel at transforming clinical nutrition requirements into delicious, 56 | practical eating plans. 
You have extensive knowledge of food preparation, 57 | nutrient preservation, and food combinations that optimize both health and enjoyment.''', 58 | llm=llm, 59 | verbose=True 60 | ) 61 | 62 | return nutritionist, medical_specialist, diet_planner 63 | 64 | def create_tasks(nutritionist, medical_specialist, diet_planner, user_info): 65 | """Create tasks for each agent based on user information.""" 66 | 67 | # First task: Research nutrition needs based on demographics 68 | demographics_research = Task( 69 | description=f'''Research nutritional needs for an individual with the following demographics: 70 | - Age: {user_info['age']} 71 | - Gender: {user_info['gender']} 72 | - Height: {user_info['height']} 73 | - Weight: {user_info['weight']} 74 | - Activity Level: {user_info['activity_level']} 75 | - Goals: {user_info['goals']} 76 | 77 | Provide detailed nutritional requirements including: 78 | 1. Caloric needs (basal and adjusted for activity) 79 | 2. Macronutrient distribution (proteins, carbs, fats) 80 | 3. Key micronutrients particularly important for this demographic 81 | 4. Hydration requirements 82 | 5. Meal timing and frequency recommendations''', 83 | agent=nutritionist, 84 | expected_output="A comprehensive nutritional profile with scientific rationale" 85 | ) 86 | 87 | # Second task: Analyze medical conditions and adjust nutritional recommendations 88 | medical_analysis = Task( 89 | description=f'''Analyze the following medical conditions and medications, then provide dietary modifications: 90 | - Medical Conditions: {user_info['medical_conditions']} 91 | - Medications: {user_info['medications']} 92 | - Allergies/Intolerances: {user_info['allergies']} 93 | 94 | Consider the baseline nutritional profile and provide: 95 | 1. Specific nutrients to increase or limit based on each condition 96 | 2. Food-medication interactions to avoid 97 | 3. Potential nutrient deficiencies associated with these conditions/medications 98 | 4. Foods that may help manage symptoms or improve outcomes 99 | 5. Foods to strictly avoid''', 100 | agent=medical_specialist, 101 | context=[demographics_research], 102 | expected_output="A detailed analysis of medical nutrition therapy adjustments" 103 | ) 104 | 105 | # Third task: Create the comprehensive diet plan 106 | diet_plan = Task( 107 | description=f'''Create a detailed, practical diet plan incorporating all information: 108 | - User's Food Preferences: {user_info['food_preferences']} 109 | - Cooking Skills/Time: {user_info['cooking_ability']} 110 | - Budget Constraints: {user_info['budget']} 111 | - Cultural/Religious Factors: {user_info['cultural_factors']} 112 | 113 | Develop a comprehensive nutrition plan that includes: 114 | 1. Specific foods to eat daily, weekly, and occasionally with portion sizes 115 | 2. A 7-day meal plan with specific meals and recipes 116 | 3. Grocery shopping list with specific items 117 | 4. Meal preparation tips and simple recipes 118 | 5. Eating out guidelines and suggested restaurant options/orders 119 | 6. Supplement recommendations if necessary (with scientific justification) 120 | 7. Hydration schedule and recommended beverages 121 | 8. 
How to monitor progress and potential adjustments over time''', 122 | agent=diet_planner, 123 | context=[demographics_research, medical_analysis], 124 | expected_output="A comprehensive, practical, and personalized nutrition plan" 125 | ) 126 | 127 | return [demographics_research, medical_analysis, diet_plan] 128 | 129 | def create_crew(agents, tasks): 130 | """Create the CrewAI crew with the specified agents and tasks.""" 131 | return Crew( 132 | agents=agents, 133 | tasks=tasks, 134 | verbose=True 135 | ) 136 | 137 | def run_nutrition_advisor(user_info): 138 | """Run the nutrition advisor with the user information.""" 139 | try: 140 | # Create agents 141 | nutritionist, medical_specialist, diet_planner = create_agents() 142 | 143 | # Create tasks 144 | tasks = create_tasks(nutritionist, medical_specialist, diet_planner, user_info) 145 | 146 | # Create crew 147 | crew = create_crew([nutritionist, medical_specialist, diet_planner], tasks) 148 | 149 | # Execute the crew 150 | with st.spinner('Our nutrition team is creating your personalized plan. This may take a few minutes...'): 151 | result = crew.kickoff() 152 | 153 | return result 154 | except Exception as e: 155 | st.error(f"An error occurred: {str(e)}") 156 | return None 157 | 158 | def app(): 159 | """Main Streamlit application.""" 160 | st.set_page_config(page_title="Personalized Nutrition Advisor", page_icon="🥗", layout="wide") 161 | 162 | st.title("🥗 Personalized Nutrition Advisor") 163 | st.markdown(""" 164 | Get a detailed nutrition plan based on your demographics, health conditions, and preferences. 165 | Our AI team of nutrition specialists will create a personalized recommendation just for you. 166 | """) 167 | 168 | # Create tabs for organization 169 | tab1, tab2, tab3 = st.tabs(["Basic Information", "Health Details", "Preferences & Lifestyle"]) 170 | 171 | with tab1: 172 | st.header("Personal Information") 173 | col1, col2 = st.columns(2) 174 | 175 | with col1: 176 | age = st.number_input("Age", min_value=1, max_value=120, value=30) 177 | gender = st.selectbox("Gender", ["Male", "Female", "Non-binary/Other"]) 178 | height = st.text_input("Height (e.g., 5'10\" or 178 cm)", "5'10\"") 179 | 180 | with col2: 181 | weight = st.text_input("Weight (e.g., 160 lbs or 73 kg)", "160 lbs") 182 | activity_level = st.select_slider( 183 | "Activity Level", 184 | options=["Sedentary", "Lightly Active", "Moderately Active", "Very Active", "Extremely Active"] 185 | ) 186 | goals = st.multiselect( 187 | "Nutrition Goals", 188 | ["Weight Loss", "Weight Gain", "Maintenance", "Muscle Building", "Better Energy", 189 | "Improved Athletic Performance", "Disease Management", "General Health"] 190 | ) 191 | 192 | with tab2: 193 | st.header("Health Information") 194 | 195 | medical_conditions = st.text_area( 196 | "Medical Conditions (separate with commas)", 197 | placeholder="E.g., Diabetes Type 2, Hypertension, Hypothyroidism..." 198 | ) 199 | 200 | medications = st.text_area( 201 | "Current Medications (separate with commas)", 202 | placeholder="E.g., Metformin, Lisinopril, Levothyroxine..." 203 | ) 204 | 205 | allergies = st.text_area( 206 | "Food Allergies/Intolerances (separate with commas)", 207 | placeholder="E.g., Lactose, Gluten, Shellfish, Peanuts..." 208 | ) 209 | 210 | with tab3: 211 | st.header("Preferences & Lifestyle") 212 | 213 | col1, col2 = st.columns(2) 214 | 215 | with col1: 216 | food_preferences = st.text_area( 217 | "Food Preferences & Dislikes", 218 | placeholder="E.g., Prefer plant-based, dislike seafood..." 
219 |             )
220 | 
221 |             cooking_ability = st.select_slider(
222 |                 "Cooking Skills & Available Time",
223 |                 options=["Very Limited", "Basic/Quick Meals", "Average", "Advanced/Can Spend Time", "Professional Level"]
224 |             )
225 | 
226 |         with col2:
227 |             budget = st.select_slider(
228 |                 "Budget Considerations",
229 |                 options=["Very Limited", "Budget Conscious", "Moderate", "Flexible", "No Constraints"]
230 |             )
231 | 
232 |             cultural_factors = st.text_area(
233 |                 "Cultural or Religious Dietary Factors",
234 |                 placeholder="E.g., Halal, Kosher, Mediterranean tradition..."
235 |             )
236 | 
237 |     # Collect all user information
238 |     user_info = {
239 |         "age": age,
240 |         "gender": gender,
241 |         "height": height,
242 |         "weight": weight,
243 |         "activity_level": activity_level,
244 |         "goals": ", ".join(goals) if goals else "General health improvement",
245 |         "medical_conditions": medical_conditions or "None reported",
246 |         "medications": medications or "None reported",
247 |         "allergies": allergies or "None reported",
248 |         "food_preferences": food_preferences or "No specific preferences",
249 |         "cooking_ability": cooking_ability,
250 |         "budget": budget,
251 |         "cultural_factors": cultural_factors or "No specific factors"
252 |     }
253 | 
254 |     # Check if API keys are present
255 |     if not os.getenv("SERPER_API_KEY") or not os.getenv("OPENAI_API_KEY"):
256 |         st.warning("⚠️ API keys not detected. Please add your SERPER_API_KEY and OPENAI_API_KEY to your .env file.")
257 | 
258 |     # Create a submission button
259 |     if st.button("Generate Nutrition Plan"):
260 |         if not goals:
261 |             st.error("Please select at least one nutrition goal.")
262 |             return
263 | 
264 |         # Display user information summary
265 |         with st.expander("Summary of Your Information"):
266 |             st.json(user_info)
267 | 
268 |         # Run the nutrition advisor
269 |         result = run_nutrition_advisor(user_info)
270 | 
271 |         if result:
272 |             st.success("✅ Your personalized nutrition plan is ready!")
273 |             st.markdown("## Your Personalized Nutrition Plan")
274 |             st.markdown(str(result))  # kickoff() may return a CrewOutput object rather than a plain string
275 | 
276 |             # Add download capability
277 |             st.download_button(
278 |                 label="Download Nutrition Plan",
279 |                 data=str(result),  # download_button expects str or bytes
280 |                 file_name="my_nutrition_plan.md",
281 |                 mime="text/markdown"
282 |             )
283 | 
284 | if __name__ == "__main__":
285 |     app()
--------------------------------------------------------------------------------
/agents/jobs/README.md:
--------------------------------------------------------------------------------
1 | # AI Job Hunting Assistant
2 | 
3 | An intelligent job hunting assistant that helps you find and analyze job opportunities based on your preferences. This tool uses AI to search job sites like Naukri, Indeed, and Monster to find relevant job listings and provide personalized recommendations.
4 | 
5 | ## Features
6 | 
7 | - **Job Search**: Find job opportunities matching your job title, location, experience, and skills
8 | - **Job Analysis**: Get detailed analysis of job opportunities including skills match, company insights, and application tips
9 | - **Industry Trends**: Analyze industry trends including salary ranges, growth rates, and in-demand skills
10 | - **Career Recommendations**: Receive personalized recommendations for the best job opportunities
11 | 
12 | ## Requirements
13 | 
14 | - Python 3.8+
15 | - Firecrawl API key (for web scraping)
16 | - OpenAI API key (for AI analysis)
17 | 
18 | ## Installation
19 | 
20 | 1. Clone the repository
21 | 2. Install the required packages:
22 | ```
23 | pip install -r requirements.txt
24 | ```
25 | 3. 
Set up your API keys: 26 | - Create a `.env` file in the root directory 27 | - Add your API keys: 28 | ``` 29 | FIRECRAWL_API_KEY=your_firecrawl_api_key 30 | OPENAI_API_KEY=your_openai_api_key 31 | OPENAI_MODEL_ID=o3-mini # or gpt-4o-mini 32 | ``` 33 | 34 | ## Usage 35 | 36 | Run the Streamlit app: 37 | 38 | ``` 39 | streamlit run agents/jobs/job-hunt-agent.py 40 | ``` 41 | 42 | Then: 43 | 44 | 1. Enter your job search criteria: 45 | - Job Title (e.g., "Software Engineer") 46 | - Location (e.g., "Bangalore" or "Remote") 47 | - Experience (in years) 48 | - Skills (comma-separated) 49 | - Industry/Job Category 50 | 51 | 2. Click "Start Job Search" to begin the search process 52 | 53 | 3. Review the job recommendations and industry trends analysis 54 | 55 | ## How It Works 56 | 57 | 1. The agent uses Firecrawl to search job sites for opportunities matching your criteria 58 | 2. It extracts detailed information about each job including title, company, location, salary, and requirements 59 | 3. The AI analyzes the job listings to find the best matches for your profile 60 | 4. The agent also analyzes industry trends to provide insights about salary ranges, growth potential, and in-demand skills 61 | 5. All information is presented in a structured format with actionable recommendations 62 | 63 | ## Supported Job Sites 64 | 65 | - Naukri 66 | - Indeed 67 | - Monster 68 | - PayScale (for industry trends) 69 | 70 | ## Note 71 | 72 | This tool requires valid API keys to function. The Firecrawl API is used for web scraping, and the OpenAI API is used for AI analysis. You can enter these keys in the sidebar of the application or set them as environment variables. -------------------------------------------------------------------------------- /agents/jobs/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit>=1.30.0 2 | pydantic>=2.0.0 3 | python-dotenv>=1.0.0 4 | firecrawl>=0.1.0 5 | agno>=0.1.0 6 | openai>=1.0.0 -------------------------------------------------------------------------------- /agents/marketing/lead-generation/agents.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import requests 3 | from agno.agent import Agent 4 | from agno.tools.firecrawl import FirecrawlTools 5 | from agno.models.openai import OpenAIChat 6 | from firecrawl import FirecrawlApp 7 | from pydantic import BaseModel, Field 8 | from typing import List 9 | from composio_phidata import Action, ComposioToolSet 10 | import json 11 | import os 12 | from dotenv import load_dotenv, set_key 13 | import pathlib 14 | 15 | # Get the absolute path to the .env file 16 | env_path = pathlib.Path(os.path.join(os.getcwd(), '.env')) 17 | 18 | # Load environment variables from .env file 19 | load_dotenv(dotenv_path=env_path) 20 | 21 | class QuoraUserInteractionSchema(BaseModel): 22 | username: str = Field(description="The username of the user who posted the question or answer") 23 | bio: str = Field(description="The bio or description of the user") 24 | post_type: str = Field(description="The type of post, either 'question' or 'answer'") 25 | timestamp: str = Field(description="When the question or answer was posted") 26 | upvotes: int = Field(default=0, description="Number of upvotes received") 27 | links: List[str] = Field(default_factory=list, description="Any links included in the post") 28 | 29 | class QuoraPageSchema(BaseModel): 30 | interactions: List[QuoraUserInteractionSchema] = Field(description="List of 
all user interactions (questions and answers) on the page") 31 | 32 | def search_for_urls(company_description: str, firecrawl_api_key: str, num_links: int) -> List[str]: 33 | url = "https://api.firecrawl.dev/v1/search" 34 | headers = { 35 | "Authorization": f"Bearer {firecrawl_api_key}", 36 | "Content-Type": "application/json" 37 | } 38 | query1 = f"quora websites where people are looking for {company_description} services" 39 | payload = { 40 | "query": query1, 41 | "limit": num_links, 42 | "lang": "en", 43 | "location": "United States", 44 | "timeout": 60000, 45 | } 46 | response = requests.post(url, json=payload, headers=headers) 47 | if response.status_code == 200: 48 | data = response.json() 49 | if data.get("success"): 50 | results = data.get("data", []) 51 | return [result["url"] for result in results] 52 | return [] 53 | 54 | def extract_user_info_from_urls(urls: List[str], firecrawl_api_key: str) -> List[dict]: 55 | user_info_list = [] 56 | firecrawl_app = FirecrawlApp(api_key=firecrawl_api_key) 57 | 58 | try: 59 | for url in urls: 60 | response = firecrawl_app.extract( 61 | [url], 62 | { 63 | 'prompt': """ 64 | Extract information about all users who have posted questions or answers on this Quora page. 65 | For each user, extract their username, bio, the type of post (question or answer), 66 | when it was posted, number of upvotes, and any links they included. 67 | """, 68 | 'schema': QuoraPageSchema.model_json_schema(), 69 | } 70 | ) 71 | 72 | if response.get('success') and response.get('data'): 73 | extracted_data = response['data'] 74 | if 'interactions' in extracted_data: 75 | for interaction in extracted_data['interactions']: 76 | user_info = { 77 | 'url': url, 78 | 'username': interaction.get('username', 'Unknown'), 79 | 'bio': interaction.get('bio', 'No bio available'), 80 | 'post_type': interaction.get('post_type', 'Unknown'), 81 | 'timestamp': interaction.get('timestamp', 'Unknown'), 82 | 'upvotes': interaction.get('upvotes', 0), 83 | 'links': interaction.get('links', []) 84 | } 85 | user_info_list.append(user_info) 86 | except Exception as e: 87 | st.error(f"Error extracting user info: {str(e)}") 88 | 89 | return user_info_list 90 | 91 | def format_user_info_to_flattened_json(user_info_list: List[dict]) -> List[dict]: 92 | flattened_data = [] 93 | 94 | for user_info in user_info_list: 95 | flattened_user = { 96 | 'url': user_info.get('url', ''), 97 | 'username': user_info.get('username', ''), 98 | 'bio': user_info.get('bio', ''), 99 | 'post_type': user_info.get('post_type', ''), 100 | 'timestamp': user_info.get('timestamp', ''), 101 | 'upvotes': user_info.get('upvotes', 0), 102 | 'links': ', '.join(user_info.get('links', [])), 103 | } 104 | flattened_data.append(flattened_user) 105 | 106 | return flattened_data 107 | 108 | def create_google_sheets_agent(composio_api_key: str, openai_api_key: str) -> Agent: 109 | composio_tools = ComposioToolSet( 110 | api_key=composio_api_key, 111 | actions=[Action.GOOGLE_SHEETS_CREATE] 112 | ) 113 | 114 | return Agent( 115 | model=OpenAIChat(id="gpt-4", api_key=openai_api_key), 116 | tools=[composio_tools], 117 | show_tool_calls=True, 118 | markdown=True 119 | ) 120 | 121 | def write_to_google_sheets(flattened_data: List[dict], composio_api_key: str, openai_api_key: str) -> str: 122 | if not flattened_data: 123 | return "" 124 | 125 | agent = create_google_sheets_agent(composio_api_key, openai_api_key) 126 | 127 | json_data = json.dumps(flattened_data, indent=2) 128 | 129 | response = agent.run( 130 | f"""Create a Google Sheet with the 
following data: 131 | {json_data} 132 | 133 | Format it nicely with appropriate column headers and return the link to the sheet. 134 | """ 135 | ) 136 | 137 | # Extract the Google Sheets link from the response 138 | content = response.content 139 | if "https://docs.google.com/spreadsheets" in content: 140 | for line in content.split('\n'): 141 | if "https://docs.google.com/spreadsheets" in line: 142 | return line.strip() 143 | 144 | return "" 145 | 146 | def create_prompt_transformation_agent(openai_api_key: str) -> Agent: 147 | return Agent( 148 | model=OpenAIChat(id="gpt-4", api_key=openai_api_key), 149 | system_prompt="""You are an expert at transforming verbose product/service descriptions into concise, targeted phrases for search queries. 150 | Your task is to take a detailed description and extract the core product or service being offered, condensing it into 3-4 words. 151 | 152 | Examples: 153 | Input: "We're looking for businesses that need help with their social media marketing, especially those struggling with content creation and engagement" 154 | Output: "social media marketing" 155 | 156 | Input: "Need to find businesses interested in implementing machine learning solutions for fraud detection" 157 | Output: "ML fraud detection" 158 | 159 | Always focus on the core product/service and keep it concise but clear.""", 160 | markdown=True 161 | ) 162 | 163 | # Function to save API keys to .env file 164 | def save_api_keys_to_env(): 165 | try: 166 | # Save OpenAI API key 167 | if st.session_state.openai_api_key: 168 | set_key(env_path, "OPENAI_API_KEY", st.session_state.openai_api_key) 169 | 170 | # Save Firecrawl API key 171 | if st.session_state.firecrawl_api_key: 172 | set_key(env_path, "FIRECRAWL_API_KEY", st.session_state.firecrawl_api_key) 173 | 174 | # Save Composio API key 175 | if st.session_state.composio_api_key: 176 | set_key(env_path, "COMPOSIO_API_KEY", st.session_state.composio_api_key) 177 | 178 | # Update environment variables in session state 179 | st.session_state.env_openai_api_key = st.session_state.openai_api_key 180 | st.session_state.env_firecrawl_api_key = st.session_state.firecrawl_api_key 181 | st.session_state.env_composio_api_key = st.session_state.composio_api_key 182 | 183 | return True 184 | except Exception as e: 185 | st.error(f"Error saving API keys to .env file: {str(e)}") 186 | return False 187 | 188 | def main(): 189 | st.set_page_config(page_title="AI Lead Generation Agent", layout="wide") 190 | st.title("🎯 AI Lead Generation Agent") 191 | st.info("This firecrawl powered agent helps you generate leads from Quora by searching for relevant posts and extracting user information.") 192 | 193 | # Initialize session state for API keys if not already set 194 | if "api_keys_initialized" not in st.session_state: 195 | # Get API keys from environment variables 196 | st.session_state.env_openai_api_key = os.getenv("OPENAI_API_KEY", "") 197 | st.session_state.env_firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY", "") 198 | st.session_state.env_composio_api_key = os.getenv("COMPOSIO_API_KEY", "") 199 | 200 | # Initialize the working API keys with environment values 201 | st.session_state.openai_api_key = st.session_state.env_openai_api_key 202 | st.session_state.firecrawl_api_key = st.session_state.env_firecrawl_api_key 203 | st.session_state.composio_api_key = st.session_state.env_composio_api_key 204 | 205 | st.session_state.api_keys_initialized = True 206 | 207 | with st.sidebar: 208 | st.header("API Keys") 209 | 210 | # API Key Management Section 211 
| with st.expander("Configure API Keys", expanded=False):
212 |             st.info("API keys from .env file are used by default. You can override them here.")
213 | 
214 |             # Function to handle API key updates
215 |             def update_api_key(key_name, env_key_name, help_text=""):
216 |                 new_value = st.text_input(
217 |                     f"{key_name}",
218 |                     value=st.session_state[env_key_name] if st.session_state[env_key_name] else "",
219 |                     type="password",
220 |                     help=help_text
221 |                 )
222 |                 session_key = key_name.lower().replace(" ", "_")  # e.g. "Firecrawl API Key" -> "firecrawl_api_key", matching the keys read elsewhere
223 |                 # Only update if user entered something or if we have an env value
224 |                 if new_value:
225 |                     st.session_state[session_key] = new_value
226 |                     return True
227 |                 elif st.session_state[env_key_name]:
228 |                     st.session_state[session_key] = st.session_state[env_key_name]
229 |                     return True
230 |                 return False
231 | 
232 |             # API keys inputs
233 |             has_firecrawl = update_api_key(
234 |                 "Firecrawl API Key",
235 |                 "env_firecrawl_api_key",
236 |                 help_text="Get your Firecrawl API key from [Firecrawl's website](https://www.firecrawl.dev/app/api-keys)"
237 |             )
238 | 
239 |             has_openai = update_api_key(
240 |                 "OpenAI API Key",
241 |                 "env_openai_api_key",
242 |                 help_text="Get your OpenAI API key from [OpenAI's website](https://platform.openai.com/api-keys)"
243 |             )
244 | 
245 |             has_composio = update_api_key(
246 |                 "Composio API Key",
247 |                 "env_composio_api_key",
248 |                 help_text="Get your Composio API key from [Composio's website](https://composio.ai)"
249 |             )
250 | 
251 |             # Buttons for API key management
252 |             col1, col2 = st.columns(2)
253 |             with col1:
254 |                 if st.button("Reset to .env values"):
255 |                     st.session_state.openai_api_key = st.session_state.env_openai_api_key
256 |                     st.session_state.firecrawl_api_key = st.session_state.env_firecrawl_api_key
257 |                     st.session_state.composio_api_key = st.session_state.env_composio_api_key
258 |                     st.experimental_rerun()
259 | 
260 |             with col2:
261 |                 if st.button("Save to .env file"):
262 |                     if save_api_keys_to_env():
263 |                         st.success("API keys saved to .env file!")
264 |                         st.experimental_rerun()
265 | 
266 |         # Display API status
267 |         api_status_ok = has_openai and has_firecrawl and has_composio
268 | 
269 |         if api_status_ok:
270 |             st.success("✅ All required API keys are configured")
271 |         else:
272 |             missing_keys = []
273 |             if not has_openai:
274 |                 missing_keys.append("OpenAI API Key")
275 |             if not has_firecrawl:
276 |                 missing_keys.append("Firecrawl API Key")
277 |             if not has_composio:
278 |                 missing_keys.append("Composio API Key")
279 | 
280 |             st.error(f"❌ Missing API keys: {', '.join(missing_keys)}")
281 | 
282 |         # Search settings
283 |         st.subheader("Search Settings")
284 |         num_links = st.number_input("Number of links to search", min_value=1, max_value=10, value=3)
285 | 
286 |         if st.button("Reset Session"):
287 |             # Keep API keys but clear other session state
288 |             api_keys = {
289 |                 "api_keys_initialized": st.session_state.api_keys_initialized,
290 |                 "env_openai_api_key": st.session_state.env_openai_api_key,
291 |                 "env_firecrawl_api_key": st.session_state.env_firecrawl_api_key,
292 |                 "env_composio_api_key": st.session_state.env_composio_api_key,
293 |                 "openai_api_key": st.session_state.openai_api_key,
294 |                 "firecrawl_api_key": st.session_state.firecrawl_api_key,
295 |                 "composio_api_key": st.session_state.composio_api_key
296 |             }
297 |             st.session_state.clear()
298 |             for key, value in api_keys.items():
299 |                 st.session_state[key] = value
300 |             st.experimental_rerun()
301 | 
302 |     user_query = st.text_area(
303 |         "Describe what kind of leads you're looking for:",
304 |         placeholder="e.g., Looking for users who need 
automated video editing software with AI capabilities", 305 | help="Be specific about the product/service and target audience. The AI will convert this into a focused search query." 306 | ) 307 | 308 | if st.button("Generate Leads"): 309 | if not api_status_ok: 310 | st.error("Please configure all required API keys in the sidebar.") 311 | elif not user_query: 312 | st.error("Please describe what leads you're looking for.") 313 | else: 314 | with st.spinner("Processing your query..."): 315 | transform_agent = create_prompt_transformation_agent(st.session_state.openai_api_key) 316 | company_description = transform_agent.run(f"Transform this query into a concise 3-4 word company description: {user_query}") 317 | st.write("🎯 Searching for:", company_description.content) 318 | 319 | with st.spinner("Searching for relevant URLs..."): 320 | urls = search_for_urls(company_description.content, st.session_state.firecrawl_api_key, num_links) 321 | 322 | if urls: 323 | st.subheader("Quora Links Used:") 324 | for url in urls: 325 | st.write(url) 326 | 327 | with st.spinner("Extracting user info from URLs..."): 328 | user_info_list = extract_user_info_from_urls(urls, st.session_state.firecrawl_api_key) 329 | 330 | with st.spinner("Formatting user info..."): 331 | flattened_data = format_user_info_to_flattened_json(user_info_list) 332 | 333 | with st.spinner("Writing to Google Sheets..."): 334 | google_sheets_link = write_to_google_sheets(flattened_data, st.session_state.composio_api_key, st.session_state.openai_api_key) 335 | 336 | if google_sheets_link: 337 | st.success("Lead generation and data writing to Google Sheets completed successfully!") 338 | st.subheader("Google Sheets Link:") 339 | st.markdown(f"[View Google Sheet]({google_sheets_link})") 340 | else: 341 | st.error("Failed to retrieve the Google Sheets link.") 342 | else: 343 | st.warning("No relevant URLs found.") 344 | 345 | if __name__ == "__main__": 346 | main() -------------------------------------------------------------------------------- /agents/newsletter/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from crewai import Agent, Task, Crew, LLM 4 | from crewai_tools import SerperDevTool 5 | from langchain_openai import ChatOpenAI 6 | from langchain_community.llms import Ollama 7 | 8 | load_dotenv() 9 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 10 | 11 | search_tool = SerperDevTool() 12 | 13 | def get_llm(use_gpt=True): 14 | if use_gpt: 15 | return LLM(model="openai/gpt-4o-mini", temperature=0.7) 16 | return Ollama( 17 | model="deepseek-r1:latest", 18 | base_url="http://localhost:11434", 19 | temperature=0.7 20 | ) 21 | 22 | def create_agents(use_gpt=True): 23 | llm = get_llm(use_gpt) 24 | 25 | researcher = Agent( 26 | role='Research Specialist', 27 | goal='Find comprehensive and up-to-date information on topics', 28 | backstory='Expert researcher skilled at discovering reliable information from various sources', 29 | tools=[search_tool], 30 | llm=llm, 31 | verbose=True 32 | ) 33 | 34 | fact_checker = Agent( 35 | role='Fact Verification Specialist', 36 | goal='Verify accuracy of information and cross-reference sources', 37 | backstory='Meticulous fact-checker with years of experience in verification and validation', 38 | tools=[search_tool], 39 | llm=llm, 40 | verbose=True 41 | ) 42 | 43 | writer = Agent( 44 | role='Newsletter Writer', 45 | goal='Create engaging and well-structured newsletters', 46 | backstory='Professional 
writer specializing in creating compelling newsletters with clear structure and engaging content',
47 |         llm=llm,
48 |         verbose=True
49 |     )
50 | 
51 |     return researcher, fact_checker, writer
52 | 
53 | def create_tasks(researcher, fact_checker, writer, topic):
54 |     research_task = Task(
55 |         description=f"Research the latest developments, key trends, and important insights about: {topic}",
56 |         agent=researcher,
57 |         expected_output="A detailed summary of the topic with key points and references"
58 |     )
59 | 
60 |     verify_task = Task(
61 |         description="Verify the accuracy of the research findings and identify any conflicting information",
62 |         agent=fact_checker,
63 |         context=[research_task],
64 |         expected_output="A comprehensive report on the accuracy of the research findings and any conflicting information"
65 |     )
66 | 
67 |     newsletter_task = Task(
68 |         description=f"""Create a newsletter about {topic} with the following format:
69 |         - Title
70 |         - Subtitle
71 |         - Topic overview
72 |         - H1, H2, H3 headers for main points
73 |         - 500-word blog post
74 |         Make it engaging and well-structured.""",
75 |         agent=writer,
76 |         context=[research_task, verify_task],
77 |         expected_output="A well-structured newsletter in HTML format"
78 |     )
79 | 
80 |     return [research_task, verify_task, newsletter_task]
81 | 
82 | def create_crew(agents, tasks):
83 |     return Crew(
84 |         agents=agents,
85 |         tasks=tasks,
86 |         verbose=True
87 |     )
88 | 
89 | def main():
90 |     print("Welcome to the Newsletter Creation Crew!")
91 |     use_gpt = input("Use OpenAI GPT-4o-mini? (yes/no): ").lower() == 'yes'
92 | 
93 |     if not use_gpt:
94 |         print("\nUsing Ollama - Ensure it's running on http://localhost:11434")
95 |         print("Start with: ollama run deepseek-r1:latest")
96 | 
97 |     topic = input("\nNewsletter topic: ")
98 | 
99 |     try:
100 |         researcher, fact_checker, writer = create_agents(use_gpt)
101 |         tasks = create_tasks(researcher, fact_checker, writer, topic)
102 |         crew = create_crew([researcher, fact_checker, writer], tasks)
103 | 
104 |         result = crew.kickoff()
105 |         print("\nNewsletter Result:")
106 |         print(result)
107 | 
108 |     except Exception as e:
109 |         print(f"\nError: {str(e)}")
110 |         if not use_gpt:
111 |             print("\nTip: Ensure Ollama is running with the deepseek-r1:latest model")
112 |             print("Run: ollama pull deepseek-r1:latest")
113 | 
114 | if __name__ == "__main__":
115 |     main()
--------------------------------------------------------------------------------
/agents/openai/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Agents SDK Research Application
2 | 
3 | This application demonstrates the use of the OpenAI Agents SDK to create a research assistant that can search the web and a personal knowledge base.
4 | 
5 | ## Features
6 | 
7 | - Web search capabilities using OpenAI's WebSearchTool
8 | - File search in a personal knowledge base using vector search
9 | - Orchestration between different specialized agents
10 | - Trace viewing for debugging and understanding agent behavior
11 | - Streamlit UI for easy interaction
12 | 
13 | ## Requirements
14 | 
15 | - Python 3.9+
16 | - OpenAI API key with access to the Agents SDK
17 | - Required packages (see requirements.txt)
18 | 
19 | ## Installation
20 | 
21 | 1. Install the required packages:
22 | 
23 | ```bash
24 | pip install -r requirements.txt
25 | ```
26 | 
27 | 2. 
Set your OpenAI API key as an environment variable: 28 | 29 | ```bash 30 | export OPENAI_API_KEY='your-api-key' 31 | ``` 32 | 33 | ## Usage 34 | 35 | Run the Streamlit application: 36 | 37 | ```bash 38 | streamlit run agents/openai/basic-agent.py 39 | ``` 40 | 41 | The application will open in your browser, where you can: 42 | 43 | 1. Enter your OpenAI API key if not set as an environment variable 44 | 2. Select a model to use (gpt-4o-mini or gpt-4o) 45 | 3. Choose between web search, file search, or combined research 46 | 4. Upload files to your knowledge base 47 | 5. Query your knowledge base 48 | 6. View traces of agent runs for debugging 49 | 50 | ## How It Works 51 | 52 | The application uses three main agents: 53 | 54 | 1. **Research Agent**: Uses web search to find information on the internet 55 | 2. **File Search Agent**: Searches through your personal knowledge base 56 | 3. **Orchestrator Agent**: Coordinates between web search and file search based on the query 57 | 58 | Files uploaded to the knowledge base are stored in a vector store for semantic search. 59 | 60 | ## Troubleshooting 61 | 62 | - If you encounter errors related to the OpenAI API, check that your API key is valid and has access to the Agents SDK 63 | - If file search is not working, ensure that your vector store is properly set up 64 | - For other issues, check the trace viewer for detailed debugging information 65 | 66 | ## License 67 | 68 | This project is licensed under the MIT License - see the LICENSE file for details. -------------------------------------------------------------------------------- /agents/real-estate/main.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | from pydantic import BaseModel, Field 3 | from agno.agent import Agent 4 | from agno.models.openai import OpenAIChat 5 | from firecrawl import FirecrawlApp 6 | import streamlit as st 7 | import os 8 | from dotenv import load_dotenv 9 | 10 | # Load environment variables from .env file if it exists 11 | load_dotenv() 12 | 13 | class PropertyData(BaseModel): 14 | """Schema for property data extraction""" 15 | building_name: str = Field(description="Name of the building/property", alias="Building_name") 16 | property_type: str = Field(description="Type of property (commercial, residential, etc)", alias="Property_type") 17 | location_address: str = Field(description="Complete address of the property") 18 | price: str = Field(description="Price of the property", alias="Price") 19 | description: str = Field(description="Detailed description of the property", alias="Description") 20 | 21 | class PropertiesResponse(BaseModel): 22 | """Schema for multiple properties response""" 23 | properties: List[PropertyData] = Field(description="List of property details") 24 | 25 | class LocationData(BaseModel): 26 | """Schema for location price trends""" 27 | location: str 28 | price_per_sqft: float 29 | percent_increase: float 30 | rental_yield: float 31 | 32 | class LocationsResponse(BaseModel): 33 | """Schema for multiple locations response""" 34 | locations: List[LocationData] = Field(description="List of location data points") 35 | 36 | class FirecrawlResponse(BaseModel): 37 | """Schema for Firecrawl API response""" 38 | success: bool 39 | data: Dict 40 | status: str 41 | expiresAt: str 42 | 43 | class PropertyFindingAgent: 44 | """Agent responsible for finding properties and providing recommendations""" 45 | 46 | def __init__(self, firecrawl_api_key: str, openai_api_key: str, 
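# model_id defaults to o3-mini; the Streamlit sidebar below overrides it via
# create_property_agent(), which passes st.session_state.model_id here.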
model_id: str = "o3-mini"): 47 | self.agent = Agent( 48 | model=OpenAIChat(id=model_id, api_key=openai_api_key), 49 | markdown=True, 50 | description="I am a real estate expert who helps find and analyze properties based on user preferences." 51 | ) 52 | self.firecrawl = FirecrawlApp(api_key=firecrawl_api_key) 53 | 54 | def find_properties( 55 | self, 56 | city: str, 57 | max_price: float, 58 | property_category: str = "Residential", 59 | property_type: str = "Flat" 60 | ) -> str: 61 | """Find and analyze properties based on user preferences""" 62 | formatted_location = city.lower() 63 | 64 | urls = [ 65 | f"https://www.squareyards.com/sale/property-for-sale-in-{formatted_location}/*", 66 | f"https://www.99acres.com/property-in-{formatted_location}-ffid/*", 67 | f"https://housing.com/in/buy/{formatted_location}/{formatted_location}", 68 | # f"https://www.nobroker.in/property/sale/{city}/{formatted_location}", 69 | ] 70 | 71 | property_type_prompt = "Flats" if property_type == "Flat" else "Individual Houses" 72 | 73 | raw_response = self.firecrawl.extract( 74 | urls=urls, 75 | params={ 76 | 'prompt': f"""Extract ONLY 10 OR LESS different {property_category} {property_type_prompt} from {city} that cost less than {max_price} crores. 77 | 78 | Requirements: 79 | - Property Category: {property_category} properties only 80 | - Property Type: {property_type_prompt} only 81 | - Location: {city} 82 | - Maximum Price: {max_price} crores 83 | - Include complete property details with exact location 84 | - IMPORTANT: Return data for at least 3 different properties. MAXIMUM 10. 85 | - Format as a list of properties with their respective details 86 | """, 87 | 'schema': PropertiesResponse.model_json_schema() 88 | } 89 | ) 90 | 91 | print("Raw Property Response:", raw_response) 92 | 93 | if isinstance(raw_response, dict) and raw_response.get('success'): 94 | properties = raw_response['data'].get('properties', []) 95 | else: 96 | properties = [] 97 | 98 | print("Processed Properties:", properties) 99 | 100 | 101 | analysis = self.agent.run( 102 | f"""As a real estate expert, analyze these properties and market trends: 103 | 104 | Properties Found in json format: 105 | {properties} 106 | 107 | **IMPORTANT INSTRUCTIONS:** 108 | 1. ONLY analyze properties from the above JSON data that match the user's requirements: 109 | - Property Category: {property_category} 110 | - Property Type: {property_type} 111 | - Maximum Price: {max_price} crores 112 | 2. DO NOT create new categories or property types 113 | 3. From the matching properties, select 5-6 properties with prices closest to {max_price} crores 114 | 115 | Please provide your analysis in this format: 116 | 117 | 🏠 SELECTED PROPERTIES 118 | • List only 5-6 best matching properties with prices closest to {max_price} crores 119 | • For each property include: 120 | - Name and Location 121 | - Price (with value analysis) 122 | - Key Features 123 | - Pros and Cons 124 | 125 | 💰 BEST VALUE ANALYSIS 126 | • Compare the selected properties based on: 127 | - Price per sq ft 128 | - Location advantage 129 | - Amenities offered 130 | 131 | 📍 LOCATION INSIGHTS 132 | • Specific advantages of the areas where selected properties are located 133 | 134 | 💡 RECOMMENDATIONS 135 | • Top 3 properties from the selection with reasoning 136 | • Investment potential 137 | • Points to consider before purchase 138 | 139 | 🤝 NEGOTIATION TIPS 140 | • Property-specific negotiation strategies 141 | 142 | Format your response in a clear, structured way using the above sections. 
143 | """ 144 | ) 145 | 146 | return analysis.content 147 | 148 | def get_location_trends(self, city: str) -> str: 149 | """Get price trends for different localities in the city""" 150 | raw_response = self.firecrawl.extract([ 151 | f"https://www.99acres.com/property-rates-and-price-trends-in-{city.lower()}-prffid/*" 152 | ], { 153 | 'prompt': """Extract price trends data for ALL major localities in the city. 154 | IMPORTANT: 155 | - Return data for at least 5-10 different localities 156 | - Include both premium and affordable areas 157 | - Do not skip any locality mentioned in the source 158 | - Format as a list of locations with their respective data 159 | """, 160 | 'schema': LocationsResponse.model_json_schema(), 161 | }) 162 | 163 | if isinstance(raw_response, dict) and raw_response.get('success'): 164 | locations = raw_response['data'].get('locations', []) 165 | 166 | analysis = self.agent.run( 167 | f"""As a real estate expert, analyze these location price trends for {city}: 168 | 169 | {locations} 170 | 171 | Please provide: 172 | 1. A bullet-point summary of the price trends for each location 173 | 2. Identify the top 3 locations with: 174 | - Highest price appreciation 175 | - Best rental yields 176 | - Best value for money 177 | 3. Investment recommendations: 178 | - Best locations for long-term investment 179 | - Best locations for rental income 180 | - Areas showing emerging potential 181 | 4. Specific advice for investors based on these trends 182 | 183 | Format the response as follows: 184 | 185 | 📊 LOCATION TRENDS SUMMARY 186 | • [Bullet points for each location] 187 | 188 | 🏆 TOP PERFORMING AREAS 189 | • [Bullet points for best areas] 190 | 191 | 💡 INVESTMENT INSIGHTS 192 | • [Bullet points with investment advice] 193 | 194 | 🎯 RECOMMENDATIONS 195 | • [Bullet points with specific recommendations] 196 | """ 197 | ) 198 | 199 | return analysis.content 200 | 201 | return "No price trends data available" 202 | 203 | def create_property_agent(): 204 | """Create PropertyFindingAgent with API keys from session state""" 205 | if 'property_agent' not in st.session_state: 206 | st.session_state.property_agent = PropertyFindingAgent( 207 | firecrawl_api_key=st.session_state.firecrawl_key, 208 | openai_api_key=st.session_state.openai_key, 209 | model_id=st.session_state.model_id 210 | ) 211 | 212 | def main(): 213 | st.set_page_config( 214 | page_title="AI Real Estate Agent", 215 | page_icon="🏠", 216 | layout="wide" 217 | ) 218 | 219 | # Get API keys from environment variables 220 | env_firecrawl_key = os.getenv("FIRECRAWL_API_KEY", "") 221 | env_openai_key = os.getenv("OPENAI_API_KEY", "") 222 | default_model = os.getenv("OPENAI_MODEL_ID", "o3-mini") 223 | 224 | with st.sidebar: 225 | st.title("🔑 API Configuration") 226 | 227 | st.subheader("🤖 Model Selection") 228 | model_id = st.selectbox( 229 | "Choose OpenAI Model", 230 | options=["o3-mini", "gpt-4o-mini"], 231 | index=0 if default_model == "o3-mini" else 1, 232 | help="Select the AI model to use. 
Choose gpt-4o-mini if your API doesn't have access to o3-mini" 233 | ) 234 | st.session_state.model_id = model_id 235 | 236 | st.divider() 237 | 238 | st.subheader("🔐 API Keys") 239 | 240 | # Show environment variable status 241 | if env_firecrawl_key: 242 | st.success("✅ Firecrawl API Key found in environment variables") 243 | if env_openai_key: 244 | st.success("✅ OpenAI API Key found in environment variables") 245 | 246 | # Allow UI override of environment variables 247 | firecrawl_key = st.text_input( 248 | "Firecrawl API Key (optional if set in environment)", 249 | type="password", 250 | help="Enter your Firecrawl API key or set FIRECRAWL_API_KEY in environment", 251 | value="" 252 | ) 253 | openai_key = st.text_input( 254 | "OpenAI API Key (optional if set in environment)", 255 | type="password", 256 | help="Enter your OpenAI API key or set OPENAI_API_KEY in environment", 257 | value="" 258 | ) 259 | 260 | # Use environment variables if UI inputs are empty 261 | firecrawl_key = firecrawl_key or env_firecrawl_key 262 | openai_key = openai_key or env_openai_key 263 | 264 | if firecrawl_key and openai_key: 265 | st.session_state.firecrawl_key = firecrawl_key 266 | st.session_state.openai_key = openai_key 267 | create_property_agent() 268 | else: 269 | missing_keys = [] 270 | if not firecrawl_key: 271 | missing_keys.append("Firecrawl API Key") 272 | if not openai_key: 273 | missing_keys.append("OpenAI API Key") 274 | if missing_keys: 275 | st.warning(f"⚠️ Missing required API keys: {', '.join(missing_keys)}") 276 | st.info("Please provide the missing keys in the fields above or set them as environment variables.") 277 | 278 | st.title("🏠 AI Real Estate Agent") 279 | st.info( 280 | """ 281 | Welcome to the AI Real Estate Agent! 282 | Enter your search criteria below to get property recommendations 283 | and location insights.
284 | """ 285 | ) 286 | 287 | col1, col2 = st.columns(2) 288 | 289 | with col1: 290 | city = st.text_input( 291 | "City", 292 | placeholder="Enter city name (e.g., Bangalore)", 293 | help="Enter the city where you want to search for properties" 294 | ) 295 | 296 | property_category = st.selectbox( 297 | "Property Category", 298 | options=["Residential", "Commercial"], 299 | help="Select the type of property you're interested in" 300 | ) 301 | 302 | with col2: 303 | max_price = st.number_input( 304 | "Maximum Price (in Crores)", 305 | min_value=0.1, 306 | max_value=100.0, 307 | value=5.0, 308 | step=0.1, 309 | help="Enter your maximum budget in Crores" 310 | ) 311 | 312 | property_type = st.selectbox( 313 | "Property Type", 314 | options=["Flat", "Individual House"], 315 | help="Select the specific type of property" 316 | ) 317 | 318 | if st.button("🔍 Start Search", use_container_width=True): 319 | if 'property_agent' not in st.session_state: 320 | st.error("⚠️ Please enter your API keys in the sidebar first!") 321 | return 322 | 323 | if not city: 324 | st.error("⚠️ Please enter a city name!") 325 | return 326 | 327 | try: 328 | with st.spinner("🔍 Searching for properties..."): 329 | property_results = st.session_state.property_agent.find_properties( 330 | city=city, 331 | max_price=max_price, 332 | property_category=property_category, 333 | property_type=property_type 334 | ) 335 | 336 | st.success("✅ Property search completed!") 337 | 338 | st.subheader("🏘️ Property Recommendations") 339 | st.markdown(property_results) 340 | 341 | st.divider() 342 | 343 | with st.spinner("📊 Analyzing location trends..."): 344 | location_trends = st.session_state.property_agent.get_location_trends(city) 345 | 346 | st.success("✅ Location analysis completed!") 347 | 348 | with st.expander("📈 Location Trends Analysis of the city"): 349 | st.markdown(location_trends) 350 | 351 | except Exception as e: 352 | st.error(f"❌ An error occurred: {str(e)}") 353 | 354 | if __name__ == "__main__": 355 | main() -------------------------------------------------------------------------------- /agents/research/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from crewai import Agent, Task, Crew 4 | from crewai_tools import SerperDevTool 5 | from langchain_openai import ChatOpenAI 6 | from langchain_community.llms import Ollama 7 | 8 | load_dotenv() 9 | 10 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 11 | 12 | search_tool = SerperDevTool() 13 | 14 | def create_research_agent(use_gpt=True): 15 | if use_gpt: 16 | llm = ChatOpenAI(model="o3-mini") 17 | else: 18 | llm = Ollama(model="llama3.1") 19 | 20 | return Agent( 21 | role='Research Specialist', 22 | goal='Conduct thorough research on given topics', 23 | backstory='You are an experienced researcher with expertise in finding and synthesizing information from various sources.', 24 | verbose=True, 25 | allow_delegation=False, 26 | tools=[search_tool], 27 | llm=llm 28 | ) 29 | 30 | def create_research_task(agent, topic): 31 | return Task( 32 | description=f"Research the following topic and provide a comprehensive summary: {topic}", 33 | agent=agent, 34 | expected_output="A detailed summary of the research findings, including key points, trends, and insights related to the topic." 
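# Note: expected_output steers the agent's final answer format; run_research
# below pairs this Task with its Agent in a single-agent Crew and calls
# crew.kickoff() to execute it.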
35 | ) 36 | 37 | def run_research(topic, use_gpt=True): 38 | agent = create_research_agent(use_gpt) 39 | task = create_research_task(agent, topic) 40 | crew = Crew(agents=[agent], tasks=[task]) 41 | result = crew.kickoff() 42 | return result 43 | 44 | if __name__ == "__main__": 45 | print("Welcome to the Research Agent!") 46 | use_gpt = input("Do you want to use GPT? (yes/no): ").lower() == 'yes' 47 | topic = input("Enter the research topic: ") 48 | 49 | result = run_research(topic, use_gpt) 50 | print("\nResearch Result:") 51 | print(result) -------------------------------------------------------------------------------- /agents/sales/__pycache__/with_st.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/agents/sales/__pycache__/with_st.cpython-311.pyc -------------------------------------------------------------------------------- /agents/sales/main.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from crewai import Agent, Task, Crew, Process 3 | from crewai.tools import BaseTool 4 | from crewai_tools import SerperDevTool 5 | from pydantic import BaseModel, Field 6 | from typing import List, Dict, Type, ClassVar 7 | import requests 8 | import smtplib 9 | from email.mime.text import MIMEText 10 | from email.mime.multipart import MIMEMultipart 11 | from langchain_community.llms import Ollama 12 | from langchain_openai import ChatOpenAI 13 | from dotenv import load_dotenv 14 | import os 15 | import json 16 | 17 | # Load environment variables 18 | load_dotenv() 19 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 20 | 21 | # Initialize SerperDev tool 22 | search_tool = SerperDevTool() 23 | 24 | def get_llm(use_gpt=False): 25 | """Get the specified language model""" 26 | if use_gpt: 27 | return ChatOpenAI( 28 | model_name="gpt-4o-mini", 29 | temperature=0.7 30 | ) 31 | return Ollama( 32 | model="deepseek-r1:latest", 33 | base_url="http://localhost:11434", 34 | temperature=0.7 35 | ) 36 | 37 | class EmailInput(BaseModel): 38 | """Input schema for Email Tool""" 39 | to: str = Field(..., description="Recipient email address") 40 | subject: str = Field(..., description="Email subject line") 41 | body: str = Field(..., description="Email body content") 42 | 43 | class EmailSender(BaseTool): 44 | name: str = "Email Sender" 45 | description: str = "Sends personalized emails using Gmail SMTP" 46 | args_schema: Type[BaseModel] = EmailInput 47 | 48 | smtp_settings: ClassVar[Dict[str, str | int]] = { 49 | 'server': "smtp.gmail.com", 50 | 'port': 587, 51 | 'username': os.getenv('GMAIL_USER'), 52 | 'password': os.getenv('GMAIL_APP_PASSWORD') 53 | } 54 | 55 | def _run(self, to: str, subject: str, body: str) -> str: 56 | if not self.smtp_settings['username'] or not self.smtp_settings['password']: 57 | return json.dumps({"error": "GMAIL_USER and GMAIL_APP_PASSWORD environment variables are required"}) 58 | 59 | try: 60 | msg = MIMEMultipart() 61 | msg['From'] = self.smtp_settings['username'] 62 | msg['To'] = to 63 | msg['Subject'] = subject 64 | msg.attach(MIMEText(body, 'plain')) 65 | 66 | with smtplib.SMTP(self.smtp_settings['server'], self.smtp_settings['port']) as server: 67 | server.starttls() 68 | server.login(self.smtp_settings['username'], self.smtp_settings['password']) 69 | server.send_message(msg) 70 | 71 | return json.dumps({"status": "success", "message": f"Email sent 
successfully to {to}"}) 72 | except Exception as e: 73 | return json.dumps({"error": f"Error sending email: {str(e)}"}) 74 | 75 | class DetailedSalesCrew: 76 | def __init__(self, target_emails: List[str], use_gpt: bool = False): 77 | self.target_emails = target_emails 78 | self.llm = get_llm(use_gpt) 79 | self.email_tool = EmailSender() 80 | 81 | def create_agents(self): 82 | # Research Agent 83 | self.researcher = Agent( 84 | role='Company Research Specialist', 85 | goal='Analyze companies and gather comprehensive information', 86 | backstory=dedent("""You are an expert researcher specializing in 87 | company analysis. You excel at finding detailed information 88 | about companies, their products, and market presence."""), 89 | tools=[search_tool], 90 | verbose=True, 91 | llm=self.llm, 92 | max_iter=100, 93 | allow_delegation=False, 94 | max_rpm=50, 95 | max_retry_limit=3 96 | ) 97 | 98 | # News Agent 99 | self.news_analyst = Agent( 100 | role='News and Trends Analyst', 101 | goal='Find and analyze relevant news and industry trends', 102 | backstory=dedent("""You are skilled at identifying relevant news 103 | and understanding industry trends. You can connect company 104 | activities to broader market movements."""), 105 | tools=[search_tool], 106 | verbose=True, 107 | llm=self.llm, 108 | max_iter=75, 109 | allow_delegation=False, 110 | max_rpm=30, 111 | max_retry_limit=2 112 | ) 113 | 114 | # Content Writer 115 | self.writer = Agent( 116 | role='Outreach Content Specialist', 117 | goal='Create highly personalized email content', 118 | backstory=dedent("""You are an expert at crafting personalized 119 | outreach emails that resonate with recipients. You excel at 120 | combining company research with industry insights. You are founder of explainx.ai and your name is Yash Thakker, which is what should be mentioned in the email."""), 121 | tools=[self.email_tool], 122 | verbose=True, 123 | llm=self.llm, 124 | max_iter=50, 125 | allow_delegation=False, 126 | max_rpm=20, 127 | max_retry_limit=2 128 | ) 129 | 130 | return [self.researcher, self.news_analyst, self.writer] 131 | 132 | def create_tasks(self, email: str): 133 | # Extract domain from email 134 | domain = email.split('@')[1] 135 | company_name = domain.split('.')[0] 136 | 137 | # Research Task 138 | research_task = Task( 139 | description=dedent(f"""Research {company_name} ({domain}) thoroughly. 140 | Step-by-step approach: 141 | 1. Search for company overview and background 142 | 2. Research their products/services in detail 143 | 3. Find information about their team and leadership 144 | 4. Analyze their market position 145 | 5. Identify their tech stack and tools 146 | 147 | Focus on: 148 | - Company's main products/services 149 | - Value proposition 150 | - Target market 151 | - Team information 152 | - Recent updates or changes 153 | - Technology stack or tools mentioned 154 | 155 | Create a comprehensive profile of the company."""), 156 | agent=self.researcher, 157 | expected_output=dedent("""Detailed company profile including all 158 | discovered information in a structured format.""") 159 | ) 160 | 161 | # News Analysis Task 162 | news_task = Task( 163 | description=dedent(f"""Research recent news and developments about 164 | {company_name} and their industry. 165 | 166 | Step-by-step approach: 167 | 1. Search for company news from the last 3 months 168 | 2. Research industry trends affecting them 169 | 3. Analyze competitor movements 170 | 4. Identify market opportunities 171 | 5. 
Find any company milestones or achievements 172 | 173 | Focus on: 174 | - Recent company news and press releases 175 | - Industry trends and developments 176 | - Competitive landscape 177 | - Market opportunities and challenges 178 | - Recent achievements or notable events"""), 179 | agent=self.news_analyst, 180 | expected_output=dedent("""Comprehensive news analysis including 181 | company-specific news and relevant industry trends.""") 182 | ) 183 | 184 | # Email Creation Task 185 | email_task = Task( 186 | description=dedent(f"""Create a personalized email for {email} using 187 | the research and news analysis. 188 | 189 | Step-by-step approach: 190 | 1. Extract key insights from research 191 | 2. Identify compelling news points 192 | 3. Craft attention-grabbing subject 193 | 4. Write personalized introduction 194 | 5. Present value proposition 195 | 196 | Guidelines: 197 | - Keep subject line engaging but professional 198 | - Reference specific company details from research 199 | - Mention relevant news or trends 200 | - Focus on value proposition 201 | - Keep email concise (150-200 words) 202 | - Include clear call to action 203 | 204 | Format the response as JSON with 'to', 'subject', and 'body' fields."""), 205 | agent=self.writer, 206 | expected_output=dedent("""JSON formatted email content with subject 207 | line and body text."""), 208 | context=[research_task, news_task] 209 | ) 210 | 211 | return [research_task, news_task, email_task] 212 | 213 | def run(self): 214 | """Process each email and create personalized outreach""" 215 | all_results = [] 216 | 217 | for email in self.target_emails: 218 | print(f"\nProcessing email: {email}") 219 | 220 | # Create crew for this email 221 | crew = Crew( 222 | agents=self.create_agents(), 223 | tasks=self.create_tasks(email), 224 | process=Process.sequential, 225 | verbose=True, 226 | max_rpm=100 227 | ) 228 | 229 | # Execute the crew's tasks 230 | result = crew.kickoff() 231 | all_results.append({ 232 | "email": email, 233 | "result": result 234 | }) 235 | 236 | return all_results 237 | 238 | def main(): 239 | print("\n🔍 Welcome to Sales Outreach Crew!") 240 | print("\nAvailable Models:") 241 | print("1. OpenAI GPT-4 Turbo (Requires API key)") 242 | print("2. Local DeepSeek Coder (Requires Ollama)") 243 | 244 | use_gpt = input("\nUse OpenAI GPT-4? 
(yes/no): ").lower() == 'yes' 245 | 246 | if not use_gpt: 247 | print("\nUsing Ollama with DeepSeek Coder") 248 | print("Ensure Ollama is running: ollama run deepseek-coder:latest") 249 | 250 | target_emails = [ 251 | "pratham@explainx.ai" 252 | ] 253 | 254 | try: 255 | # Initialize and run the sales crew 256 | sales_crew = DetailedSalesCrew(target_emails, use_gpt) 257 | results = sales_crew.run() 258 | 259 | # Print results 260 | for result in results: 261 | print(f"\nResults for {result['email']}:") 262 | print(result['result']) 263 | 264 | except Exception as e: 265 | print(f"\n❌ Error: {str(e)}") 266 | if use_gpt: 267 | print("\nTip: Check your OpenAI API key and SERPER_API_KEY") 268 | else: 269 | print("\nTip: Ensure Ollama is running with deepseek-coder:latest") 270 | print("Run: ollama run deepseek-coder:latest") 271 | print("Also check your SERPER_API_KEY") 272 | 273 | if __name__ == "__main__": 274 | main() -------------------------------------------------------------------------------- /agents/sales/run_email_preview.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import subprocess 6 | 7 | # Get the current directory 8 | current_dir = os.path.dirname(os.path.abspath(__file__)) 9 | 10 | def main(): 11 | """Run the email preview Streamlit application""" 12 | print("Starting Email Preview & Send application...") 13 | 14 | # Use the Streamlit CLI to run the app 15 | streamlit_cmd = ["streamlit", "run", os.path.join(current_dir, "email_preview.py")] 16 | 17 | try: 18 | # Run the Streamlit command 19 | subprocess.run(streamlit_cmd) 20 | except KeyboardInterrupt: 21 | print("\nApplication stopped.") 22 | except Exception as e: 23 | print(f"Error running Streamlit application: {str(e)}") 24 | sys.exit(1) 25 | 26 | if __name__ == "__main__": 27 | main() -------------------------------------------------------------------------------- /agents/sales/st-ui.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import pandas as pd 3 | from typing import List 4 | from with_st import DetailedSalesCrew # Import from your main implementation file 5 | import json 6 | import os 7 | from dotenv import load_dotenv 8 | 9 | # Load environment variables 10 | load_dotenv() 11 | 12 | # Page config 13 | st.set_page_config( 14 | page_title="AI Sales Outreach Platform", 15 | page_icon="🤖", 16 | layout="wide" 17 | ) 18 | 19 | # Initialize session state 20 | if 'emails' not in st.session_state: 21 | st.session_state.emails = [] 22 | if 'results' not in st.session_state: 23 | st.session_state.results = [] 24 | 25 | def validate_email(email: str) -> bool: 26 | """Simple email validation""" 27 | return '@' in email and '.' 
in email.split('@')[1] 28 | 29 | def save_email_template(industry: str, template: str): 30 | """Save email template to templates directory""" 31 | os.makedirs('email_templates', exist_ok=True) 32 | with open(f'email_templates/{industry.lower()}.txt', 'w') as f: 33 | f.write(template) 34 | 35 | def load_email_template(industry: str) -> str: 36 | """Load email template for given industry""" 37 | try: 38 | with open(f'email_templates/{industry.lower()}.txt', 'r') as f: 39 | return f.read() 40 | except FileNotFoundError: 41 | return "" 42 | 43 | def run_sales_crew(emails: List[dict], use_gpt: bool = False) -> List[dict]: 44 | """Run the sales crew with the given emails""" 45 | sales_crew = DetailedSalesCrew(emails, use_gpt) 46 | return sales_crew.run() 47 | 48 | # Sidebar 49 | with st.sidebar: 50 | st.title("⚙️ Settings") 51 | 52 | # Model selection 53 | model_option = st.radio( 54 | "Select AI Model", 55 | ["OpenAI GPT-4", "Local DeepSeek Coder"], 56 | help="Choose between OpenAI's GPT-4 or local DeepSeek Coder model" 57 | ) 58 | 59 | # API Keys 60 | with st.expander("API Configuration"): 61 | openai_key = st.text_input("OpenAI API Key", type="password") 62 | serper_key = st.text_input("Serper API Key", type="password") 63 | gmail_user = st.text_input("Gmail User", type="password", help="Your Gmail address") 64 | gmail_password = st.text_input("Gmail App Password", type="password", help="Gmail App Password (NOT your regular password)") 65 | 66 | # Save credentials to environment 67 | if openai_key: 68 | os.environ["OPENAI_API_KEY"] = openai_key 69 | if serper_key: 70 | os.environ["SERPER_API_KEY"] = serper_key 71 | if gmail_user: 72 | os.environ["GMAIL_USER"] = gmail_user 73 | if gmail_password: 74 | os.environ["GMAIL_APP_PASSWORD"] = gmail_password 75 | 76 | # Main content 77 | st.title("🤖 AI Sales Outreach Platform") 78 | st.caption("Personalized email outreach powered by AI") 79 | 80 | # Tab selection 81 | tab1, tab2, tab3 = st.tabs(["Add Prospects", "Email Templates", "Results"]) 82 | 83 | # Add Prospects Tab 84 | with tab1: 85 | st.header("Add Target Prospects") 86 | 87 | col1, col2 = st.columns(2) 88 | 89 | with col1: 90 | email = st.text_input("Email Address", help="Enter the prospect's email address") 91 | industry = st.selectbox( 92 | "Industry", 93 | ["Technology", "Finance", "Healthcare", "Education", "Other"], 94 | help="Select the prospect's industry" 95 | ) 96 | 97 | upload_file = st.file_uploader("Or Upload CSV", type=['csv'], 98 | help="CSV should have 'email' and 'industry' columns") 99 | 100 | if upload_file is not None: 101 | try: 102 | df = pd.read_csv(upload_file) 103 | if 'email' in df.columns and 'industry' in df.columns: 104 | new_prospects = df[['email', 'industry']].to_dict('records') 105 | for prospect in new_prospects: 106 | if validate_email(prospect['email']) and \ 107 | not any(e["email"] == prospect['email'] for e in st.session_state.emails): 108 | st.session_state.emails.append(prospect) 109 | st.success(f"Added {len(new_prospects)} prospects from CSV!") 110 | else: 111 | st.error("CSV must contain 'email' and 'industry' columns") 112 | except Exception as e: 113 | st.error(f"Error processing CSV: {str(e)}") 114 | 115 | with col2: 116 | if st.button("Add Prospect", help="Add single prospect to the list"): 117 | if validate_email(email): 118 | if not any(e["email"] == email for e in st.session_state.emails): 119 | st.session_state.emails.append({"email": email, "industry": industry}) 120 | st.success(f"Added {email} to the prospect list!") 121 | else: 122 | 
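# Duplicate guard: the any(...) membership check above keeps each address
# unique in st.session_state.emails, so repeat submissions only warn.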
st.warning("This email is already in the list!") 123 | else: 124 | st.error("Please enter a valid email address!") 125 | 126 | # Display current prospects 127 | if st.session_state.emails: 128 | st.subheader("Current Prospects") 129 | df = pd.DataFrame(st.session_state.emails) 130 | st.dataframe(df, hide_index=True) 131 | 132 | col3, col4 = st.columns(2) 133 | 134 | with col3: 135 | if st.button("Clear List", help="Remove all prospects from the list"): 136 | st.session_state.emails = [] 137 | st.success("Prospect list cleared!") 138 | 139 | with col4: 140 | if st.button("Run Outreach Campaign", help="Start sending personalized emails"): 141 | if not os.getenv("GMAIL_USER") or not os.getenv("GMAIL_APP_PASSWORD"): 142 | st.error("Please configure Gmail credentials in the settings!") 143 | elif not os.getenv("SERPER_API_KEY"): 144 | st.error("Please configure Serper API key in the settings!") 145 | elif model_option == "OpenAI GPT-4" and not os.getenv("OPENAI_API_KEY"): 146 | st.error("Please configure OpenAI API key in the settings!") 147 | else: 148 | with st.spinner("Running outreach campaign..."): 149 | try: 150 | results = run_sales_crew( 151 | st.session_state.emails, 152 | use_gpt=(model_option == "OpenAI GPT-4") 153 | ) 154 | st.session_state.results = results 155 | st.success("Campaign completed successfully!") 156 | except Exception as e: 157 | st.error(f"Error running campaign: {str(e)}") 158 | 159 | # Email Templates Tab 160 | with tab2: 161 | st.header("Email Templates") 162 | 163 | template_industry = st.selectbox( 164 | "Select Industry for Template", 165 | ["Technology", "Finance", "Healthcare", "Education", "Other"], 166 | key="template_industry" 167 | ) 168 | 169 | existing_template = load_email_template(template_industry) 170 | 171 | template_content = st.text_area( 172 | "Email Template", 173 | value=existing_template, 174 | height=300, 175 | help="Use placeholders: {company}, {industry}, {name}, etc." 176 | ) 177 | 178 | with st.expander("Template Variables Help"): 179 | st.markdown(""" 180 | Available template variables: 181 | - `{company}`: Company name 182 | - `{industry}`: Industry name 183 | - `{domain}`: Company domain 184 | - `{name}`: Recipient's name (if available) 185 | 186 | Example template: 187 | ``` 188 | Subject: Enhancing {company}'s Capabilities 189 | 190 | Hi {name}, 191 | 192 | I noticed {company}'s impressive work in the {industry} sector... 
193 | 194 | Best regards, 195 | Yash Thakker 196 | Founder, ExplainX.ai 197 | ``` 198 | """) 199 | 200 | if st.button("Save Template"): 201 | save_email_template(template_industry, template_content) 202 | st.success(f"Template saved for {template_industry} industry!") 203 | 204 | # Results Tab 205 | with tab3: 206 | st.header("Campaign Results") 207 | 208 | if st.session_state.results: 209 | # Download results button 210 | results_df = pd.DataFrame([{ 211 | 'email': r['email'], 212 | 'industry': r['industry'], 213 | 'status': 'success' if json.loads(r['result'])['status'] == 'success' else 'error', 214 | 'subject': json.loads(r['result'])['subject'], 215 | 'body': json.loads(r['result'])['body'] 216 | } for r in st.session_state.results]) 217 | 218 | csv = results_df.to_csv(index=False) 219 | st.download_button( 220 | "Download Results CSV", 221 | csv, 222 | "campaign_results.csv", 223 | "text/csv", 224 | key='download-csv' 225 | ) 226 | 227 | # Display individual results 228 | for result in st.session_state.results: 229 | with st.expander(f"Results for {result['email']} ({result['industry']})"): 230 | try: 231 | # Parse the JSON result 232 | email_content = json.loads(result['result']) 233 | 234 | # Show status 235 | if "status" in email_content: 236 | st.success(email_content["message"]) 237 | elif "error" in email_content: 238 | st.error(email_content["error"]) 239 | 240 | # Show email content 241 | st.subheader("Subject") 242 | st.write(email_content.get('subject', 'N/A')) 243 | st.subheader("Body") 244 | st.write(email_content.get('body', 'N/A')) 245 | 246 | except Exception as e: 247 | st.error(f"Error displaying result: {str(e)}") 248 | st.write(result['result']) 249 | else: 250 | st.info("No campaign results yet. Run an outreach campaign to see results here.") 251 | 252 | # Footer 253 | st.markdown("---") 254 | st.markdown("Made with ❤️ by @goyashy") 255 | 256 | # Add help/documentation tooltip 257 | with st.sidebar: 258 | with st.expander("ℹ️ Help & Documentation"): 259 | st.markdown(""" 260 | ### Quick Start Guide 261 | 1. Configure your API keys in Settings 262 | 2. Add prospects individually or via CSV 263 | 3. Optionally set up email templates 264 | 4. Run the campaign 265 | 266 | ### Requirements 267 | - Gmail account with App Password 268 | - Serper API key for research 269 | - OpenAI API key (if using GPT-4) 270 | 271 | ### Need Help? 
272 | Contact @goyashy for support 273 | """) -------------------------------------------------------------------------------- /agents/sales/with_st.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from crewai import Agent, Task, Crew, Process 3 | from crewai.tools import BaseTool 4 | from crewai_tools import SerperDevTool 5 | from pydantic import BaseModel, Field 6 | from typing import List, Dict, Type, ClassVar 7 | import smtplib 8 | from email.mime.text import MIMEText 9 | from email.mime.multipart import MIMEMultipart 10 | from langchain_community.llms import Ollama 11 | from langchain_openai import ChatOpenAI 12 | import os 13 | import json 14 | 15 | # Initialize SerperDev tool 16 | search_tool = SerperDevTool() 17 | 18 | def get_llm(use_gpt=False): 19 | """Get the specified language model""" 20 | if use_gpt: 21 | return ChatOpenAI( 22 | model_name="gpt-4o-mini", 23 | temperature=0.7 24 | ) 25 | return Ollama( 26 | model="deepseek-r1:latest", 27 | base_url="http://localhost:11434", 28 | temperature=0.7 29 | ) 30 | 31 | class EmailInput(BaseModel): 32 | """Input schema for Email Tool""" 33 | to: str = Field(..., description="Recipient email address") 34 | subject: str = Field(..., description="Email subject line") 35 | body: str = Field(..., description="Email body content") 36 | 37 | class EmailSender(BaseTool): 38 | name: str = "Email Sender" 39 | description: str = "Sends personalized emails using Gmail SMTP" 40 | args_schema: Type[BaseModel] = EmailInput 41 | 42 | smtp_settings: ClassVar[Dict[str, str | int]] = { 43 | 'server': "smtp.gmail.com", 44 | 'port': 587, 45 | 'username': os.getenv('GMAIL_USER'), 46 | 'password': os.getenv('GMAIL_APP_PASSWORD') 47 | } 48 | 49 | def _run(self, to: str, subject: str, body: str) -> str: 50 | if not self.smtp_settings['username'] or not self.smtp_settings['password']: 51 | return json.dumps({"error": "GMAIL_USER and GMAIL_APP_PASSWORD environment variables are required"}) 52 | 53 | try: 54 | msg = MIMEMultipart() 55 | msg['From'] = self.smtp_settings['username'] 56 | msg['To'] = to 57 | msg['Subject'] = subject 58 | msg.attach(MIMEText(body, 'plain')) 59 | 60 | with smtplib.SMTP(self.smtp_settings['server'], self.smtp_settings['port']) as server: 61 | server.starttls() 62 | server.login(self.smtp_settings['username'], self.smtp_settings['password']) 63 | server.send_message(msg) 64 | 65 | return json.dumps({ 66 | "status": "success", 67 | "message": f"Email sent successfully to {to}", 68 | "to": to, 69 | "subject": subject, 70 | "body": body 71 | }) 72 | except Exception as e: 73 | return json.dumps({ 74 | "error": f"Error sending email: {str(e)}", 75 | "to": to, 76 | "subject": subject, 77 | "body": body 78 | }) 79 | 80 | def load_email_template(industry: str) -> str: 81 | """Load email template for the given industry""" 82 | try: 83 | with open(f'email_templates/{industry.lower()}.txt', 'r') as f: 84 | return f.read() 85 | except FileNotFoundError: 86 | return "" 87 | 88 | class DetailedSalesCrew: 89 | def __init__(self, target_emails: List[dict], use_gpt: bool = False): 90 | """ 91 | Initialize with list of dicts containing email and industry 92 | Example: [{"email": "user@domain.com", "industry": "Technology"}] 93 | """ 94 | # Validate email format 95 | for email_data in target_emails: 96 | if not isinstance(email_data, dict) or "email" not in email_data or "industry" not in email_data: 97 | raise ValueError("Each target email must be a dictionary with 'email' and 
'industry' keys") 98 | 99 | self.target_emails = target_emails 100 | self.llm = get_llm(use_gpt) 101 | self.email_tool = EmailSender() 102 | 103 | def create_agents(self, industry: str): 104 | # Research Agent 105 | self.researcher = Agent( 106 | role='Company Research Specialist', 107 | goal='Analyze companies and gather comprehensive information', 108 | backstory=dedent(f"""You are an expert researcher specializing in 109 | {industry} company analysis. You excel at finding detailed information 110 | about companies, their products, and market presence."""), 111 | tools=[search_tool], 112 | verbose=True, 113 | llm=self.llm, 114 | max_iter=100, 115 | allow_delegation=False 116 | ) 117 | 118 | # News Agent 119 | self.news_analyst = Agent( 120 | role='News and Trends Analyst', 121 | goal='Find and analyze relevant news and industry trends', 122 | backstory=dedent(f"""You are skilled at identifying relevant news 123 | and understanding {industry} industry trends. You can connect company 124 | activities to broader market movements."""), 125 | tools=[search_tool], 126 | verbose=True, 127 | llm=self.llm, 128 | max_iter=75, 129 | allow_delegation=False 130 | ) 131 | 132 | # Content Writer 133 | template = load_email_template(industry) 134 | template_context = f"Use this template if available: {template}" if template else "" 135 | 136 | self.writer = Agent( 137 | role='Outreach Content Specialist', 138 | goal='Create highly personalized email content', 139 | backstory=dedent(f"""You are an expert at crafting personalized 140 | outreach emails for {industry} companies that resonate with recipients. 141 | You excel at combining company research with industry insights. 142 | You are founder of explainx.ai and your name is Yash Thakker, which 143 | is what should be mentioned in the email. {template_context}"""), 144 | tools=[self.email_tool], 145 | verbose=True, 146 | llm=self.llm, 147 | max_iter=50, 148 | allow_delegation=False 149 | ) 150 | 151 | return [self.researcher, self.news_analyst, self.writer] 152 | 153 | def create_tasks(self, email: str, industry: str): 154 | # Extract domain from email 155 | domain = email.split('@')[1] 156 | company_name = domain.split('.')[0] 157 | 158 | # Research Task 159 | research_task = Task( 160 | description=dedent(f"""Research {company_name} ({domain}) thoroughly. 161 | Consider their position in the {industry} industry. 162 | 163 | Step-by-step approach: 164 | 1. Search for company overview and background 165 | 2. Research their products/services in detail 166 | 3. Find information about their team and leadership 167 | 4. Analyze their market position 168 | 5. Identify their tech stack and tools 169 | 170 | Focus on: 171 | - Company's main products/services 172 | - Value proposition 173 | - Target market 174 | - Team information 175 | - Recent updates or changes 176 | - Technology stack or tools mentioned 177 | 178 | Create a comprehensive profile of the company."""), 179 | agent=self.researcher, 180 | expected_output=dedent("""Detailed company profile including all 181 | discovered information in a structured format.""") 182 | ) 183 | 184 | # News Analysis Task 185 | news_task = Task( 186 | description=dedent(f"""Research recent news and developments about 187 | {company_name} and their position in the {industry} industry. 188 | 189 | Step-by-step approach: 190 | 1. Search for company news from the last 3 months 191 | 2. Research {industry} industry trends affecting them 192 | 3. Analyze competitor movements 193 | 4. Identify market opportunities 194 | 5. 
Find any company milestones or achievements 195 | 196 | Focus on: 197 | - Recent company news and press releases 198 | - Industry trends and developments 199 | - Competitive landscape 200 | - Market opportunities and challenges 201 | - Recent achievements or notable events"""), 202 | agent=self.news_analyst, 203 | expected_output=dedent("""Comprehensive news analysis including 204 | company-specific news and relevant industry trends.""") 205 | ) 206 | 207 | # Email Creation Task 208 | email_task = Task( 209 | description=dedent(f"""Create and send a personalized email to {email} using 210 | the research and news analysis. Consider their position in the 211 | {industry} industry. 212 | 213 | Step-by-step approach: 214 | 1. Extract key insights from research 215 | 2. Identify compelling news points 216 | 3. Craft attention-grabbing subject 217 | 4. Write personalized introduction 218 | 5. Present value proposition 219 | 220 | Guidelines: 221 | - Keep subject line engaging but professional 222 | - Reference specific company details from research 223 | - Mention relevant {industry} trends 224 | - Focus on value proposition 225 | - Keep email concise (150-200 words) 226 | - Include clear call to action 227 | - Sign as Yash Thakker, Founder at ExplainX.ai 228 | 229 | Use the email tool to send the email directly."""), 230 | agent=self.writer, 231 | context=[research_task, news_task], 232 | expected_output=dedent("""Email sent successfully with response from 233 | email tool in JSON format.""") 234 | ) 235 | 236 | return [research_task, news_task, email_task] 237 | 238 | def run(self): 239 | """Process each email and create personalized outreach""" 240 | all_results = [] 241 | 242 | for target in self.target_emails: 243 | email = target["email"] 244 | industry = target["industry"] 245 | 246 | print(f"\nProcessing email: {email} (Industry: {industry})") 247 | 248 | # Create crew for this email 249 | crew = Crew( 250 | agents=self.create_agents(industry), 251 | tasks=self.create_tasks(email, industry), 252 | process=Process.sequential, 253 | verbose=True, 254 | max_rpm=100 255 | ) 256 | 257 | # Execute the crew's tasks 258 | result = crew.kickoff() 259 | all_results.append({ 260 | "email": email, 261 | "industry": industry, 262 | "result": result 263 | }) 264 | 265 | return all_results -------------------------------------------------------------------------------- /agents/social_media/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | from dotenv import load_dotenv 4 | from crewai import Agent, Task, Crew 5 | from crewai_tools import SerperDevTool 6 | from langchain_openai import ChatOpenAI 7 | from langchain_community.llms import Ollama 8 | 9 | load_dotenv() 10 | 11 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 12 | 13 | search_tool = SerperDevTool() 14 | 15 | def create_llm(use_gpt=True): 16 | if use_gpt: 17 | return ChatOpenAI(model="gpt-4o-mini") 18 | else: 19 | return Ollama(model="llama3.1") 20 | 21 | def create_agents(brand_name, llm): 22 | researcher = Agent( 23 | role="Social Media Researcher", 24 | goal=f"Research and gather information about {brand_name} from various sources", 25 | backstory="You are an expert researcher with a knack for finding relevant information quickly.", 26 | verbose=True, 27 | allow_delegation=False, 28 | tools=[search_tool], 29 | llm=llm, 30 | max_iter=15 # Increased max iterations 31 | ) 32 | 33 | social_media_monitor = Agent( 34 | role="Social Media Monitor", 35 | 
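# Like the researcher above, this agent interpolates brand_name into its
# goal (next line) so Serper searches stay focused on the target brand.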
goal=f"Monitor social media platforms for mentions of {brand_name}", 36 | backstory="You are an experienced social media analyst with keen eyes for trends and mentions.", 37 | verbose=True, 38 | allow_delegation=False, 39 | tools=[search_tool], 40 | llm=llm, 41 | max_iter=15 # Increased max iterations 42 | ) 43 | 44 | sentiment_analyzer = Agent( 45 | role="Sentiment Analyzer", 46 | goal=f"Analyze the sentiment of social media mentions about {brand_name}", 47 | backstory="You are an expert in natural language processing and sentiment analysis.", 48 | verbose=True, 49 | allow_delegation=False, 50 | llm=llm, 51 | max_iter=15 # Increased max iterations 52 | ) 53 | 54 | report_generator = Agent( 55 | role="Report Generator", 56 | goal=f"Generate comprehensive reports based on the analysis of {brand_name}", 57 | backstory="You are a skilled data analyst and report writer, adept at presenting insights clearly.", 58 | verbose=True, 59 | allow_delegation=False, 60 | llm=llm, 61 | max_iter=15 # Increased max iterations 62 | ) 63 | 64 | return [researcher, social_media_monitor, sentiment_analyzer, report_generator] 65 | 66 | def create_tasks(brand_name, agents): 67 | research_task = Task( 68 | description=f"Research {brand_name} and provide a summary of their online presence, key information, and recent activities.", 69 | agent=agents[0], 70 | expected_output="A structured summary containing: \n1. Brief overview of {brand_name}\n2. Key online platforms and follower counts\n3. Recent notable activities or campaigns\n4. Main products or services\n5. Any recent news or controversies" 71 | ) 72 | 73 | monitoring_task = Task( 74 | description=f"Monitor social media platforms for mentions of '{brand_name}' in the last 24 hours. Provide a summary of the mentions.", 75 | agent=agents[1], 76 | expected_output="A structured report containing: \n1. Total number of mentions\n2. Breakdown by platform (e.g., Twitter, Instagram, Facebook)\n3. Top 5 most engaging posts or mentions\n4. Any trending hashtags associated with {brand_name}\n5. Notable influencers or accounts mentioning {brand_name}" 77 | ) 78 | 79 | sentiment_analysis_task = Task( 80 | description=f"Analyze the sentiment of the social media mentions about {brand_name}. Categorize them as positive, negative, or neutral.", 81 | agent=agents[2], 82 | expected_output="A sentiment analysis report containing: \n1. Overall sentiment distribution (% positive, negative, neutral)\n2. Key positive themes or comments\n3. Key negative themes or comments\n4. Any notable changes in sentiment compared to previous periods\n5. Suggestions for sentiment improvement if necessary" 83 | ) 84 | 85 | report_generation_task = Task( 86 | description=f"Generate a comprehensive report about {brand_name} based on the research, social media mentions, and sentiment analysis. Include key insights and recommendations.", 87 | agent=agents[3], 88 | expected_output="A comprehensive report structured as follows: \n1. Executive Summary\n2. Brand Overview\n3. Social Media Presence Analysis\n4. Sentiment Analysis\n5. Key Insights\n6. Recommendations for Improvement\n7. 
Conclusion" 89 | ) 90 | 91 | return [research_task, monitoring_task, sentiment_analysis_task, report_generation_task] 92 | 93 | def run_social_media_monitoring(brand_name, use_gpt=True, max_retries=3): 94 | llm = create_llm(use_gpt) 95 | agents = create_agents(brand_name, llm) 96 | tasks = create_tasks(brand_name, agents) 97 | 98 | crew = Crew( 99 | agents=agents, 100 | tasks=tasks, 101 | verbose=True 102 | ) 103 | 104 | for attempt in range(max_retries): 105 | try: 106 | result = crew.kickoff() 107 | return result 108 | except Exception as e: 109 | print(f"Attempt {attempt + 1} failed: {str(e)}") 110 | if attempt < max_retries - 1: 111 | print("Retrying...") 112 | time.sleep(5) # Wait for 5 seconds before retrying 113 | else: 114 | print("Max retries reached. Unable to complete the task.") 115 | return None 116 | 117 | if __name__ == "__main__": 118 | print("Welcome to the Social Media Monitoring Crew!") 119 | use_gpt = input("Do you want to use GPT? (yes/no): ").lower() == 'yes' 120 | brand_name = input("Enter the name of the brand or influencer you want to research: ") 121 | 122 | result = run_social_media_monitoring(brand_name, use_gpt) 123 | 124 | if result: 125 | print("\n", "="*50, "\n") 126 | print("Final Report:") 127 | print(result) 128 | else: 129 | print("Failed to generate the report. Please try again later.") -------------------------------------------------------------------------------- /agents/social_media/thinking-ant-social-media-calendar.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from crewai import Agent, Task, Crew, LLM 4 | from crewai_tools import SerperDevTool, WebsiteSearchTool 5 | import streamlit as st 6 | from datetime import datetime 7 | import pandas as pd 8 | import json 9 | 10 | # Load environment variables 11 | load_dotenv() 12 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 13 | os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY") 14 | 15 | # Initialize enhanced search tools 16 | search_tool = SerperDevTool() 17 | website_tool = WebsiteSearchTool() 18 | 19 | def get_claude_llm(): 20 | """Get the Claude 3.7 Sonnet language model""" 21 | return LLM( 22 | model="anthropic/claude-3-7-sonnet-20250219", 23 | api_key=os.getenv("ANTHROPIC_API_KEY"), 24 | temperature=0.7, 25 | verbose=True 26 | ) 27 | 28 | def create_content_calendar_agents(): 29 | """Create specialized agents for content calendar creation""" 30 | llm = get_claude_llm() 31 | 32 | trend_researcher = Agent( 33 | role='Content Trend Researcher', 34 | goal='Identify current and upcoming content trends relevant to the target audience', 35 | backstory="Expert at discovering trending topics and viral content patterns. 
You find what resonates with audiences.", 36 | tools=[search_tool, website_tool], 37 | llm=llm, 38 | verbose=True, 39 | allow_delegation=False, 40 | max_tokens=100 41 | ) 42 | 43 | content_strategist = Agent( 44 | role='Content Calendar Strategist', 45 | goal='Develop a strategic 7-day content plan based on research findings', 46 | backstory="Experienced content strategist who creates balanced, engaging content calendars.", 47 | tools=[search_tool], 48 | llm=llm, 49 | verbose=True, 50 | allow_delegation=False, 51 | max_tokens=100 52 | ) 53 | 54 | content_creator = Agent( 55 | role='Content Creator', 56 | goal='Generate brief content outlines for each day of the calendar', 57 | backstory="Creative content developer who transforms plans into actionable content briefs.", 58 | tools=[search_tool], 59 | llm=llm, 60 | verbose=True, 61 | allow_delegation=False, 62 | max_tokens=100 63 | ) 64 | 65 | return trend_researcher, content_strategist, content_creator 66 | 67 | def create_content_calendar_tasks(researcher, strategist, creator, industry, target_audience, content_goals): 68 | """Create content calendar tasks with clear objectives but limited scope to manage token usage""" 69 | # Truncate inputs if they're too long 70 | industry = industry[:100] if industry else "" 71 | target_audience = target_audience[:200] if target_audience else "" 72 | content_goals = content_goals[:200] if content_goals else "" 73 | 74 | trend_research_task = Task( 75 | description=f"""Research current trends in the {industry} industry for {target_audience}. 76 | 77 | Focus on: 78 | 1. Top content formats (video, blog, etc.) 79 | 2. Trending topics and hashtags 80 | 3. Upcoming events in the next 2 weeks 81 | 4. 5-7 potential content topics that align with: {content_goals}""", 82 | agent=researcher, 83 | expected_output="List of content trends and topic ideas (max 500 words)" 84 | ) 85 | 86 | strategy_task = Task( 87 | description=f"""Create a simple 7-day content calendar for {target_audience} based on the research. 88 | 89 | Include: 90 | 1. Mix of content types (educational, promotional, etc.) 91 | 2. One main topic per day 92 | 3. Brief rationale for each day 93 | 94 | Format as Day 1: [Topic] - [Type] - [Brief rationale]""", 95 | agent=strategist, 96 | context=[trend_research_task], 97 | expected_output="7-day content calendar outline (max 500 words)" 98 | ) 99 | 100 | content_brief_task = Task( 101 | description=f"""Create brief content outlines for each day of the 7-day calendar. 102 | 103 | For each day include: 104 | 1. Headline 105 | 2. Brief hook 106 | 3. 3-5 key points 107 | 4. 
Call-to-action 108 | 109 | Keep each day's brief concise and focused.""", 110 | agent=creator, 111 | context=[trend_research_task, strategy_task], 112 | expected_output="Brief outlines for 7 days of content (max 1000 words)" 113 | ) 114 | 115 | return [trend_research_task, strategy_task, content_brief_task] 116 | 117 | def create_crew(agents, tasks): 118 | """Create a crew with optimal settings and token limits""" 119 | return Crew( 120 | agents=agents, 121 | tasks=tasks, 122 | verbose=True, 123 | process="sequential", 124 | max_rpm=10 # Limiting requests per minute to avoid rate limits 125 | ) 126 | 127 | def run_content_calendar_creation(industry, target_audience, content_goals): 128 | """Run the content calendar creation process and return results""" 129 | try: 130 | start_time = datetime.now() 131 | researcher, strategist, creator = create_content_calendar_agents() 132 | tasks = create_content_calendar_tasks(researcher, strategist, creator, industry, target_audience, content_goals) 133 | crew = create_crew([researcher, strategist, creator], tasks) 134 | result = crew.kickoff() 135 | execution_time = (datetime.now() - start_time).total_seconds() 136 | return {'result': result, 'execution_time': execution_time} 137 | except Exception as e: 138 | return f"Error: {str(e)}" 139 | 140 | def save_content_calendar(industry, target_audience, content_goals, result): 141 | """Save content calendar to JSON file""" 142 | timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 143 | filename = f"content_calendar_{timestamp}.json" 144 | 145 | data = { 146 | "industry": industry, 147 | "target_audience": target_audience, 148 | "content_goals": content_goals, 149 | "timestamp": timestamp, 150 | "content_calendar": result 151 | } 152 | 153 | with open(filename, "w") as f: 154 | json.dump(data, f, indent=4) 155 | 156 | return filename 157 | 158 | def main(): 159 | st.set_page_config(page_title="7-Day Content Calendar Creator", layout="wide") 160 | 161 | st.title("📅 AI Content Calendar Creator") 162 | st.subheader("Powered by Claude 3.7 Sonnet") 163 | 164 | # Display token usage warning 165 | st.warning("⚠️ Token Usage Management: Please keep inputs brief to avoid rate limits.") 166 | 167 | # Input form with character counters 168 | with st.form("content_calendar_form"): 169 | industry = st.text_input("Industry/Niche (max 100 chars)", placeholder="e.g., Fitness, SaaS, Digital Marketing") 170 | st.caption(f"Characters: {len(industry)}/100") 171 | 172 | target_audience = st.text_area("Target Audience (max 200 chars)", placeholder="Key demographics and interests...", height=80) 173 | st.caption(f"Characters: {len(target_audience)}/200") 174 | 175 | content_goals = st.text_area("Content Goals (max 200 chars)", placeholder="e.g., Increase brand awareness...", height=80) 176 | st.caption(f"Characters: {len(content_goals)}/200") 177 | 178 | submit_button = st.form_submit_button("Generate 7-Day Content Calendar") 179 | 180 | if submit_button: 181 | if not industry or not target_audience or not content_goals: 182 | st.error("Please fill out all fields") 183 | return 184 | 185 | # Create progress tracking 186 | progress_container = st.empty() 187 | progress_bar = st.progress(0) 188 | status_container = st.empty() 189 | timer_container = st.empty() 190 | 191 | status_container.info("Starting content calendar creation...") 192 | start_time = datetime.now() 193 | 194 | # Update timer in a separate area 195 | def update_timer(): 196 | while True: 197 | elapsed = (datetime.now() - start_time).total_seconds() 198 | 
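# Repaint the elapsed-time placeholder roughly twice per second. This loop
# runs on the daemon thread started a few lines below (threading/time are
# imported there, before the thread starts), so it exits with the app.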
timer_container.text(f"⏱️ Time elapsed: {elapsed:.1f}s") 199 | time.sleep(0.5) 200 | 201 | import threading 202 | import time 203 | timer_thread = threading.Thread(target=update_timer) 204 | timer_thread.daemon = True 205 | timer_thread.start() 206 | 207 | # Run the content calendar creation 208 | result = run_content_calendar_creation(industry, target_audience, content_goals) 209 | 210 | if isinstance(result, dict): 211 | # Save results to file 212 | filename = save_content_calendar(industry, target_audience, content_goals, result['result']) 213 | 214 | # Show results 215 | progress_bar.progress(100) 216 | status_container.success("Content Calendar Created!") 217 | timer_container.text(f"⏱️ Total time: {result['execution_time']:.2f}s") 218 | 219 | st.subheader("Your 7-Day Content Calendar") 220 | st.write(result['result']) 221 | 222 | # Create a download button for the JSON file 223 | with open(filename, "r") as f: 224 | st.download_button( 225 | label="Download Content Calendar (JSON)", 226 | data=f, 227 | file_name=filename, 228 | mime="application/json" 229 | ) 230 | 231 | # Display the calendar in a more visual format if possible 232 | try: 233 | # This is a basic attempt to extract calendar data - actual format may vary 234 | days = result['result'].split("Day ") 235 | if len(days) > 1: 236 | calendar_data = [] 237 | for day in days[1:]: # Skip first empty split 238 | day_content = day.strip() 239 | if day_content: 240 | day_num = day_content[0] 241 | content = day_content[1:].strip() 242 | calendar_data.append({"Day": int(day_num), "Content": content}) 243 | 244 | if calendar_data: 245 | st.subheader("Calendar View") 246 | calendar_df = pd.DataFrame(calendar_data) 247 | st.dataframe(calendar_df) 248 | except: 249 | # If parsing fails, just show raw result 250 | pass 251 | 252 | else: 253 | progress_bar.progress(100) 254 | status_container.error(f"Error: {result}") 255 | timer_container.empty() 256 | 257 | if __name__ == "__main__": 258 | main() -------------------------------------------------------------------------------- /agents/thinking/model-comparison.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | from dotenv import load_dotenv 4 | from crewai import Agent, Task, Crew, LLM 5 | from crewai_tools import SerperDevTool, WebsiteSearchTool 6 | from langchain_openai import ChatOpenAI 7 | from langchain_google_genai import ChatGoogleGenerativeAI 8 | import requests 9 | import asyncio 10 | from concurrent.futures import ThreadPoolExecutor 11 | import google.generativeai as genai 12 | 13 | # Load environment variables 14 | load_dotenv() 15 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 16 | 17 | # Initialize enhanced search tools 18 | search_tool = SerperDevTool() 19 | website_tool = WebsiteSearchTool() 20 | 21 | def check_ollama_availability(): 22 | """Check if Ollama server is running""" 23 | try: 24 | response = requests.get("http://localhost:11434/api/version") 25 | return response.status_code == 200 26 | except requests.exceptions.ConnectionError: 27 | return False 28 | 29 | def get_llm(model_type): 30 | """Initialize the specified language model""" 31 | if model_type == "o3-mini": 32 | return ChatOpenAI(model_name="o3-mini") 33 | elif model_type == "gemini": 34 | return LLM( 35 | model="gemini/gemini-2.0-flash-001", 36 | temperature=0.7, 37 | max_tokens=2048, 38 | top_p=0.8, 39 | vertex_credentials=None # Will use GOOGLE_API_KEY environment variable 40 | ) 41 | else: # deepseek 42 | 
return LLM( 43 | model="ollama/deepseek-r1:latest", 44 | base_url="http://localhost:11434", 45 | temperature=0.7 46 | ) 47 | 48 | def create_agents(model_type): 49 | """Create specialized research and analysis agents for a specific model""" 50 | try: 51 | llm = get_llm(model_type) 52 | 53 | researcher = Agent( 54 | role=f'Deep Research Specialist ({model_type})', 55 | goal='Conduct comprehensive research and gather detailed information', 56 | backstory="""Expert researcher skilled at discovering hard-to-find information 57 | and connecting complex data points. Specializes in thorough, detailed research.""", 58 | tools=[search_tool, website_tool], 59 | llm=llm, 60 | verbose=True, 61 | max_iter=15, 62 | allow_delegation=False 63 | ) 64 | 65 | analyst = Agent( 66 | role=f'Research Analyst ({model_type})', 67 | goal='Analyze and synthesize research findings', 68 | backstory="""Expert analyst skilled at processing complex information and 69 | identifying key patterns and insights. Specializes in clear, actionable analysis.""", 70 | tools=[search_tool], 71 | llm=llm, 72 | verbose=True, 73 | max_iter=10, 74 | allow_delegation=False 75 | ) 76 | 77 | writer = Agent( 78 | role=f'Content Synthesizer ({model_type})', 79 | goal='Create clear, structured reports from analysis', 80 | backstory="""Expert writer skilled at transforming complex analysis into 81 | clear, engaging content while maintaining technical accuracy.""", 82 | llm=llm, 83 | verbose=True, 84 | max_iter=8, 85 | allow_delegation=False 86 | ) 87 | 88 | return researcher, analyst, writer 89 | except Exception as e: 90 | st.error(f"Error creating agents for {model_type}: {str(e)}") 91 | return None, None, None 92 | 93 | def create_tasks(researcher, analyst, writer, topic): 94 | """Create research tasks with clear objectives""" 95 | research_task = Task( 96 | description=f"""Research this topic thoroughly: {topic} 97 | 98 | Follow these steps: 99 | 1. Find reliable sources and latest information 100 | 2. Extract key details and evidence 101 | 3. Verify information across sources 102 | 4. Document findings with references""", 103 | agent=researcher, 104 | expected_output="Detailed research findings with sources" 105 | ) 106 | 107 | analysis_task = Task( 108 | description=f"""Analyze the research findings about {topic}: 109 | 110 | Steps: 111 | 1. Review and categorize findings 112 | 2. Identify patterns and trends 113 | 3. Evaluate source credibility 114 | 4. 
Note key insights""", 115 | agent=analyst, 116 | context=[research_task], 117 | expected_output="Analysis of findings and insights" 118 | ) 119 | 120 | synthesis_task = Task( 121 | description=f"""Create a clear report on {topic}: 122 | 123 | Include: 124 | - Executive Summary 125 | - Key Findings 126 | - Evidence 127 | - Conclusions 128 | - Specific questions asked by the user 129 | - search volume, demand, search conversion 130 | - Top keywords 131 | - References""", 132 | agent=writer, 133 | context=[research_task, analysis_task], 134 | expected_output="Structured report with insights" 135 | ) 136 | 137 | return [research_task, analysis_task, synthesis_task] 138 | 139 | def run_single_model_research(topic, model_type): 140 | """Execute research process for a single model""" 141 | try: 142 | researcher, analyst, writer = create_agents(model_type) 143 | if not all([researcher, analyst, writer]): 144 | raise Exception(f"Failed to create agents for {model_type}") 145 | 146 | tasks = create_tasks(researcher, analyst, writer, topic) 147 | crew = Crew( 148 | agents=[researcher, analyst, writer], 149 | tasks=tasks, 150 | verbose=True 151 | ) 152 | 153 | result = crew.kickoff() 154 | return str(result) 155 | except Exception as e: 156 | return f"Error with {model_type}: {str(e)}" 157 | 158 | @st.cache_data(ttl=3600) 159 | def run_parallel_research(topic, selected_models): 160 | """Execute research process in parallel for multiple models""" 161 | results = {} 162 | 163 | for model in selected_models: 164 | try: 165 | with st.spinner(f"🔍 Researching with {model}..."): 166 | result = run_single_model_research(topic, model) 167 | results[model] = result 168 | except Exception as e: 169 | results[model] = f"Error with {model}: {str(e)}" 170 | 171 | return results 172 | 173 | def check_api_keys(): 174 | """Check availability of required API keys""" 175 | api_status = { 176 | "gpt-4o-mini": bool(os.getenv("OPENAI_API_KEY")), 177 | "gemini": bool(os.getenv("GEMINI_API_KEY")), 178 | "deepseek": check_ollama_availability() 179 | } 180 | return api_status 181 | 182 | def main(): 183 | st.set_page_config( 184 | page_title="Multi-LLM Research Assistant", 185 | page_icon="🔍", 186 | layout="wide" 187 | ) 188 | 189 | # Sidebar settings 190 | st.sidebar.title("⚙️ Model Selection") 191 | 192 | # Check API keys and model availability 193 | api_status = check_api_keys() 194 | 195 | # Model selection with checkboxes 196 | st.sidebar.markdown("### Choose Models") 197 | selected_models = [] 198 | 199 | if api_status["gpt-4o-mini"]: 200 | if st.sidebar.checkbox("OpenAI GPT-4o-mini", value=True): 201 | selected_models.append("gpt-4o-mini") 202 | else: 203 | st.sidebar.warning("⚠️ OpenAI API key not found") 204 | 205 | if api_status["gemini"]: 206 | if st.sidebar.checkbox("Google Gemini-2.0", value=True): 207 | selected_models.append("gemini") 208 | else: 209 | st.sidebar.warning("⚠️ Google API key not found") 210 | 211 | if api_status["deepseek"]: 212 | if st.sidebar.checkbox("Local DeepSeek-r1", value=True): 213 | selected_models.append("deepseek") 214 | else: 215 | st.sidebar.warning("⚠️ Ollama not running") 216 | 217 | # Main content 218 | st.title("🔍 Multi-LLM Research Assistant") 219 | st.markdown(""" 220 | This enhanced research assistant uses multiple AI models in parallel to provide comprehensive, 221 | multi-perspective research on any topic. Select one or more models to compare their analyses. 
222 | """) 223 | 224 | # Input 225 | query = st.text_area( 226 | "Research Topic", 227 | placeholder="Enter your research topic (be specific)...", 228 | help="More specific queries yield better results" 229 | ) 230 | 231 | col1, col2, col3 = st.columns([1, 1, 1]) 232 | with col2: 233 | start_research = st.button( 234 | f"🚀 Start Research with {len(selected_models)} Model{'s' if len(selected_models) != 1 else ''}", 235 | type="primary", 236 | disabled=len(selected_models) == 0 237 | ) 238 | 239 | # Execute research 240 | if start_research and query and selected_models: 241 | with st.spinner(f"🔍 Conducting research using {len(selected_models)} models..."): 242 | results = run_parallel_research(query, selected_models) 243 | 244 | st.success("✅ Research Complete!") 245 | 246 | # Create tabs for each model plus comparison 247 | tabs = [f"📊 {model.upper()}" for model in selected_models] 248 | if len(selected_models) > 1: 249 | tabs.append("🔄 Comparison") 250 | 251 | tab_list = st.tabs(tabs) 252 | 253 | # Display individual results 254 | for i, model in enumerate(selected_models): 255 | with tab_list[i]: 256 | st.markdown(f"### Research Report from {model.upper()}") 257 | st.markdown("---") 258 | st.markdown(str(results[model])) 259 | 260 | # Display comparison if multiple models selected 261 | if len(selected_models) > 1: 262 | with tab_list[-1]: 263 | st.markdown("### Model Comparison") 264 | st.markdown("---") 265 | for model in selected_models: 266 | with st.expander(f"{model.upper()} Summary"): 267 | # Extract and display key points from each model's results 268 | st.markdown(str(results[model])) 269 | 270 | st.divider() 271 | st.markdown("*Built with CrewAI, Streamlit, and Multiple LLMs*") 272 | 273 | if __name__ == "__main__": 274 | main() -------------------------------------------------------------------------------- /agents/thinking/o3-agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from crewai import Agent, Task, Crew 4 | from crewai_tools import SerperDevTool, WebsiteSearchTool 5 | from langchain_openai import ChatOpenAI 6 | from langchain_community.llms import Ollama 7 | 8 | # Load environment variables 9 | load_dotenv() 10 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 11 | 12 | # Initialize enhanced search tools 13 | search_tool = SerperDevTool() 14 | website_tool = WebsiteSearchTool() 15 | 16 | def get_llm(use_gpt=True): 17 | """Get the specified language model""" 18 | if use_gpt: 19 | return ChatOpenAI( 20 | model_name="o3-mini", 21 | ) 22 | return Ollama( 23 | model="deepseek-r1:latest", 24 | base_url="http://localhost:11434", 25 | temperature=0.7 26 | ) 27 | 28 | def create_agents(use_gpt=True): 29 | """Create specialized research and analysis agents""" 30 | llm = get_llm(use_gpt) 31 | 32 | deep_researcher = Agent( 33 | role='Deep Research Specialist', 34 | goal='Conduct comprehensive internet research and data gathering', 35 | backstory="""Expert at conducting deep, thorough research across multiple sources. 36 | Skilled at finding hard-to-locate information and connecting disparate data points. 
37 | Specializes in complex research tasks that would typically take hours or days.""", 38 | tools=[search_tool, website_tool], 39 | llm=llm, 40 | verbose=True, 41 | max_iter=100, # Increased iteration limit 42 | allow_delegation=False, # Prevent unnecessary delegations 43 | max_rpm=50, # Rate limit for API calls 44 | max_retry_limit=3 # Allow retries on failures 45 | ) 46 | 47 | analyst = Agent( 48 | role='Research Analyst', 49 | goal='Analyze and synthesize complex research findings', 50 | backstory="""Expert analyst skilled at processing large amounts of information, 51 | identifying patterns, and drawing meaningful conclusions. Specializes in turning 52 | raw research into actionable insights.""", 53 | tools=[search_tool], 54 | llm=llm, 55 | verbose=True, 56 | max_iter=75, 57 | allow_delegation=False, 58 | max_rpm=30, 59 | max_retry_limit=2 60 | ) 61 | 62 | report_writer = Agent( 63 | role='Research Report Writer', 64 | goal='Create comprehensive, well-structured research reports', 65 | backstory="""Expert at transforming complex research and analysis into 66 | clear, actionable reports. Skilled at maintaining detail while ensuring 67 | accessibility and practical value.""", 68 | llm=llm, 69 | verbose=True, 70 | max_iter=50, 71 | allow_delegation=False, 72 | max_rpm=20, 73 | max_retry_limit=2 74 | ) 75 | 76 | return deep_researcher, analyst, report_writer 77 | 78 | def create_tasks(researcher, analyst, writer, research_query): 79 | """Create research tasks with clear objectives""" 80 | deep_research_task = Task( 81 | description=f"""Conduct focused research on: {research_query} 82 | 83 | Step-by-step approach: 84 | 1. Initial broad search to identify key sources 85 | 2. Deep dive into most relevant sources 86 | 3. Extract specific details and evidence 87 | 4. Verify key findings across sources 88 | 5. Document sources and findings clearly 89 | 90 | Keep focused on specific, verified information.""", 91 | agent=researcher, 92 | expected_output="Detailed research findings with verified sources" 93 | ) 94 | 95 | analysis_task = Task( 96 | description=f"""Analyze the research findings about {research_query}: 97 | 98 | Follow these steps: 99 | 1. Review and categorize all findings 100 | 2. Identify main themes and patterns 101 | 3. Evaluate source credibility 102 | 4. Note any inconsistencies 103 | 5. Summarize key insights 104 | 105 | Focus on clear, actionable analysis.""", 106 | agent=analyst, 107 | context=[deep_research_task], 108 | expected_output="Clear analysis of findings with key insights" 109 | ) 110 | 111 | report_task = Task( 112 | description=f"""Create a structured report about {research_query}: 113 | 114 | Include: 115 | 1. Executive summary (2-3 paragraphs) 116 | 2. Key findings (bullet points) 117 | 3. Supporting evidence 118 | 4. Conclusions 119 | 5. References 120 | 121 | Keep it clear and focused.""", 122 | agent=writer, 123 | context=[deep_research_task, analysis_task], 124 | expected_output="Concise, well-structured report" 125 | ) 126 | 127 | return [deep_research_task, analysis_task, report_task] 128 | 129 | def create_crew(agents, tasks): 130 | """Create a crew with optimal settings""" 131 | return Crew( 132 | agents=agents, 133 | tasks=tasks, 134 | verbose=True, 135 | max_rpm=100, # Overall crew rate limit 136 | process="sequential" 137 | ) 138 | 139 | def main(): 140 | print("\n🔍 Welcome to Deep Research Crew!") 141 | print("\nAvailable Models:") 142 | print("1. OpenAI o3-mini (Requires API key)") 143 | print("2. 
Local DeepSeek-r1 (Requires Ollama)") 144 | 145 | use_gpt = input("\nUse OpenAI o3-mini? (yes/no): ").lower() == 'yes' 146 | 147 | if not use_gpt: 148 | print("\nUsing Ollama with DeepSeek-r1") 149 | print("Ensure Ollama is running: ollama run deepseek-r1:latest") 150 | 151 | query = input("\nWhat would you like researched? (Be specific): ") 152 | 153 | try: 154 | researcher, analyst, writer = create_agents(use_gpt) 155 | tasks = create_tasks(researcher, analyst, writer, query) 156 | crew = create_crew([researcher, analyst, writer], tasks) 157 | 158 | print("\n🔍 Starting deep research process...") 159 | result = crew.kickoff() 160 | 161 | print("\n📊 Research Report:") 162 | print("==================") 163 | print(result) 164 | 165 | except Exception as e: 166 | print(f"\n❌ Error: {str(e)}") 167 | if use_gpt: 168 | print("\nTip: Check your OpenAI API key") 169 | else: 170 | print("\nTip: Ensure Ollama is running with deepseek-r1:latest") 171 | print("Run: ollama run deepseek-r1:latest") 172 | 173 | if __name__ == "__main__": 174 | main() -------------------------------------------------------------------------------- /agents/thinking/streamlit-based.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | from dotenv import load_dotenv 4 | from crewai import Agent, Task, Crew, LLM 5 | from crewai_tools import SerperDevTool, WebsiteSearchTool 6 | from langchain_openai import ChatOpenAI 7 | import requests 8 | 9 | # Load environment variables 10 | load_dotenv() 11 | os.environ["SERPER_API_KEY"] = os.getenv("SERPER_API_KEY") 12 | 13 | # Initialize enhanced search tools 14 | search_tool = SerperDevTool() 15 | website_tool = WebsiteSearchTool() 16 | 17 | def check_ollama_availability(): 18 | """Check if Ollama server is running""" 19 | try: 20 | response = requests.get("http://localhost:11434/api/version") 21 | return response.status_code == 200 22 | except requests.exceptions.ConnectionError: 23 | return False 24 | 25 | def get_llm(use_gpt=True): 26 | """Initialize the specified language model""" 27 | if use_gpt: 28 | return ChatOpenAI(model_name="o3-mini") 29 | 30 | # Use CrewAI's LLM class with correct provider format 31 | return LLM( 32 | model="ollama/deepseek-r1:latest", 33 | base_url="http://localhost:11434", 34 | temperature=0.7 35 | ) 36 | 37 | def create_agents(use_gpt=True): 38 | """Create specialized research and analysis agents""" 39 | try: 40 | llm = get_llm(use_gpt) 41 | 42 | researcher = Agent( 43 | role='Deep Research Specialist', 44 | goal='Conduct comprehensive research and gather detailed information', 45 | backstory="""Expert researcher skilled at discovering hard-to-find information 46 | and connecting complex data points. Specializes in thorough, detailed research.""", 47 | tools=[search_tool, website_tool], 48 | llm=llm, 49 | verbose=True, 50 | max_iter=15, 51 | allow_delegation=False 52 | ) 53 | 54 | analyst = Agent( 55 | role='Research Analyst', 56 | goal='Analyze and synthesize research findings', 57 | backstory="""Expert analyst skilled at processing complex information and 58 | identifying key patterns and insights. 
Specializes in clear, actionable analysis.""",
59 | tools=[search_tool],
60 | llm=llm,
61 | verbose=True,
62 | max_iter=10,
63 | allow_delegation=False
64 | )
65 | 
66 | writer = Agent(
67 | role='Content Synthesizer',
68 | goal='Create clear, structured reports from analysis',
69 | backstory="""Expert writer skilled at transforming complex analysis into
70 | clear, engaging content while maintaining technical accuracy.""",
71 | llm=llm,
72 | verbose=True,
73 | max_iter=8,
74 | allow_delegation=False
75 | )
76 | 
77 | return researcher, analyst, writer
78 | except Exception as e:
79 | st.error(f"Error creating agents: {str(e)}")
80 | return None, None, None
81 | 
82 | def create_tasks(researcher, analyst, writer, topic):
83 | """Create research tasks with clear objectives"""
84 | research_task = Task(
85 | description=f"""Research this topic thoroughly: {topic}
86 | 
87 | Follow these steps:
88 | 1. Find reliable sources and latest information
89 | 2. Extract key details and evidence
90 | 3. Verify information across sources
91 | 4. Document findings with references""",
92 | agent=researcher,
93 | expected_output="Detailed research findings with sources"
94 | )
95 | 
96 | analysis_task = Task(
97 | description=f"""Analyze the research findings about {topic}:
98 | 
99 | Steps:
100 | 1. Review and categorize findings
101 | 2. Identify patterns and trends
102 | 3. Evaluate source credibility
103 | 4. Note key insights""",
104 | agent=analyst,
105 | context=[research_task],
106 | expected_output="Analysis of findings and insights"
107 | )
108 | 
109 | synthesis_task = Task(
110 | description=f"""Create a clear report on {topic}:
111 | 
112 | Include:
113 | - Executive Summary
114 | - Key Findings
115 | - Evidence
116 | - Conclusions
117 | - Specific questions asked by the user
118 | - search volume, demand, search conversion
119 | - Top keywords
120 | - References""",
121 | agent=writer,
122 | context=[research_task, analysis_task],
123 | expected_output="Structured report with insights"
124 | )
125 | 
126 | return [research_task, analysis_task, synthesis_task]
127 | 
128 | def run_research(topic, use_gpt):
129 | """Execute the research process"""
130 | try:
131 | if use_gpt and not os.getenv("OPENAI_API_KEY"):
132 | raise ValueError("OpenAI API key not found. Please set OPENAI_API_KEY in your environment.")
133 | 
134 | if not use_gpt and not check_ollama_availability():
135 | raise ConnectionError("Ollama server not running.
Start with: ollama run deepseek-r1") 136 | 137 | researcher, analyst, writer = create_agents(use_gpt) 138 | if not all([researcher, analyst, writer]): 139 | raise Exception("Failed to create agents") 140 | 141 | tasks = create_tasks(researcher, analyst, writer, topic) 142 | crew = Crew( 143 | agents=[researcher, analyst, writer], 144 | tasks=tasks, 145 | verbose=True 146 | ) 147 | 148 | result = crew.kickoff() 149 | # Convert CrewOutput to string for consistency 150 | return str(result) 151 | except Exception as e: 152 | return f"Error: {str(e)}" 153 | 154 | 155 | def main(): 156 | st.set_page_config( 157 | page_title="Deep Research Assistant", 158 | page_icon="🔍", 159 | layout="wide" 160 | ) 161 | 162 | # Sidebar 163 | st.sidebar.title("⚙️ Settings") 164 | model_choice = st.sidebar.radio( 165 | "Choose Model", 166 | ["OpenAI o3-mini", "Local DeepSeek-r1"] 167 | ) 168 | use_gpt = model_choice == "OpenAI o3-mini" 169 | 170 | if use_gpt and not os.getenv("OPENAI_API_KEY"): 171 | st.sidebar.warning("⚠️ OpenAI API key not found") 172 | 173 | if not use_gpt: 174 | if not check_ollama_availability(): 175 | st.sidebar.warning("⚠️ Ollama not running. Run: `ollama pull deepseek-r1 && ollama run deepseek-r1`") 176 | else: 177 | st.sidebar.success("✅ Ollama running") 178 | 179 | # Main content 180 | st.title("🔍 Deep Research Assistant") 181 | st.markdown(""" 182 | This AI-powered research assistant conducts comprehensive research on any topic. 183 | It uses specialized agents to research, analyze, and synthesize information. 184 | """) 185 | 186 | # Input 187 | query = st.text_area( 188 | "Research Topic", 189 | placeholder="Enter your research topic (be specific)...", 190 | help="More specific queries yield better results" 191 | ) 192 | 193 | col1, col2, col3 = st.columns([1, 1, 1]) 194 | with col2: 195 | start_research = st.button("🚀 Start Research", type="primary") 196 | 197 | # Execute research 198 | if start_research and query: 199 | with st.spinner("🔍 Conducting research..."): 200 | result = run_research(query, use_gpt) 201 | 202 | if isinstance(result, str) and result.startswith("Error:"): 203 | st.error(result) 204 | else: 205 | st.success("✅ Research Complete!") 206 | 207 | tab1, tab2 = st.tabs(["📊 Report", "ℹ️ About"]) 208 | 209 | with tab1: 210 | st.markdown("### Research Report") 211 | st.markdown("---") 212 | st.markdown(str(result)) # Ensure result is converted to string 213 | 214 | with tab2: 215 | st.markdown(f""" 216 | ### Process: 217 | 1. **Research**: Comprehensive source search 218 | 2. **Analysis**: Pattern identification 219 | 3. 
**Synthesis**: Report creation 220 | 221 | **Details:** 222 | - Model: {model_choice} 223 | - Tools: Web search, content analysis 224 | - Method: Multi-agent collaboration 225 | """) 226 | 227 | st.divider() 228 | st.markdown("*Built with CrewAI and Streamlit*") 229 | 230 | if __name__ == "__main__": 231 | main() -------------------------------------------------------------------------------- /bg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/bg.png -------------------------------------------------------------------------------- /bird.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/bird.png -------------------------------------------------------------------------------- /content_calendar_20250225_122502.json: -------------------------------------------------------------------------------- 1 | { 2 | "industry": "Generative AI and AI Agentss", 3 | "target_audience": "AI Experts, AI Educators, AI Learners", 4 | "content_goals": "Increase followers", 5 | "timestamp": "20250225_122502", 6 | "content_calendar": -------------------------------------------------------------------------------- /db/6dc7ffbe-9fe7-40a1-a369-1b7124802880/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/db/6dc7ffbe-9fe7-40a1-a369-1b7124802880/header.bin -------------------------------------------------------------------------------- /db/6dc7ffbe-9fe7-40a1-a369-1b7124802880/length.bin: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /db/6dc7ffbe-9fe7-40a1-a369-1b7124802880/link_lists.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/db/6dc7ffbe-9fe7-40a1-a369-1b7124802880/link_lists.bin -------------------------------------------------------------------------------- /db/chroma.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/db/chroma.sqlite3 -------------------------------------------------------------------------------- /docx.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from docx import Document 4 | 5 | def convert_docx_to_txt(input_folder): 6 | # Ensure the input folder exists 7 | if not os.path.exists(input_folder): 8 | print(f"The folder {input_folder} does not exist.") 9 | return 10 | 11 | # Iterate through all files in the folder 12 | for filename in os.listdir(input_folder): 13 | if filename.endswith(".docx"): 14 | docx_path = os.path.join(input_folder, filename) 15 | txt_path = os.path.join(input_folder, f"{os.path.splitext(filename)[0]}.txt") 16 | 17 | try: 18 | # Open the docx file 19 | doc = Document(docx_path) 20 | 21 | # Extract text from the document 22 | full_text = [] 23 | for para in doc.paragraphs: 24 | full_text.append(para.text) 25 | 26 | # Write the extracted text to a new txt file 27 | with open(txt_path, "w", encoding="utf-8") as 
txt_file:
28 | txt_file.write("\n".join(full_text))
29 | 
30 | print(f"Converted {filename} to TXT")
31 | except Exception as e:
32 | print(f"Error converting {filename}: {str(e)}")
33 | 
34 | if __name__ == "__main__":
35 | if len(sys.argv) != 2:
36 | print("Usage: python docx_to_txt_converter.py <folder_path>")
37 | sys.exit(1)
38 | 
39 | folder_path = sys.argv[1]
40 | convert_docx_to_txt(folder_path)
-------------------------------------------------------------------------------- /dynamic_newsletter.py: --------------------------------------------------------------------------------
1 | from crewai import Agent, Task, Crew
2 | from crewai_tools import SerperDevTool
3 | import os
4 | 
5 | search_tool = SerperDevTool()
6 | 
7 | os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"
8 | 
9 | def create_newsletter_crew(topic):
10 | researcher = Agent(
11 | role='Research Analyst',
12 | goal=f'Find the latest and most relevant news about {topic}',
13 | backstory=f"You're an AI with a knack for discovering trending topics in {topic}.",
14 | tools=[search_tool]
15 | )
16 | 
17 | writer = Agent(
18 | role='Content Writer',
19 | goal=f'Create engaging newsletter content about {topic} based on research',
20 | backstory=f"You're an AI with a talent for crafting compelling narratives about {topic}."
21 | )
22 | 
23 | editor = Agent(
24 | role='Copy Editor',
25 | goal=f'Ensure the {topic} newsletter is polished and error-free',
26 | backstory="You're an AI with an eye for detail and a mastery of language."
27 | )
28 | 
29 | research_task = Task(
30 | description=f'Find the top 3 trending topics in {topic} and provide brief summaries',
31 | agent=researcher,
32 | expected_output=f"A list of 3 trending {topic} topics with brief summaries for each"
33 | )
34 | 
35 | writing_task = Task(
36 | description=f'Write a 300-word article on each trending {topic}',
37 | agent=writer,
38 | expected_output=f"Three 300-word articles about the trending {topic} topics"
39 | )
40 | 
41 | editing_task = Task(
42 | description=f'Proofread and polish the {topic} articles, ensuring they flow well together',
43 | agent=editor,
44 | expected_output=f"A final, polished newsletter about {topic} trends, ready for distribution"
45 | )
46 | 
47 | newsletter_crew = Crew(
48 | agents=[researcher, writer, editor],
49 | tasks=[research_task, writing_task, editing_task],
50 | verbose=True
51 | )
52 | 
53 | return newsletter_crew
54 | 
55 | def main():
56 | topic = input("Enter the topic for the newsletter: ")
57 | crew = create_newsletter_crew(topic)
58 | result = crew.kickoff()
59 | print(result)
60 | 
61 | if __name__ == "__main__":
62 | main()
-------------------------------------------------------------------------------- /dynamic_research.py: --------------------------------------------------------------------------------
1 | from crewai import Agent, Task, Crew, Process
2 | from langchain_openai import OpenAI
3 | from crewai_tools import SerperDevTool
4 | import os
5 | 
6 | # Set up tools
7 | search_tool = SerperDevTool()
8 | 
9 | # Set up language model
10 | llm = OpenAI(model_name="gpt-4o-mini")
11 | 
12 | os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"
13 | 
14 | def create_crew(topic):
15 | # Create researcher agent
16 | researcher = Agent(
17 | role="Senior Research Analyst",
18 | goal=f"Uncover cutting-edge developments in {topic}",
19 | backstory=f"You are an experienced research analyst with a keen eye for emerging trends in {topic}.
Your expertise lies in identifying groundbreaking innovations and their potential impact on various industries.", 20 | verbose=True, 21 | allow_delegation=False, 22 | tools=[search_tool], 23 | ) 24 | 25 | # Create writer agent 26 | writer = Agent( 27 | role="Content Writer", 28 | goal=f"Create engaging articles about {topic} developments", 29 | backstory=f"You are a skilled writer with a passion for explaining complex {topic} concepts in simple terms. Your articles captivate readers while conveying accurate information about advancements in {topic}.", 30 | verbose=True, 31 | allow_delegation=False, 32 | ) 33 | 34 | research_task = Task( 35 | description=f"Research the latest advancements in {topic} and summarize the top 3 breakthroughs", 36 | agent=researcher, 37 | expected_output=f"A bullet-point list of the top 3 {topic} breakthroughs with a brief explanation of each" 38 | ) 39 | 40 | writing_task = Task( 41 | description=f"Write a blog post about the top 3 {topic} breakthroughs", 42 | agent=writer, 43 | expected_output=f"A 500-word blog post discussing the top 3 {topic} breakthroughs", 44 | context=[research_task] 45 | ) 46 | 47 | crew = Crew( 48 | agents=[researcher, writer], 49 | tasks=[research_task, writing_task], 50 | process=Process.sequential, 51 | verbose=True, 52 | ) 53 | 54 | return crew 55 | 56 | def main(): 57 | topic = input("Enter the topic you want to research: ") 58 | crew = create_crew(topic) 59 | result = crew.kickoff() 60 | print(result) 61 | 62 | if __name__ == "__main__": 63 | main() -------------------------------------------------------------------------------- /flappy_bird.py: -------------------------------------------------------------------------------- 1 | import pygame 2 | import random 3 | import sys 4 | 5 | # Initialize Pygame 6 | pygame.init() 7 | 8 | # Screen dimensions 9 | WIDTH = 400 10 | HEIGHT = 600 11 | SCREEN = pygame.display.set_mode((WIDTH, HEIGHT)) 12 | pygame.display.set_caption('Flappy Bird Clone') 13 | 14 | # Set up fonts 15 | FONT = pygame.font.SysFont('Arial', 32) 16 | 17 | # Define game variables 18 | GRAVITY = 0.5 19 | GAME_SPEED = 3 20 | 21 | # Colors 22 | WHITE = (255, 255, 255) 23 | 24 | # Load images 25 | try: 26 | BIRD_IMAGE = pygame.image.load('bird.png').convert_alpha() 27 | # Scale down the bird image 28 | BIRD_IMAGE = pygame.transform.scale(BIRD_IMAGE, (50, 35)) 29 | BG_IMAGE = pygame.image.load('bg.png').convert() 30 | PIPE_IMAGE = pygame.image.load('pipe.png').convert_alpha() 31 | except pygame.error as e: 32 | print(f"Error loading images: {e}") 33 | pygame.quit() 34 | sys.exit() 35 | 36 | # Player class representing the character 37 | class Player(pygame.sprite.Sprite): 38 | def __init__(self): 39 | super().__init__() 40 | # Use the scaled bird image for the player 41 | self.image = BIRD_IMAGE 42 | self.rect = self.image.get_rect() 43 | self.rect.center = (50, HEIGHT // 2) # Start position 44 | self.velocity = 0 # Initial velocity 45 | 46 | def update(self): 47 | # Apply gravity to the player's velocity 48 | self.velocity += GRAVITY 49 | # Update the player's position 50 | self.rect.y += int(self.velocity) 51 | 52 | # Prevent the player from moving off-screen 53 | if self.rect.top <= 0: 54 | self.rect.top = 0 55 | self.velocity = 0 56 | if self.rect.bottom >= HEIGHT: 57 | self.rect.bottom = HEIGHT 58 | self.velocity = 0 59 | 60 | def flap(self): 61 | # Move the player upward when the spacebar is pressed 62 | self.velocity = -10 63 | 64 | # Pipe class for obstacles 65 | class Pipe(pygame.sprite.Sprite): 66 | def __init__(self, 
x, y, orientation): 67 | super().__init__() 68 | # Flip the pipe image for the top pipe 69 | if orientation == 'top': 70 | self.image = pygame.transform.flip(PIPE_IMAGE, False, True) 71 | self.rect = self.image.get_rect(midbottom=(x, y - 100)) 72 | else: 73 | self.image = PIPE_IMAGE 74 | self.rect = self.image.get_rect(midtop=(x, y + 100)) 75 | self.mask = pygame.mask.from_surface(self.image) 76 | 77 | def update(self): 78 | # Move the pipe leftward 79 | self.rect.x -= GAME_SPEED 80 | # Remove the pipe when it's off-screen 81 | if self.rect.right < 0: 82 | self.kill() 83 | 84 | # Function to create a new pair of pipes 85 | def create_pipes(): 86 | # Randomly determine the gap's vertical position 87 | gap_center = random.randint(150, HEIGHT - 150) 88 | top_pipe = Pipe(WIDTH, gap_center, 'top') 89 | bottom_pipe = Pipe(WIDTH, gap_center, 'bottom') 90 | return top_pipe, bottom_pipe 91 | 92 | # Main game function 93 | def main_game(): 94 | global GAME_SPEED 95 | clock = pygame.time.Clock() 96 | score = 0 97 | running = True 98 | game_over = False 99 | 100 | # Sprite groups for efficient rendering and updates 101 | all_sprites = pygame.sprite.Group() 102 | pipe_group = pygame.sprite.Group() 103 | 104 | # Create the player object and add it to the sprite group 105 | player = Player() 106 | all_sprites.add(player) 107 | 108 | # Custom event for adding new pipes 109 | ADDPIPE = pygame.USEREVENT + 1 110 | pygame.time.set_timer(ADDPIPE, 1500) # New pipe every 1.5 seconds 111 | 112 | while running: 113 | clock.tick(60) # Limit the frame rate to 60 FPS 114 | 115 | for event in pygame.event.get(): 116 | if event.type == pygame.QUIT: 117 | running = False 118 | pygame.quit() 119 | sys.exit() 120 | 121 | elif event.type == pygame.KEYDOWN: 122 | if event.key == pygame.K_SPACE and not game_over: 123 | player.flap() # Make the player jump 124 | if event.key == pygame.K_r and game_over: 125 | main_game() # Restart the game 126 | 127 | elif event.type == ADDPIPE and not game_over: 128 | # Add new pipes to the game 129 | top_pipe, bottom_pipe = create_pipes() 130 | all_sprites.add(top_pipe, bottom_pipe) 131 | pipe_group.add(top_pipe, bottom_pipe) 132 | 133 | if not game_over: 134 | all_sprites.update() 135 | 136 | # Check for collisions with pipes 137 | if pygame.sprite.spritecollide(player, pipe_group, False, pygame.sprite.collide_mask): 138 | game_over = True 139 | # Check for collisions with the screen boundaries 140 | if player.rect.top <= 0 or player.rect.bottom >= HEIGHT: 141 | game_over = True 142 | 143 | # Update the score and remove off-screen pipes 144 | for pipe in pipe_group: 145 | if pipe.rect.right < player.rect.left and not hasattr(pipe, 'scored'): 146 | score += 0.5 # Each pipe counts as 0.5 points 147 | pipe.scored = True # Ensure each pipe is only counted once 148 | 149 | # Gradually increase the game speed to raise difficulty 150 | if int(score) % 5 == 0 and score != 0: 151 | GAME_SPEED += 0.001 # Slightly increase the speed 152 | 153 | # Draw the background 154 | SCREEN.blit(BG_IMAGE, (0, 0)) 155 | 156 | # Draw all sprites (player and pipes) 157 | all_sprites.draw(SCREEN) 158 | 159 | # Display the current score 160 | score_surface = FONT.render(f'Score: {int(score)}', True, WHITE) 161 | SCREEN.blit(score_surface, (10, 10)) 162 | 163 | if game_over: 164 | # Display 'Game Over' message 165 | game_over_surface = FONT.render('Game Over!', True, WHITE) 166 | SCREEN.blit(game_over_surface, (WIDTH // 2 - game_over_surface.get_width() // 2, HEIGHT // 2 - 50)) 167 | restart_surface = 
FONT.render('Press R to Restart', True, WHITE)
168 | SCREEN.blit(restart_surface, (WIDTH // 2 - restart_surface.get_width() // 2, HEIGHT // 2))
169 | 
170 | # Update the display
171 | pygame.display.flip()
172 | 
173 | # Run the game
174 | if __name__ == '__main__':
175 | main_game()
176 | 
-------------------------------------------------------------------------------- /newsletter.py: --------------------------------------------------------------------------------
1 | from crewai import Agent, Task, Crew
2 | from crewai_tools import SerperDevTool
3 | import os
4 | 
5 | search_tool = SerperDevTool()
6 | 
7 | os.environ["OPENAI_MODEL_NAME"]="o1-mini"
8 | 
9 | researcher = Agent(
10 | role='Research Analyst',
11 | goal='Find the latest and most relevant tech news',
12 | backstory="You're a research analyst with a knack for discovering trending topics in tech.",
13 | tools=[search_tool]
14 | )
15 | 
16 | writer = Agent(
17 | role='Content Writer',
18 | goal='Create engaging newsletter content based on research',
19 | backstory="You're a content writer with a talent for crafting compelling narratives."
20 | )
21 | 
22 | editor = Agent(
23 | role='Copy Editor',
24 | goal='Ensure the newsletter is polished, error-free, and well-integrated',
25 | backstory="You're an editor with an eye for detail, a mastery of language, and the ability to seamlessly integrate personalized content."
26 | )
27 | 
28 | personalizer = Agent(
29 | role='Content Personalizer',
30 | goal='Craft personalized introductions for different reader segments',
31 | backstory="You're a content personalizer, expert in analyzing reader preferences and creating engaging, personalized content.",
32 | tools=[search_tool]
33 | 
34 | )
35 | 
36 | research_task = Task(
37 | description='Find the top 3 trending topics in AI and provide brief summaries',
38 | agent=researcher,
39 | expected_output="A list of 3 trending topics with brief summaries for each"
40 | )
41 | 
42 | writing_task = Task(
43 | description='Write a 300-word article on each trending topic',
44 | agent=writer,
45 | expected_output="Three 300-word articles about the trending topics"
46 | )
47 | 
48 | editing_task = Task(
49 | description='Proofread and polish the articles, ensuring they flow well together. Integrate the personalized introduction seamlessly.',
50 | agent=editor,
51 | expected_output="A polished, fully integrated newsletter ready for distribution"
52 | )
53 | 
54 | personalization_task = Task(
55 | description='Analyze reader data and create a personalized introduction for the newsletter',
56 | agent=personalizer,
57 | expected_output='Summary of a personalised intro'
58 | )
59 | 
60 | newsletter_crew = Crew(
61 | agents=[researcher, writer, editor, personalizer],
62 | tasks=[research_task, writing_task, personalization_task, editing_task], # personalization must run before editing so the editor can actually integrate the intro
63 | verbose=True
64 | )
65 | 
66 | result = newsletter_crew.kickoff()
67 | print(result)
68 | 
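Note: the crew above depends on task order (personalization now runs before editing) for the editor to see the intro. A more explicit alternative, sketched here rather than taken from the repo, is to declare the dependency with context=, the same mechanism research.py uses further down:

```python
# Sketch only; agent and task names reuse those defined in newsletter.py above.
editing_task = Task(
    description='Proofread and polish the articles, integrating the personalized introduction seamlessly.',
    agent=editor,
    context=[writing_task, personalization_task],  # hands both outputs to the editor regardless of list order
    expected_output='A polished newsletter with the personalized introduction woven in'
)
```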
-------------------------------------------------------------------------------- /pipe.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/whyashthakker/ai-agents/7d839eac0854eceaf9d97110075301893b2d9ec2/pipe.png
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | agno==1.1.8
2 | aiohappyeyeballs==2.4.0
3 | aiohttp==3.11.11
4 | aiosignal==1.3.1
5 | alembic==1.13.2
6 | altair==5.5.0
7 | annotated-types==0.7.0
8 | anthropic==0.49.0
9 | anyio==4.4.0
10 | appdirs==1.4.4
11 | asgiref==3.8.1
12 | asttokens==3.0.0
13 | attrs==24.2.0
14 | auth0-python==4.8.0
15 | backoff==2.2.1
16 | bcrypt==4.2.0
17 | beautifulsoup4==4.12.3
18 | blinker==1.9.0
19 | boto3==1.35.2
20 | botocore==1.35.2
21 | browser-use==0.1.40
22 | build==1.2.1
23 | cachetools==5.5.0
24 | certifi==2024.7.4
25 | cffi==1.17.1
26 | charset-normalizer==3.3.2
27 | chroma-hnswlib==0.7.6
28 | chromadb==0.5.23
29 | click==8.1.8
30 | cohere==5.8.1
31 | coloredlogs==15.0.1
32 | crewai==0.100.1
33 | crewai-tools==0.33.0
34 | cryptography==44.0.0
35 | dataclasses-json==0.6.7
36 | decorator==5.1.1
37 | defusedxml==0.7.1
38 | Deprecated==1.2.14
39 | deprecation==2.1.0
40 | distro==1.9.0
41 | docker==7.1.0
42 | docstring_parser==0.16
43 | docx2txt==0.8
44 | embedchain==0.1.126
45 | et_xmlfile==2.0.0
46 | executing==2.2.0
47 | fastapi==0.112.1
48 | fastavro==1.9.5
49 | filelock==3.15.4
50 | filetype==1.2.0
51 | firecrawl==1.13.5
52 | flatbuffers==24.3.25
53 | frozenlist==1.4.1
54 | fsspec==2024.6.1
55 | gitdb==4.0.12
56 | GitPython==3.1.44
57 | google-ai-generativelanguage==0.6.15
58 | google-api-core==2.19.1
59 | google-api-python-client==2.160.0
60 | google-auth==2.34.0
61 | google-auth-httplib2==0.2.0
62 | google-cloud-aiplatform==1.63.0
63 | google-cloud-bigquery==3.25.0
64 | google-cloud-core==2.4.1
65 | google-cloud-resource-manager==1.12.5
66 | google-cloud-storage==2.18.2
67 | google-crc32c==1.5.0
68 | google-generativeai==0.8.4
69 | google-resumable-media==2.7.2
70 | googleapis-common-protos==1.63.2
71 | gptcache==0.1.44
72 | greenlet==3.1.1
73 | grpc-google-iam-v1==0.13.1
74 | grpcio==1.65.5
75 | grpcio-status==1.62.3
76 | grpcio-tools==1.62.3
77 | h11==0.14.0
78 | h2==4.1.0
79 | hpack==4.0.0
80 | httpcore==1.0.5
81 | httplib2==0.22.0
82 | httptools==0.6.1
83 | httpx==0.28.1
84 | httpx-sse==0.4.0
85 | huggingface-hub==0.24.6
86 | humanfriendly==10.0
87 | hyperframe==6.0.1
88 | idna==3.7
89 | importlib_metadata==8.0.0
90 | importlib_resources==6.4.3
91 | iniconfig==2.0.0
92 | instructor==1.3.3
93 | ipython==8.32.0
94 | jedi==0.19.2
95 | Jinja2==3.1.5
96 | jiter==0.4.2
97 | jmespath==1.0.1
98 | json5==0.10.0
99 | json_repair==0.25.3
100 | jsonpatch==1.33
101 | jsonpickle==4.0.1
102 | jsonpointer==3.0.0
103 | jsonref==1.1.0
104 | jsonschema==4.23.0
105 | jsonschema-specifications==2024.10.1
106 | kubernetes==30.1.0
107 | lancedb==0.5.7
108 | 
langchain==0.3.17 109 | langchain-anthropic==0.3.3 110 | langchain-cohere==0.3.5 111 | langchain-community==0.3.16 112 | langchain-core==0.3.41 113 | langchain-experimental==0.3.4 114 | langchain-google-genai==2.0.9 115 | langchain-ollama==0.2.2 116 | langchain-openai==0.3.1 117 | langchain-text-splitters==0.3.5 118 | langsmith==0.1.147 119 | litellm==1.59.8 120 | lxml==5.3.0 121 | Mako==1.3.5 122 | markdown-it-py==3.0.0 123 | markdownify==0.14.1 124 | MarkupSafe==2.1.5 125 | marshmallow==3.22.0 126 | matplotlib-inline==0.1.7 127 | mdurl==0.1.2 128 | mem0ai==0.1.48 129 | mmh3==4.1.0 130 | monotonic==1.6 131 | mpmath==1.3.0 132 | multidict==6.0.5 133 | mypy-extensions==1.0.0 134 | narwhals==1.24.2 135 | nest-asyncio==1.6.0 136 | networkx==3.4.2 137 | nodeenv==1.9.1 138 | numpy==1.26.4 139 | oauthlib==3.2.2 140 | ollama==0.4.7 141 | onnxruntime==1.19.0 142 | openai==1.61.0 143 | openai-agents==0.1.0 144 | openpyxl==3.1.5 145 | opentelemetry-api==1.26.0 146 | opentelemetry-exporter-otlp-proto-common==1.26.0 147 | opentelemetry-exporter-otlp-proto-grpc==1.26.0 148 | opentelemetry-exporter-otlp-proto-http==1.26.0 149 | opentelemetry-instrumentation==0.47b0 150 | opentelemetry-instrumentation-asgi==0.47b0 151 | opentelemetry-instrumentation-fastapi==0.47b0 152 | opentelemetry-proto==1.26.0 153 | opentelemetry-sdk==1.26.0 154 | opentelemetry-semantic-conventions==0.47b0 155 | opentelemetry-util-http==0.47b0 156 | orjson==3.10.7 157 | outcome==1.3.0.post0 158 | overrides==7.7.0 159 | packaging==24.1 160 | pandas==2.2.2 161 | parameterized==0.9.0 162 | parso==0.8.4 163 | pdfminer.six==20231228 164 | pdfplumber==0.11.5 165 | pexpect==4.9.0 166 | pillow==11.1.0 167 | playwright==1.50.0 168 | pluggy==1.5.0 169 | portalocker==2.10.1 170 | posthog==3.18.1 171 | prompt_toolkit==3.0.50 172 | propcache==0.2.1 173 | proto-plus==1.24.0 174 | protobuf==4.25.4 175 | ptyprocess==0.7.0 176 | pulsar-client==3.5.0 177 | pure_eval==0.2.3 178 | py==1.11.0 179 | pyarrow==17.0.0 180 | pyasn1==0.6.0 181 | pyasn1_modules==0.4.0 182 | pycparser==2.22 183 | pydantic==2.10.6 184 | pydantic-settings==2.7.1 185 | pydantic_core==2.27.2 186 | pydeck==0.9.1 187 | pyee==12.1.1 188 | pygame==2.6.0 189 | Pygments==2.18.0 190 | PyJWT==2.10.1 191 | pylance==0.9.18 192 | pyparsing==3.2.1 193 | pypdf==5.2.0 194 | pypdfium2==4.30.1 195 | PyPika==0.48.9 196 | pyproject_hooks==1.1.0 197 | pyright==1.1.377 198 | pysbd==0.3.4 199 | PySocks==1.7.1 200 | pytest==8.3.2 201 | python-dateutil==2.9.0.post0 202 | python-docx==1.1.2 203 | python-dotenv==1.0.1 204 | python-multipart==0.0.20 205 | pytube==15.0.0 206 | pytz==2024.1 207 | pyvis==0.3.2 208 | PyYAML==6.0.2 209 | qdrant-client==1.11.0 210 | ratelimiter==1.2.0.post0 211 | referencing==0.36.2 212 | regex==2024.11.6 213 | requests==2.32.3 214 | requests-oauthlib==2.0.0 215 | requests-toolbelt==1.0.0 216 | retry==0.9.2 217 | rich==13.7.1 218 | rpds-py==0.22.3 219 | rsa==4.9 220 | s3transfer==0.10.2 221 | schema==0.7.7 222 | selenium==4.23.1 223 | semver==3.0.2 224 | shapely==2.0.6 225 | shellingham==1.5.4 226 | six==1.16.0 227 | smmap==5.0.2 228 | sniffio==1.3.1 229 | sortedcontainers==2.4.0 230 | soupsieve==2.6 231 | SQLAlchemy==2.0.32 232 | stack-data==0.6.3 233 | starlette==0.38.2 234 | streamlit==1.41.1 235 | sympy==1.13.2 236 | tabulate==0.9.0 237 | tenacity==8.5.0 238 | tiktoken==0.7.0 239 | tokenizers==0.20.0 240 | toml==0.10.2 241 | tomli==2.2.1 242 | tomli_w==1.2.0 243 | tornado==6.4.2 244 | tqdm==4.66.5 245 | traitlets==5.14.3 246 | trio==0.26.2 247 | trio-websocket==0.11.1 248 | 
typer==0.12.4 249 | types-requests==2.32.0.20240712 250 | typing-inspect==0.9.0 251 | typing_extensions==4.12.2 252 | tzdata==2024.1 253 | uritemplate==4.1.1 254 | urllib3==2.3.0 255 | uv==0.5.26 256 | uvicorn==0.30.6 257 | uvloop==0.20.0 258 | watchfiles==0.23.0 259 | wcwidth==0.2.13 260 | websocket-client==1.8.0 261 | websockets==13.0 262 | wrapt==1.16.0 263 | wsproto==1.2.0 264 | yarl==1.18.3 265 | zipp==3.20.0 -------------------------------------------------------------------------------- /research.py: -------------------------------------------------------------------------------- 1 | from crewai import Agent, Task, Crew, Process 2 | from langchain_openai import OpenAI 3 | from crewai_tools import SerperDevTool 4 | import os 5 | 6 | # Set up tools 7 | search_tool = SerperDevTool() 8 | 9 | # Set up language model 10 | llm = OpenAI(model_name="gpt-4o-mini") 11 | 12 | os.environ["OPENAI_MODEL_NAME"]="gpt-4o-mini" 13 | 14 | # Create researcher agent 15 | researcher = Agent( 16 | role="Senior Research Analyst", 17 | goal="Uncover cutting-edge developments in AI", 18 | backstory="You are an experienced research analyst with a keen eye for emerging trends in technology. Your expertise lies in identifying groundbreaking AI innovations and their potential impact on various industries.", 19 | verbose=True, 20 | allow_delegation=False, 21 | tools=[search_tool], 22 | ) 23 | 24 | # Create writer agent 25 | writer = Agent( 26 | role="Content Writer", 27 | goal="Create engaging articles about AI developments", 28 | backstory="You are a skilled writer with a passion for explaining complex technological concepts in simple terms. Your articles captivate readers while conveying accurate information about AI advancements.", 29 | verbose=True, 30 | allow_delegation=False, 31 | ) 32 | 33 | research_task = Task( 34 | description="Research the latest advancements in AI and summarize the top 3 breakthroughs", 35 | agent=researcher, 36 | expected_output="A bullet-point list of the top 3 AI breakthroughs with a brief explanation of each" 37 | ) 38 | 39 | writing_task = Task( 40 | description="Write a blog post about the top 3 AI breakthroughs", 41 | agent=writer, 42 | expected_output="A 500-word blog post discussing the top 3 AI breakthroughs", 43 | context=[research_task] 44 | ) 45 | 46 | crew = Crew( 47 | agents=[researcher, writer], 48 | tasks=[research_task, writing_task], 49 | process=Process.sequential, 50 | verbose=True, 51 | ) 52 | 53 | result = crew.kickoff() 54 | print(result) 55 | 56 | -------------------------------------------------------------------------------- /research_results_20250212_105658.json: -------------------------------------------------------------------------------- 1 | { 2 | "query": "Market for an ai auto commenter for social media platforms", 3 | "timestamp": "20250212_105658", 4 | "results": { 5 | "Ollama": "Error with ollama: litellm.BadRequestError: LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model=\u001b[1mOllama\u001b[0m\nParams: {'model': 'deepseek-r1:latest', 'format': None, 'options': {'mirostat': None, 'mirostat_eta': None, 'mirostat_tau': None, 'num_ctx': None, 'num_gpu': None, 'num_thread': None, 'num_predict': None, 'repeat_last_n': None, 'repeat_penalty': None, 'temperature': 0.7, 'stop': None, 'tfs_z': None, 'top_k': None, 'top_p': None}, 'system': None, 'template': None, 'keep_alive': None, 'raw': None}\n Pass model as E.g. 
For 'Huggingface' inference endpoints pass in `completion(model='huggingface/starcoder',..)` Learn more: https://docs.litellm.ai/docs/providers",
6 | "Gemini": "Error with gemini: litellm.AuthenticationError: geminiException - {\n \"error\": {\n \"code\": 400,\n \"message\": \"API key not valid. Please pass a valid API key.\",\n \"status\": \"INVALID_ARGUMENT\",\n \"details\": [\n {\n \"@type\": \"type.googleapis.com/google.rpc.ErrorInfo\",\n \"reason\": \"API_KEY_INVALID\",\n \"domain\": \"googleapis.com\",\n \"metadata\": {\n \"service\": \"generativelanguage.googleapis.com\"\n }\n },\n {\n \"@type\": \"type.googleapis.com/google.rpc.LocalizedMessage\",\n \"locale\": \"en-US\",\n \"message\": \"API key not valid. Please pass a valid API key.\"\n }\n ]\n }\n}\n",
7 | "OpenAI": "Error with openai: litellm.AuthenticationError: AuthenticationError: OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: sk-proj-********************************************************************************************************************************************************o28A. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"
8 | }
9 | }
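Note: the "LLM Provider NOT provided" failure logged above is litellm rejecting a model reference without a provider prefix (a LangChain Ollama object was passed straight through). The working scripts in this repo, e.g. agents/thinking/streamlit-based.py, avoid this by using CrewAI's LLM class with a prefixed model string; a minimal sketch:

```python
from crewai import LLM

# The "ollama/" prefix tells litellm which provider to route the call to;
# base_url points at the local Ollama server used throughout this repo.
llm = LLM(
    model="ollama/deepseek-r1:latest",
    base_url="http://localhost:11434",
    temperature=0.7,
)
```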
-------------------------------------------------------------------------------- /research_results_20250212_105935.json: --------------------------------------------------------------------------------
1 | {
2 | "query": "tasdad",
3 | "timestamp": "20250212_105935",
4 | "results": {
5 | "Gemini": "Error with gemini: litellm.AuthenticationError: geminiException - {\n \"error\": {\n \"code\": 400,\n \"message\": \"API key not valid. Please pass a valid API key.\",\n \"status\": \"INVALID_ARGUMENT\",\n \"details\": [\n {\n \"@type\": \"type.googleapis.com/google.rpc.ErrorInfo\",\n \"reason\": \"API_KEY_INVALID\",\n \"domain\": \"googleapis.com\",\n \"metadata\": {\n \"service\": \"generativelanguage.googleapis.com\"\n }\n },\n {\n \"@type\": \"type.googleapis.com/google.rpc.LocalizedMessage\",\n \"locale\": \"en-US\",\n \"message\": \"API key not valid. Please pass a valid API key.\"\n }\n ]\n }\n}\n"
6 | }
7 | }
-------------------------------------------------------------------------------- /research_results_20250212_110115.json: --------------------------------------------------------------------------------
1 | {
2 | "query": "tasdad",
3 | "timestamp": "20250212_110115",
4 | "results": {
5 | "Gemini": "Error with gemini, apikey: AIza...[REDACTED]: litellm.AuthenticationError: geminiException - {\n \"error\": {\n \"code\": 400,\n \"message\": \"API key not valid. Please pass a valid API key.\",\n \"status\": \"INVALID_ARGUMENT\",\n \"details\": [\n {\n \"@type\": \"type.googleapis.com/google.rpc.ErrorInfo\",\n \"reason\": \"API_KEY_INVALID\",\n \"domain\": \"googleapis.com\",\n \"metadata\": {\n \"service\": \"generativelanguage.googleapis.com\"\n }\n },\n {\n \"@type\": \"type.googleapis.com/google.rpc.LocalizedMessage\",\n \"locale\": \"en-US\",\n \"message\": \"API key not valid. Please pass a valid API key.\"\n }\n ]\n }\n}\n"
6 | }
7 | }
-------------------------------------------------------------------------------- /research_results_20250212_110507.json: --------------------------------------------------------------------------------
1 | {
2 | "query": "AI NEWS TODAY",
3 | "timestamp": "20250212_110507",
4 | "results": {
5 | "OpenAI": "Error with openai, apikey: AIza...[REDACTED]: litellm.AuthenticationError: AuthenticationError: OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: sk-proj-********************************************************************************************************************************************************o28A. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"
6 | }
7 | }
-------------------------------------------------------------------------------- /research_results_20250212_110607.json: --------------------------------------------------------------------------------
1 | {
2 | "query": "Market for ai powered auto commenter",
3 | "timestamp": "20250212_110607",
4 | "results": {
5 | "OpenAI": "Error with openai, apikey: AIza...[REDACTED]: LLM.__init__() missing 1 required positional argument: 'model'"
6 | }
7 | }
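Note: the `LLM.__init__() missing 1 required positional argument: 'model'` entry just above records an LLM() call made with no model at all. The model name is mandatory, as the working gemini branch in agents/thinking/model-comparison.py shows; a minimal sketch (the kwargs mirror that branch):

```python
from crewai import LLM

# model is the required argument; the provider prefix selects the backend.
llm = LLM(model="gemini/gemini-2.0-flash-001", temperature=0.7, max_tokens=2048)
```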
You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}" 6 | } 7 | } -------------------------------------------------------------------------------- /research_results_20250212_110656.json: -------------------------------------------------------------------------------- 1 | { 2 | "query": "Market for ai powered auto commenter", 3 | "timestamp": "20250212_110656", 4 | "results": { 5 | "OpenAI": "Error with openai, apikey: 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: litellm.AuthenticationError: AuthenticationError: OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: sk-proj-********************************************************************************************************************************************************o28A. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}" 6 | } 7 | } -------------------------------------------------------------------------------- /research_results_20250212_112527.json: -------------------------------------------------------------------------------- 1 | { 2 | "query": "Market for AI Powered Social Media autocommenter and top tools in this space.", 3 | "timestamp": "20250212_112527", 4 | "results": { 5 | "OpenAI": -------------------------------------------------------------------------------- /steps_to_run: -------------------------------------------------------------------------------- 1 | python3.11 -m venv myenv 2 | 3 | source myenv/bin/activate 4 | 5 | pip install crewai python-dotenv langchain_openai langchain_community langchain_ollama streamlit 6 | 7 | pip install 'crewai[tools]' 8 | 9 | -------------------------------------------------------------------------------- /teleprompter.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from tkinter import filedialog, messagebox, ttk 3 | import docx 4 | 5 | class Teleprompter: 6 | def __init__(self, master): 7 | self.master = master 8 | master.title("Modern Teleprompter") 9 | master.geometry("800x600") 10 | master.resizable(False, False) # Disable window resizing 11 | 12 | style = ttk.Style() 13 | style.theme_use('clam') 14 | 15 | self.main_frame = ttk.Frame(master) 16 | self.main_frame.pack(fill=tk.BOTH, expand=True) 17 | 18 | # Control frame at the top 19 | self.control_frame = ttk.Frame(self.main_frame) 20 | self.control_frame.pack(side=tk.TOP, fill=tk.X, padx=10, pady=10) 21 | 22 | self.load_button = ttk.Button(self.control_frame, text="Load Script", command=self.load_script) 23 | self.load_button.grid(row=0, column=0, padx=5, pady=5, sticky="ew") 24 | 25 | self.start_button = ttk.Button(self.control_frame, text="Start", command=self.start_scrolling) 26 | self.start_button.grid(row=0, column=1, padx=5, pady=5, sticky="ew") 27 | 28 | self.stop_button = ttk.Button(self.control_frame, text="Stop", command=self.stop_scrolling) 29 | self.stop_button.grid(row=0, column=2, padx=5, pady=5, sticky="ew") 30 | 31 | self.restart_button = ttk.Button(self.control_frame, text="Restart", command=self.restart_scrolling) 32 | self.restart_button.grid(row=0, column=3, padx=5, pady=5, sticky="ew") 33 | 34 | self.speed_label = ttk.Label(self.control_frame, text="Speed:") 35 | self.speed_label.grid(row=1, 
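The research_results_*.json logs above all fail the same avoidable way: a Google-style key (AIza…) is handed to OpenAI calls, and in research_results_20250212_110656.json an OpenAI key is committed verbatim (it should be rotated and moved to an environment variable). A pre-flight prefix check would surface the mix-up before any litellm call is made. The module below is an illustrative sketch, not a file in this repository, and the prefixes are heuristics for current key formats:

    # key_check.py -- hypothetical pre-flight validator (not part of this repo).
    # Heuristics: OpenAI keys currently start with "sk-",
    # Google AI Studio keys with "AIza".
    EXPECTED_PREFIXES = {
        "openai": ("sk-",),
        "gemini": ("AIza",),
    }

    def check_api_key(provider: str, api_key: str) -> None:
        """Fail fast if a key clearly belongs to a different provider."""
        if not api_key:
            raise ValueError(f"No API key supplied for {provider}")
        prefixes = EXPECTED_PREFIXES.get(provider.lower())
        if prefixes and not api_key.startswith(prefixes):
            raise ValueError(
                f"Key for {provider} looks wrong: expected prefix "
                f"{' or '.join(prefixes)}, got {api_key[:6]}..."
            )

    if __name__ == "__main__":
        try:
            check_api_key("openai", "AIzaSyExampleOnly")  # wrong provider on purpose
        except ValueError as err:
            print(err)

Calling check_api_key() once per provider before building the research crew would have turned every 401 above into an immediate, readable error at startup.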
/teleprompter.py:
--------------------------------------------------------------------------------
1 | import tkinter as tk
2 | from tkinter import filedialog, messagebox, ttk
3 | import docx
4 | 
5 | class Teleprompter:
6 |     def __init__(self, master):
7 |         self.master = master
8 |         master.title("Modern Teleprompter")
9 |         master.geometry("800x600")
10 |         master.resizable(False, False)  # Disable window resizing
11 | 
12 |         style = ttk.Style()
13 |         style.theme_use('clam')
14 | 
15 |         self.main_frame = ttk.Frame(master)
16 |         self.main_frame.pack(fill=tk.BOTH, expand=True)
17 | 
18 |         # Control frame at the top
19 |         self.control_frame = ttk.Frame(self.main_frame)
20 |         self.control_frame.pack(side=tk.TOP, fill=tk.X, padx=10, pady=10)
21 | 
22 |         self.load_button = ttk.Button(self.control_frame, text="Load Script", command=self.load_script)
23 |         self.load_button.grid(row=0, column=0, padx=5, pady=5, sticky="ew")
24 | 
25 |         self.start_button = ttk.Button(self.control_frame, text="Start", command=self.start_scrolling)
26 |         self.start_button.grid(row=0, column=1, padx=5, pady=5, sticky="ew")
27 | 
28 |         self.stop_button = ttk.Button(self.control_frame, text="Stop", command=self.stop_scrolling)
29 |         self.stop_button.grid(row=0, column=2, padx=5, pady=5, sticky="ew")
30 | 
31 |         self.restart_button = ttk.Button(self.control_frame, text="Restart", command=self.restart_scrolling)
32 |         self.restart_button.grid(row=0, column=3, padx=5, pady=5, sticky="ew")
33 | 
34 |         self.speed_label = ttk.Label(self.control_frame, text="Speed:")
35 |         self.speed_label.grid(row=1, column=0, padx=5, pady=5, sticky="w")
36 | 
37 |         self.speed_scale = ttk.Scale(self.control_frame, from_=0.1, to=50, orient=tk.HORIZONTAL)
38 |         self.speed_scale.set(5)
39 |         self.speed_scale.grid(row=1, column=1, columnspan=3, padx=5, pady=5, sticky="ew")
40 | 
41 |         self.font_size_label = ttk.Label(self.control_frame, text="Font Size:")
42 |         self.font_size_label.grid(row=2, column=0, padx=5, pady=5, sticky="w")
43 | 
44 |         self.font_size_scale = ttk.Scale(self.control_frame, from_=12, to=72, orient=tk.HORIZONTAL, command=self.change_font_size)
45 |         self.font_size_scale.set(24)
46 |         self.font_size_scale.grid(row=2, column=1, columnspan=3, padx=5, pady=5, sticky="ew")
47 | 
48 |         self.control_frame.grid_columnconfigure((0, 1, 2, 3), weight=1)
49 | 
50 |         # Text frame below the control frame
51 |         self.text_frame = ttk.Frame(self.main_frame)
52 |         self.text_frame.pack(fill=tk.BOTH, expand=True)
53 | 
54 |         self.text = tk.Text(self.text_frame, wrap=tk.WORD, font=("Arial", 24), bg="black", fg="white")
55 |         self.text.pack(fill=tk.BOTH, expand=True)
56 | 
57 |         self.scrollbar = ttk.Scrollbar(self.text_frame, orient="vertical", command=self.text.yview)
58 |         self.scrollbar.pack(side="right", fill="y")
59 |         self.text.configure(yscrollcommand=self.scrollbar.set)
60 | 
61 |         self.scrolling = False
62 |         self.scroll_position = 0.0
63 | 
64 |     def load_script(self):
65 |         file_path = filedialog.askopenfilename(filetypes=[("Word Document", "*.docx"), ("Text File", "*.txt")])
66 |         if file_path:
67 |             try:
68 |                 if file_path.endswith('.docx'):
69 |                     doc = docx.Document(file_path)
70 |                     full_text = "\n".join([paragraph.text for paragraph in doc.paragraphs])
71 |                 else:  # Assume it's a .txt file
72 |                     with open(file_path, 'r', encoding='utf-8') as file:
73 |                         full_text = file.read()
74 |                 self.text.delete(1.0, tk.END)
75 |                 self.text.insert(tk.END, full_text)
76 |                 self.scroll_position = 0.0
77 |                 self.text.yview_moveto(0)
78 |             except Exception as e:
79 |                 messagebox.showerror("Error", f"Failed to load the document: {str(e)}")
80 | 
81 |     def start_scrolling(self):
82 |         self.scrolling = True
83 |         self.scroll_text()
84 | 
85 |     def stop_scrolling(self):
86 |         self.scrolling = False
87 | 
88 |     def restart_scrolling(self):
89 |         self.scroll_position = 0.0
90 |         self.text.yview_moveto(0)
91 |         self.start_scrolling()
92 | 
93 |     def scroll_text(self):
94 |         if self.scrolling:
95 |             self.scroll_position += 0.0001 * self.speed_scale.get()
96 |             self.text.yview_moveto(self.scroll_position)
97 |             if self.scroll_position >= 1.0:
98 |                 self.stop_scrolling()
99 |                 return
100 |             self.master.after(20, self.scroll_text)
101 | 
102 |     def change_font_size(self, size):
103 |         new_size = int(float(size))
104 |         self.text.configure(font=("Arial", new_size))
105 |         # Adjust text widget height to maintain visibility of control frame
106 |         approx_lines = 500 // new_size  # Reduced from 600 to account for control frame
107 |         self.text.configure(height=approx_lines)
108 | 
109 | root = tk.Tk()
110 | teleprompter = Teleprompter(root)
111 | root.mainloop()
--------------------------------------------------------------------------------
/templates/voice_chat.html:
--------------------------------------------------------------------------------
[Markup lost in extraction; only fragments of the rendered text survive. The file is an HTML page of roughly 340 lines titled "Voice Chat Test Interface", with the description "Use this interface to test your ElevenLabs voice integration locally without making phone calls.", a connection status indicator that initially reads "Not connected", connect/disconnect and message controls, and inline styles and scripts that did not survive extraction.]
--------------------------------------------------------------------------------
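templates/voice_chat.html sits in Flask's default template directory, but no server that renders it appears in this listing. Below is a minimal sketch of how such a test page is typically served locally; the module name, route, and port are assumptions, not code from this repository:

    # serve_voice_chat.py -- hypothetical local test server (not part of this repo).
    from flask import Flask, render_template

    app = Flask(__name__)  # Flask resolves templates/ relative to this file

    @app.route("/")
    def voice_chat():
        # Render templates/voice_chat.html, the ElevenLabs test page above.
        return render_template("voice_chat.html")

    if __name__ == "__main__":
        app.run(debug=True, port=5000)  # then open http://localhost:5000

An actual voice round-trip would additionally need the page's stripped JavaScript and an ElevenLabs API key; this sketch only serves the static template.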