├── LICENSE
├── README.md
├── crewAI_next_generation_github_version.py
├── secrets_example.toml
└── tools
    ├── __init__.py
    ├── __pycache__
    │   ├── __init__.cpython-311.pyc
    │   ├── browser_tools.cpython-311.pyc
    │   ├── calculator_tools.cpython-311.pyc
    │   └── search_tools.cpython-311.pyc
    ├── browser_tools.py
    ├── calculator_tools.py
    └── search_tools.py
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Ingmar Stapel
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AI agents realized with CrewAI
2 | This program uses CrewAI to build a web app in which three agents carry out research on the internet.
3 |
4 | ## Blog Post
5 | For more information about the project, visit my blog: https://ai-box.eu/top-story/llm-agenten-arbeiten-eigenstaendig-mit-crewai-automatisieren/1306/
6 |
7 |
8 | ![CrewAI Web-APP](https://ai-box.eu/wp-content/uploads/2024/03/CrewAI_AI_agent_web_app.jpg)
9 |
10 | The video is available here: [https://www.youtube.com/watch?v=qMMvO6gsR4A](https://www.youtube.com/watch?v=qMMvO6gsR4A)
11 |
12 | This is my blog: [https://ai-box.eu/category/large-language-models/](https://ai-box.eu/category/large-language-models/)
13 |
--------------------------------------------------------------------------------
/crewAI_next_generation_github_version.py:
--------------------------------------------------------------------------------
1 | # Author: Ingmar Stapel
2 | # Date: 2024-03-30
3 | # Version: 1.0
4 | # Homepage: https://ai-box.eu/
5 |
6 | import os
7 | import json
8 | import requests
9 | from crewai import Agent, Task, Crew, Process
10 | from crewai_tools import tool
11 | import streamlit as st
12 | import datetime
13 | from langchain.prompts import PromptTemplate
14 | from langchain.chains import LLMChain
15 | from textwrap import dedent
16 |
17 | # Sources
18 | # The following GitHub repo helped me a lot to build this app:
19 | # URL: https://github.com/joaomdmoura/crewAI
20 |
21 | # This video helped me to get the Streamlit agent callback functionality running:
22 | # URL: https://www.youtube.com/watch?v=nKG_kbQUDDE
23 |
24 | # The repository from Tony Kipkemboi explains very nicely how to use agents and tools.
25 | # The SearchTools class is from his repository.
26 | # https://github.com/tonykipkemboi/trip_planner_agent
27 |
28 |
29 | # Additional information
30 | # Ollama, an open-source large language model server, is needed
31 | # to run this web app.
32 | # URL: https://ollama.com/
33 |
34 | # You can choose to use a local model through Ollama, for example.
35 | from langchain_community.llms import Ollama
36 |
37 |
38 | # The URL below shows the API endpoint and lists all available LLMs hosted by
39 | # the Ollama server you are running on-prem.
40 | # Please change the IP address to that of your Ollama server.
41 | json_url = "http://192.168.2.57:11434/api/tags"
42 | local_base_url="http://192.168.2.57:11434"
43 |
44 | # I have published a how-to on setting up the Ollama server so that it works over the network.
45 | # URL: https://ai-box.eu/top-story/ollama-ubuntu-installation-und-konfiguration/1191/
46 |
47 | # This configures the ollama_llm which will be used by our agents later.
48 | ollama_llm = Ollama(model="openhermes", base_url=local_base_url)
49 |
50 |
51 | # Install duckduckgo-search for this example:
52 | # !pip install -U duckduckgo-search
53 | from langchain_community.tools import DuckDuckGoSearchRun
54 | search_tool = DuckDuckGoSearchRun()
55 |
56 | from tools.search_tools import SearchTools
57 |
58 | st.set_page_config(page_title="Your network of AI agents")
59 |
60 | tab0, tab4, tab1, tab3, tab2 = st.tabs(["Main: ", "The tasks", "Researcher: ", "Business Angel: ", "Author: "])
61 |
62 | task_value_1 = "empty"
63 | task_value_2 = "empty"
64 | task_value_3 = "empty"
65 | # Fetch JSON data from the URL with the model names
66 | response = requests.get(json_url)
67 |
68 |
69 | # This is more or less a workaround that wraps DuckDuckGo search as a CrewAI tool (dd_search).
70 | @tool('DuckDuckGoSearch')
71 | def dd_search(search_query: str):
72 |     """Search the web for information on a given topic"""
73 |     return DuckDuckGoSearchRun().run(search_query)
74 |
75 | # The streamlit_callback function below displays what the agents are currently doing.
76 | def streamlit_callback(step_output):
77 |     # This function will be called after each step of the agent's execution
78 |     st.markdown("---")
79 |     for step in step_output:
80 |         if isinstance(step, tuple) and len(step) == 2:
81 |             action, observation = step
82 |             if isinstance(action, dict) and "tool" in action and "tool_input" in action and "log" in action:
83 |                 st.markdown("# Action")
84 |                 st.markdown(f"**Tool:** {action['tool']}")
85 |                 st.markdown(f"**Tool Input:** {action['tool_input']}")
86 |                 st.markdown(f"**Log:** {action['log']}")
87 |                 st.markdown(f"**Action:** {action.get('Action', '')}")  # .get avoids a KeyError when the 'Action' key is missing
88 |                 st.markdown(
89 |                     f"**Action Input:** ```json\n{action['tool_input']}\n```")
90 |             elif isinstance(action, str):
91 |                 st.markdown(f"**Action:** {action}")
92 |             else:
93 |                 st.markdown(f"**Action:** {str(action)}")
94 |
95 |             st.markdown("**Observation:**")
96 |             if isinstance(observation, str):
97 |                 observation_lines = observation.split('\n')
98 |                 for line in observation_lines:
99 |                     if line.startswith('Title: '):
100 |                         st.markdown(f"**Title:** {line[7:]}")
101 |                     elif line.startswith('Link: '):
102 |                         st.markdown(f"**Link:** {line[6:]}")
103 |                     elif line.startswith('Snippet: '):
104 |                         st.markdown(f"**Snippet:** {line[9:]}")
105 |                     elif line.startswith('-'):
106 |                         st.markdown(line)
107 |                     else:
108 |                         st.markdown(line)
109 |             else:
110 |                 st.markdown(str(observation))
111 |         else:
112 |             st.markdown(step)
113 |
114 | # Now set the session state for the text variables.
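# Optional sketch on the same idea: the three defaults below could also be set in a
# single loop via st.session_state.setdefault (available on recent Streamlit
# versions). Kept commented out so the explicit checks below remain the active code:
#
# for _key in ("text_task_in1", "text_task_in2", "text_task_in3"):
#     st.session_state.setdefault(_key, None)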
115 | if "text_task_in1" not in st.session_state: 116 | st.session_state.text_task_in1 = None 117 | 118 | if "text_task_in2" not in st.session_state: 119 | st.session_state.text_task_in2 = None 120 | 121 | if "text_task_in3" not in st.session_state: 122 | st.session_state.text_task_in3 = None 123 | 124 | # Start with the design and building up the functionality of the web-app. 125 | # The architecture and technical design of the web-app is not very nice. 126 | # Feel free to optimize that. 127 | with tab1: 128 | st.subheader("Your research agent:") 129 | 130 | # Check if the request was successful 131 | if response.status_code == 200: 132 | # Parse the JSON response 133 | data = response.json() 134 | 135 | # Extract the model names from the JSON response 136 | names = [model["name"] for model in data["models"]] 137 | 138 | default_id=names.index("openhermes:latest") 139 | # Populate the dropdown box 140 | model_researcher = st.selectbox('Select a LLM model for the researcher:', names, key="model_researcher", index=default_id) 141 | else: 142 | st.error(f"Failed to fetch data from {json_url}. Error code: {response.status_code}") 143 | 144 | # Create a slider to select the temperature of the llm 145 | temperature_researcher = st.slider('Select a LLM temperature value between 0 and 1 [higher is more creative, lower is more coherent]', key="temperature_researcher", min_value=0.0, max_value=1.0, step=0.01) 146 | 147 | max_iterations_researcher = st.selectbox('Set the max value for interations:', ('5', '10', '15', '20', '25'), key="iter_researcher", index=2) 148 | ollama_llm_researcher = Ollama(model=model_researcher, base_url=local_base_url, temperature=temperature_researcher) 149 | 150 | role_researcher = st.text_area('role:','Senior research analyst', key="role_researcher", height=20) 151 | goal_researcher = st.text_area('goal:', 'As a Senior Research Analyst, you play a key role in analyzing data to offer strategic insights for decision-making. This requires strong analytical skills, critical thinking, and industry knowledge.', key="goal_researcher", height=200) 152 | backstory_researcher = st.text_area('backstory:', 'As a Senior Research Analyst, you hold an advanced degree in fields like economics or statistics. With expertise in research methodologies and data analysis, you execute projects across diverse industries. Your insights aid decision-making, and you stay updated on industry trends through continuous learning.', key="backstory_researcher", height=200) 153 | 154 | with tab2: 155 | st.subheader("Your author agent:") 156 | 157 | # Check if the request was successful 158 | if response.status_code == 200: 159 | # Parse the JSON response 160 | data = response.json() 161 | 162 | # Extract the model names from the JSON response 163 | names = [model["name"] for model in data["models"]] 164 | 165 | default_id=names.index("mistral:latest") 166 | # Populate the dropdown box 167 | model_autor = st.selectbox('Select a LLM model for the autor:', names, key="model_autor", index=default_id) 168 | else: 169 | st.error(f"Failed to fetch data from {json_url}. 
Error code: {response.status_code}")
170 |
171 |     # Create a slider to select the temperature of the llm
172 |     temperature_autor = st.slider('Select an LLM temperature value between 0 and 1 [higher is more creative, lower is more coherent]', key="temperature_autor", min_value=0.0, max_value=1.0, step=0.01)
173 |
174 |     max_iterations_autor = st.selectbox('Set the max value for iterations:', ('5', '10', '15', '20', '25'), key="iter_autor", index=2)
175 |     ollama_llm_autor = Ollama(model=model_autor, base_url=local_base_url, temperature=temperature_autor)
176 |
177 |
178 |     role_autor = st.text_area('role:','Tech content author', key="role_autor", height=20)
179 |     goal_autor = st.text_area('goal:', 'As a Tech Content Author, you play a crucial role in creating and curating high-quality content focused on technology topics. This role requires a combination of technical expertise, writing proficiency, and the ability to communicate complex concepts in a clear and engaging manner.',
180 |         key="goal_autor", height=200)
181 |     backstory_autor = st.text_area('backstory:', 'As a Tech Content Author, you hold a degree in journalism, communications, computer science, or related fields. With a passion for technology, you possess a deep understanding of technical concepts and trends. Starting your career in roles like technical writing or content creation, you have honed strong writing skills and the ability to simplify complex ideas. Through continuous learning, you stay updated on emerging technologies, ensuring your content remains relevant in the ever-changing tech landscape.',
182 |         key="backstory_autor", height=200)
183 |
184 |
185 | with tab3:
186 |     # This tab is used to define the agent-specific LLM and the agent itself.
187 |     # All the descriptions below serve as an example so that the user of this web app
188 |     # gets an idea of how to define such an agent.
189 |     st.subheader("Your investor agent:")
190 |
191 |     # Check if the request was successful and the Ollama server is responding.
192 |     if response.status_code == 200:
193 |         # Parse the JSON response
194 |         data = response.json()
195 |
196 |         # Extract the model names from the JSON response generated by the Ollama server
197 |         names = [model["name"] for model in data["models"]]
198 |
199 |         # Populate the dropdown box with the available models. Set openhermes as default.
200 |         default_id=names.index("openhermes:latest")
201 |         model_consultant = st.selectbox('Select an LLM model for the agent:', names, key="model_consultant", index=default_id)
202 |     else:
203 |         st.error(f"Failed to fetch data from {json_url}. Error code: {response.status_code}")
204 |
205 |     # Create a slider to select the temperature of the llm
206 |     temperature_consultant = st.slider('Select an LLM temperature value between 0 and 1 [higher is more creative, lower is more coherent]', key="temperature_consultant", min_value=0.0, max_value=1.0, step=0.01)
207 |
208 |     # Set the maximum number of iterations an agent is allowed to run.
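    # Optional sketch, related to the Ollama(...) call a few lines below: a quick,
    # commented-out way to check that the Ollama endpoint answers at all, independent
    # of CrewAI. It assumes the server behind local_base_url is reachable and already
    # has the "openhermes" model pulled.
    #
    # _probe = Ollama(model="openhermes", base_url=local_base_url, temperature=0.0)
    # st.write(_probe.invoke("Reply with the single word OK."))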
209 | max_iterations_consultant = st.selectbox('Set the max value for interations:', ('5', '10', '15', '20', '25'), key="iter_consultant", index=2) 210 | 211 | # Define the llm call for the ollama server we like to use for our agent 212 | ollama_llm_consultant = Ollama(model=model_consultant, base_url=local_base_url, temperature=temperature_consultant) 213 | 214 | # Define now our agent 215 | role_consultant = st.text_area('role:','Business Angel and venture capital consultant', key="role_consultant", height=20) 216 | goal_consultant = st.text_area('goal:', 'As a Business Angels and Venture Capital Consultant you are playing a vital role in the startup ecosystem by providing funding, mentorship, and strategic guidance to early-stage companies. While their roles share similarities, they differ in terms of investment focus, funding sources, and level of involvement.', 217 | key="goal_consultant", height=200) 218 | backstory_consultant = st.text_area('backstory:', 'Business Angels and Venture Capital Consultants typically possess extensive experience in finance, entrepreneurship, and investment management. They may have backgrounds in fields such as investment banking, private equity, corporate finance, or startup leadership. Many have built successful careers in the financial industry, gaining expertise in deal sourcing, due diligence, portfolio management, and strategic advisory.', 219 | key="backstory_consultant", height=200) 220 | 221 | with tab4: 222 | st.subheader("The agent tasks:") 223 | 224 | st.session_state.text_task_in1 = st.text_area('Task 1 Researcher:', 225 | dedent(f"""Conduct a comprehensive analysis of the latest high performing startups active in the 226 | field of generative AI. It is important that those startups with their advancements in 227 | generative AI are active in the finance sector since a year. Identify key startups, 228 | breakthrough technologies, and potential fast growing startups with impact in the finance 229 | sector caused by generative AI. As a researcher you analyse how generative AI will change 230 | the finance industry. It would be good to know if that startup is still searching for money 231 | investments actively. Your final answer MUST be a full analysis report. 232 | Example Report: 233 | Finance Tech Startup Research Table: 234 | - Startup 1: 235 | - Name: "Kern AI" 236 | - Investment sum: 1.00.00.000 237 | - Founded in: 2022 238 | - Number of Employees: 50 239 | - Company homepage: https://www.kern.ai/ 240 | - Startup 2: 241 | - Name: "Scrub AI" 242 | - Investment sum: 5.00.00.000 243 | - Founded in: 2023 244 | - Number of Employees: 22 245 | - Company homepage: https://scrub-ai.com/ 246 | Today is the """)+str(datetime.date.today())+""" .""", key="text_task_in_1") 247 | 248 | st.session_state.text_task_in2 = st.text_area('Task 2 Autor / Writer:', 249 | dedent(f"""Using the insights provided, write an article like an engaging blog post that highlights the most significant startups 250 | active in generative AI with important advancements in this field. Your written article should be informative yet accessible, catering to a tech-savvy startup scene and 251 | audience. Make it sound cool, avoid complex words so it doesn't sound like AI. Your final answer MUST be the a full structures blog post 252 | The article you are writing has a minimum of 1600 words and highlights 10 startups. In the summary please list the startups with web addresses like url's headlines and bullet points for easy reading. 
253 | The text itself is enriched with nice emojis to highlight important parts. 254 | 255 | The structure of the article you have to write could look like the example below: 256 | 257 | Example article structure: 258 | Executive Summary: 259 | - Overview of the AI startup's performance. 260 | - Key financial metrics and achievements. 261 | - Future growth prospects. 262 | - Introduction: 263 | - Brief background of the AI startup. 264 | - Mission and objectives. 265 | - Market Analysis: 266 | - Analysis of the AI market segment. 267 | - Growth trends and opportunities. 268 | - Competitive landscape. 269 | - Business Model: 270 | - Description of the AI startup's business model. 271 | - Revenue streams. 272 | - Cost structure. 273 | - Financial Performance: 274 | - Revenue analysis: 275 | - Revenue growth over time. 276 | - Revenue sources (e.g., product sales, subscriptions, services). 277 | - Profitability analysis: 278 | - Gross profit margin. 279 | - Operating profit margin. 280 | - Net profit margin. 281 | - Cash flow analysis: 282 | - Operating cash flow. 283 | - Investing cash flow. 284 | - Financing cash flow. 285 | - Balance sheet analysis: 286 | - Assets composition. 287 | - Liabilities and equity. 288 | - Key financial ratios: 289 | - Return on Investment (ROI). 290 | - Return on Equity (ROE). 291 | - Debt-to-Equity ratio. 292 | - Current ratio. 293 | - Quick ratio. 294 | - Investment Analysis: 295 | - Valuation: 296 | - Methods used (e.g., Discounted Cash Flow, Comparable Company Analysis). 297 | - Assumptions and inputs. 298 | - Investment risks: 299 | - Market risks. 300 | - Technology risks. 301 | - Regulatory risks. 302 | - Strategic Initiatives: 303 | - Expansion plans. 304 | - Research and development efforts. 305 | - Strategic partnerships. 306 | - Conclusion: 307 | - Summary of key findings. 308 | - Recommendations for investors. 309 | - Future outlook. 310 | - Appendix: 311 | - Detailed financial tables. 312 | - Glossary of financial terms. 313 | - References: 314 | - Sources of information used in the report. \nToday is the: """) +str(datetime.date.today())+""" .""", key="text_task_in_2") 315 | 316 | st.session_state.text_task_in3 = st.text_area('Task 3 Business Angel:', dedent(f"""Involve evaluating investment opportunities, conducting due diligence 317 | on potential ventures, and advising startups on strategy, fundraising, and growth tactics. Search how much venture capital each startup already raised. 318 | Add a comment if an future investment would be an option for an investor. Only from interest are startups in finance sector which are active over the last 319 | year and this year. Additionally, they often facilitate connections between entrepreneurs and potential investors, leveraging their network to bridge the 320 | gap between promising startups and capital sources. 321 | Executive Summary: 322 | - Concise overview of the investment opportunity. 323 | - Highlights of key figures and decision points. 324 | - Summary of investment recommendations. 325 | Introduction: 326 | - Introduction to the company or opportunity being presented. 327 | - Purpose of the report. 328 | - Scope and methodology. 329 | Market Analysis: 330 | - Market overview: 331 | - Size, growth rate, and trends. 332 | - Market segmentation. 333 | - Competitive landscape: 334 | - Major players and market share. 335 | - Competitive advantages of the company. 336 | Business Model: 337 | - Description of the company's business model. 338 | - Revenue streams and sources. 
339 | - Cost structure and scalability. 340 | Financial Performance: 341 | - Revenue analysis: 342 | - Historical revenue trends. 343 | - Forecasted revenue growth. 344 | - Profitability analysis: 345 | - Gross margin, operating margin, net margin. 346 | - Cash flow analysis: 347 | - Operating cash flow, free cash flow. 348 | - Key financial ratios: 349 | - Return on Investment (ROI), Return on Equity (ROE), Debt-to-Equity ratio, etc. 350 | Investment Thesis: 351 | 352 | Investment opportunity: 353 | - Value proposition. 354 | - Unique selling points. 355 | - Potential returns: 356 | - Expected ROI. 357 | - Risk-adjusted returns. 358 | - Risks and Mitigation Strategies: 359 | 360 | - Identification of potential risks: 361 | - Market risks, operational risks, regulatory risks, etc. 362 | - Mitigation strategies: 363 | - Plans to address identified risks. 364 | - Strategic Growth Initiatives: 365 | Expansion plans: 366 | - Geographic expansion, product diversification, etc. 367 | Research and development: 368 | - Innovation pipeline and investments. 369 | Strategic partnerships: 370 | - Alliances, joint ventures, collaborations. 371 | Valuation: 372 | Valuation methodology: 373 | - Discounted Cash Flow (DCF), Comparable Company Analysis (CCA), etc. 374 | - Valuation assumptions and inputs. 375 | Investment Recommendations: 376 | - Summary of key findings and analysis. 377 | Investment decision: 378 | - Buy, sell, hold recommendations. 379 | - Justification of recommendations. 380 | Conclusion: 381 | - Summary of the investment opportunity. 382 | - Closing remarks. 383 | Appendix: 384 | - Detailed financial tables. 385 | - Glossary of financial terms. 386 | - Assumptions used in the analysis. 387 | References: 388 | - Sources of information used in the report. 389 | Today is the """)+str(datetime.date.today())+""" .""", key="text_task_in_3") 390 | 391 | task_in_1_new = st.session_state.text_task_in1 392 | task_in_2_new = st.session_state.text_task_in2 393 | task_in_3_new = st.session_state.text_task_in3 394 | 395 | with tab0: 396 | st.title('Do my analysis') 397 | task_description = st.text_area('Your short task description here is used to re-write Task 1 - Task 3 so that they fit thematically with the new input.') 398 | 399 | # Check if the request was successful 400 | if response.status_code == 200: 401 | # Parse the JSON response 402 | data = response.json() 403 | 404 | # Extract the model names from the JSON response 405 | names = [model["name"] for model in data["models"]] 406 | 407 | # Populate the dropdown box 408 | default_id=names.index("openhermes:latest") 409 | model_rewrite = st.selectbox('Select a LLM model for re-writing the tasks 1 - 3:', names, key="model_rewrite", index=default_id) 410 | else: 411 | st.error(f"Failed to fetch data from {json_url}. 
Error code: {response.status_code}")
412 |     # Create a slider to select the temperature of the llm
413 |     temperature_rewrite_task = st.slider('Select an LLM temperature value between 0 and 1 [higher is more creative, lower is more coherent]', min_value=0.0, max_value=1.0, step=0.01)
414 |
415 |     if st.button('Start Generation NOW'):
416 |         with st.status("🤖 **Now rewriting the tasks for your three agents...**", state="running", expanded=True) as status:
417 |             ollama_llm_rewrite_task = Ollama(model=model_rewrite, base_url=local_base_url, temperature=temperature_rewrite_task)
418 |
419 |             template_task_1 = "As an AI assistant, please write a task description for an AI agent whose role is to be a researcher who likes to understand various topics. This is an example task description for an AI agent. The AI agent needs this task to understand what it has to do. \n Example task description:\n" + st.session_state.text_task_in1 + "\n Please rewrite this task description for the new topic, which is described as follows: \n New topic: \n{task_description} \nIt is important that the rewritten task description keeps the structure of the example task description provided."
420 |             prompt_task_1 = PromptTemplate(template=template_task_1, input_variables=["task_description"])
421 |             llm_chain = LLMChain(prompt=prompt_task_1, llm=ollama_llm_rewrite_task)
422 |             task_in_1_new = llm_chain.run({"task_description": task_description})
423 |
424 |             template_task_3 = "As an AI assistant, please write a task description for an AI agent whose role is a business angel investor who does analysis. This is an example task description for an AI agent. The AI agent needs this task to understand what it has to do. \n Example task description:\n" + st.session_state.text_task_in3 + "\n Please rewrite this task description for the new topic, which is described as follows: \n New topic: \n{task_description} \nIt is important that the rewritten task description keeps the structure of the example task description provided."
425 |             prompt_task_3 = PromptTemplate(template=template_task_3, input_variables=["task_description"])
426 |             llm_chain = LLMChain(prompt=prompt_task_3, llm=ollama_llm_rewrite_task)
427 |             task_in_3_new = llm_chain.run({"task_description": task_description})
428 |
429 |             template_task_2 = "As an AI assistant, please write a task description for an AI agent whose role is to be an author who likes to write articles. This is an example task description for an AI agent. The AI agent needs this task to understand what it has to do. \n Example task description:\n" + st.session_state.text_task_in2 + "\n Please rewrite this task description for the new topic, which is described as follows: \n New topic: \n{task_description} \nIt is important that the rewritten task description keeps the structure of the example task description provided."
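            # Optional aside on the pattern used here: PromptTemplate + LLMChain is the
            # classic LangChain interface. On newer LangChain releases the same rewrite
            # step can also be expressed with the runnable pipe syntax, roughly
            # (untested sketch, same inputs and output as llm_chain.run below):
            #
            # task_in_2_new = (prompt_task_2 | ollama_llm_rewrite_task).invoke(
            #     {"task_description": task_description})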
430 |             prompt_task_2 = PromptTemplate(template=template_task_2, input_variables=["task_description"])
431 |             llm_chain = LLMChain(prompt=prompt_task_2, llm=ollama_llm_rewrite_task)
432 |             task_in_2_new = llm_chain.run({"task_description": task_description})
433 |
434 |
435 |         st.text_area('Task 1 Researcher rewritten:', task_in_1_new, key="text_task_in_1_re")
436 |         st.text_area('Task 3 Business Angel rewritten:', task_in_3_new, key="text_task_in_3_re")
437 |         st.text_area('Task 2 Author / Writer rewritten:', task_in_2_new, key="text_task_in_2_re")
438 |
439 |         # Define your agents with roles and goals
440 |         researcher = Agent(
441 |             max_iter=int(max_iterations_researcher),  # st.selectbox returns a string, so convert it to int
442 |             role=role_researcher,
443 |             goal=goal_researcher,
444 |             backstory=backstory_researcher,
445 |
446 |             verbose=True,
447 |             allow_delegation=True,
448 |             tools=[
449 |                 SearchTools.search_internet,
450 |                 dd_search,
451 |             ],
452 |             llm=ollama_llm_researcher,
453 |             step_callback=streamlit_callback
454 |         )
455 |
456 |         consultant = Agent(
457 |             max_iter=int(max_iterations_consultant),
458 |             role=role_consultant,
459 |             goal=goal_consultant,
460 |             backstory=backstory_consultant,
461 |
462 |             verbose=True,
463 |             allow_delegation=False,
464 |             tools=[
465 |                 SearchTools.search_internet,
466 |                 dd_search,
467 |             ],
468 |             llm=ollama_llm_consultant,
469 |             step_callback=streamlit_callback
470 |         )
471 |
472 |         autor = Agent(
473 |             max_iter=int(max_iterations_autor),
474 |             role=role_autor,
475 |             goal=goal_autor,
476 |             backstory=backstory_autor,
477 |             verbose=True,
478 |             allow_delegation=False,
479 |             llm=ollama_llm_autor,
480 |             step_callback=streamlit_callback
481 |         )
482 |
483 |         # Create tasks for your agents
484 |         task1 = Task(
485 |             description=task_in_1_new,
486 |             agent=researcher,
487 |             expected_output="Do my work please"
488 |         )
489 |
490 |         # Create tasks for your agents
491 |         task2 = Task(
492 |             description=task_in_2_new,
493 |             agent=autor,
494 |             expected_output="Do my work please"
495 |         )
496 |
497 |         # Create tasks for your agents
498 |         task3 = Task(
499 |             description=task_in_3_new,
500 |             agent=consultant,
501 |             expected_output="Do my work please"
502 |         )
503 |
504 |         with st.status("🤖 **Agents doing your work...**", state="running", expanded=True) as status:
505 |             with st.container(height=800, border=False):
506 |                 crew = Crew(
507 |                     agents=[researcher, consultant, autor],
508 |                     tasks=[task1, task3, task2],
509 |                     verbose=2, # You can set it to 1 or 2 for different logging levels
510 |                 )
511 |                 result = crew.kickoff()
512 |             status.update(label="✅ Research activity finished!",
513 |                 state="complete", expanded=False)
514 |
515 |         print("######################")
516 |         print(result)
517 |         st.subheader('Your requested analysis is ready: :blue[how cool is that] :sunglasses:')
518 |         st.markdown(result)
519 |
520 |         st.download_button(
521 |             label="Download",
522 |             data=result,
523 |             file_name="meeting_prep.md",
524 |             mime="text/plain"
525 |         )
526 |
--------------------------------------------------------------------------------
/secrets_example.toml:
--------------------------------------------------------------------------------
1 | SERPER_API_KEY="API_KEY_HERE" # https://serper.dev/ (free tier)
2 | BROWSERLESS_API_KEY="API_KEY_HERE" # https://www.browserless.io/ (free tier)
3 | OPENAI_API_KEY="API_KEY_HERE"
--------------------------------------------------------------------------------
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/custom-build-robots/ai-agents-with-CrewAI/1acb63368162588956469c97b9ec25b420cc20cb/tools/__init__.py
--------------------------------------------------------------------------------
/tools/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/custom-build-robots/ai-agents-with-CrewAI/1acb63368162588956469c97b9ec25b420cc20cb/tools/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/tools/__pycache__/browser_tools.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/custom-build-robots/ai-agents-with-CrewAI/1acb63368162588956469c97b9ec25b420cc20cb/tools/__pycache__/browser_tools.cpython-311.pyc
--------------------------------------------------------------------------------
/tools/__pycache__/calculator_tools.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/custom-build-robots/ai-agents-with-CrewAI/1acb63368162588956469c97b9ec25b420cc20cb/tools/__pycache__/calculator_tools.cpython-311.pyc
--------------------------------------------------------------------------------
/tools/__pycache__/search_tools.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/custom-build-robots/ai-agents-with-CrewAI/1acb63368162588956469c97b9ec25b420cc20cb/tools/__pycache__/search_tools.cpython-311.pyc
--------------------------------------------------------------------------------
/tools/browser_tools.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import requests
4 | import streamlit as st
5 | from crewai import Agent, Task
6 | from langchain.tools import tool
7 | from unstructured.partition.html import partition_html
8 |
9 |
10 | class BrowserTools():
11 |
12 |     @tool("Scrape website content")
13 |     def scrape_and_summarize_website(website):
14 |         """Useful to scrape and summarize a website's content"""
15 |         url = f"https://chrome.browserless.io/content?token={st.secrets['BROWSERLESS_API_KEY']}"
16 |         payload = json.dumps({"url": website})
17 |         headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
18 |         response = requests.request("POST", url, headers=headers, data=payload)
19 |         elements = partition_html(text=response.text)
20 |         content = "\n\n".join([str(el) for el in elements])
21 |         content = [content[i:i + 8000] for i in range(0, len(content), 8000)]
22 |         summaries = []
23 |         for chunk in content:
24 |             agent = Agent(
25 |                 role='Principal Researcher',
26 |                 goal=
27 |                 'Do amazing research and summaries based on the content you are working with',
28 |                 backstory=
29 |                 "You're a Principal Researcher at a big company and you need to do research about a given topic.",
30 |                 allow_delegation=False)
31 |             task = Task(
32 |                 agent=agent,
33 |                 description=
34 |                 f'Analyze and summarize the content below; make sure to include the most relevant information in the summary and return only the summary, nothing else.\n\nCONTENT\n----------\n{chunk}'
35 |             )
36 |             summary = task.execute()
37 |             summaries.append(summary)
38 |         return "\n\n".join(summaries)
39 |
--------------------------------------------------------------------------------
/tools/calculator_tools.py:
--------------------------------------------------------------------------------
1 | from langchain.tools import tool
2 |
3 |
4 | class CalculatorTools():
5 |
6 |     @tool("Make a calculation")
7 |     def calculate(operation):
8 |         """Useful to perform any mathematical calculations,
9 |         like sum, minus, multiplication, division, etc.
10 |         The input to this tool should be a mathematical
11 |         expression, a couple examples are `200*7` or `5000/2*10`
12 |         """
13 |         return eval(operation)  # note: eval() runs arbitrary Python, so only feed it trusted, locally generated expressions
14 |
--------------------------------------------------------------------------------
/tools/search_tools.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import requests
4 | import streamlit as st
5 | from langchain.tools import tool
6 |
7 |
8 | class SearchTools():
9 |
10 |     @tool("Search the internet")
11 |     def search_internet(query):
12 |         """Useful to search the internet
13 |         about a given topic and return relevant results"""
14 |         top_result_to_return = 4
15 |         url = "https://google.serper.dev/search"
16 |         payload = json.dumps({"q": query})
17 |         headers = {
18 |             'X-API-KEY': st.secrets['SERPER_API_KEY'],
19 |             'content-type': 'application/json'
20 |         }
21 |         response = requests.request("POST", url, headers=headers, data=payload)
22 |         # check if there is an organic key
23 |         if 'organic' not in response.json():
24 |             return "Sorry, I couldn't find anything about that; there could be an error with your Serper API key."
25 |         else:
26 |             results = response.json()['organic']
27 |             string = []
28 |             for result in results[:top_result_to_return]:
29 |                 try:
30 |                     string.append('\n'.join([
31 |                         f"Title: {result['title']}", f"Link: {result['link']}",
32 |                         f"Snippet: {result['snippet']}", "\n-----------------"
33 |                     ]))
34 |                 except KeyError:
35 |                     continue  # skip results that are missing one of the expected fields
36 |
37 |             return '\n'.join(string)
38 |
--------------------------------------------------------------------------------
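A minimal, self-contained sketch of the Agent/Task/Crew pattern the main script builds on, reduced to one agent and one task. This is an illustrative example rather than part of the repository: it assumes the same Ollama server address used above (adjust it to your setup), an "openhermes" model that has already been pulled, and the crewai / langchain-community versions the main script targets; the file name is arbitrary.

    # minimal_crew_example.py (illustrative sketch only)
    from crewai import Agent, Task, Crew
    from langchain_community.llms import Ollama

    OLLAMA_URL = "http://192.168.2.57:11434"  # change this to your own Ollama server
    llm = Ollama(model="openhermes", base_url=OLLAMA_URL, temperature=0.2)

    # One agent with a role, goal, and backstory, mirroring the app's agent setup.
    researcher = Agent(
        role="Senior research analyst",
        goal="Summarize the current state of generative AI startups in finance.",
        backstory="You analyze markets and produce concise research notes.",
        llm=llm,
        verbose=True,
        allow_delegation=False,
    )

    # One task bound to that agent, mirroring the app's Task definitions.
    task = Task(
        description="Write a five-bullet summary of generative AI use cases in finance.",
        expected_output="Five short bullet points.",
        agent=researcher,
    )

    crew = Crew(agents=[researcher], tasks=[task], verbose=True)
    print(crew.kickoff())

Running this from the command line (python minimal_crew_example.py) is a quick way to confirm that CrewAI can reach the Ollama server before starting the full web app with streamlit run crewAI_next_generation_github_version.py.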