├── .env ├── prompts.txt ├── requirements.txt └── techx_workshop.py /.env: -------------------------------------------------------------------------------- 1 | WATSONX_PROJECTID= 2 | WATSONX_APIKEY= 3 | SERPER_API_KEY= -------------------------------------------------------------------------------- /prompts.txt: -------------------------------------------------------------------------------- 1 | "Senior Finance Analyst" 2 | "Research company financials in order to produce insights." 3 | "You are a veteran finance analyst with a background in banking and analysis." 4 | 5 | "Senior Python Engineer" 6 | "Build, Test and Execute code for the research team" 7 | "You are a veteran Python engineer with a background in computer science and physics." 8 | 9 | 10 | find me a list of promising AI companies to invest in. 11 | a summary list with details of the company. use the yfinance python library to get financial metrics including EPS and P/E ratio over the last financial year. 12 | 13 | a summary list with details of the company. use the python tool to calculate financial metrics including EPS and P/E ratio. 
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | crewai==0.22.5 2 | crewai-tools==0.0.15 3 | langchain-ibm==0.1.3 -------------------------------------------------------------------------------- /techx_workshop.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | import os 3 | from colorama import init, Fore 4 | 5 | # Bring in LLM class 6 | from langchain_ibm import WatsonxLLM 7 | 8 | # CrewAI 9 | from crewai_tools import SerperDevTool 10 | from crewai import Crew, Agent, Task 11 | 12 | params = {"decoding_method": "greedy", "max_new_tokens": 1000} 13 | 14 | llm = WatsonxLLM( 15 | model_id="mistralai/mistral-large", 16 | project_id=os.environ["WATSONX_PROJECTID"], 17 | url="https://us-south.ml.cloud.ibm.com", 18 | params=params, 19 | ) 20 | 21 | search = SerperDevTool() 22 | researcher = Agent( 23 | llm=llm, 24 | function_calling_llm=llm, 25 | role="Senior Finance Analyst", 26 | goal="Research company financials in order to produce insights.", 27 | backstory="You are a veteran finance analyst with a background in banking and analysis.", 28 | verbose=True, 29 | allow_delegation=True, 30 | tools=[search], 31 | ) 32 | # Agent 2 33 | developer = Agent( 34 | llm=llm, 35 | function_calling_llm=llm, 36 | role="Senior Python Engineer", 37 | goal="Build, Test and Execute code for the research team", 38 | backstory="You are a veteran Python engineer with a background in computer science and physics.", 39 | verbose=True, 40 | allow_delegation=True, 41 | allow_code_execution=True, 42 | ) 43 | 44 | if __name__ == "__main__": 45 | while True: 46 | description = input( 47 | Fore.YELLOW + "Enter your prompt here, type \quit to exit: " + Fore.RESET 48 | ) 49 | expected_output = input( 50 | Fore.YELLOW + "Whats your desired output: " + Fore.RESET 51 | ) 52 | 53 | if 
description.lower() == "\quit": 54 | print(Fore.RED + "Exiting, catch ya later." + Fore.RESET) 55 | break 56 | 57 | task1 = Task( 58 | description=description, expected_output=expected_output, agent=researcher 59 | ) 60 | crew = Crew( 61 | agents=[researcher, developer], 62 | tasks=[task1], 63 | verbose=True, 64 | ) 65 | 66 | # Send to llm 67 | response = crew.kickoff() 68 | print(Fore.LIGHTMAGENTA_EX + response + Fore.RESET) 69 | --------------------------------------------------------------------------------