├── ARK-Morgendagensapplikasjoner.pptx ├── Hallo_MariusSandbuDND.pptx ├── agentsmith.py ├── ai.gpt ├── capture.py ├── chainlit.py ├── chatgpt-basic.py ├── cloudgpt.md ├── copilot.md ├── eldenmark_2024_final 1.pptx ├── ex-langchain.py ├── ex-langchainsqlstreamlit.py ├── expertslivedk ├── agentsmith.py ├── david.py ├── el-autogen.py ├── el-kodetake.py ├── el-langchain.py ├── el-langchainagent.py ├── jarvissql.py ├── narrator.py ├── narrator_capture.py └── readme.md ├── genai.md ├── gptagents.md ├── jarvissql.py ├── kodeanalytiker.py ├── langchain-langsmith.py ├── langchain-pdf.py ├── mvpdagen-25-10.pptx ├── narrator.py ├── secagent.py └── streamlit.py /ARK-Morgendagensapplikasjoner.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/msandbu/gpt-ai/f31508021a877a964abab9ab770618acc9f96f64/ARK-Morgendagensapplikasjoner.pptx -------------------------------------------------------------------------------- /Hallo_MariusSandbuDND.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/msandbu/gpt-ai/f31508021a877a964abab9ab770618acc9f96f64/Hallo_MariusSandbuDND.pptx -------------------------------------------------------------------------------- /agentsmith.py: -------------------------------------------------------------------------------- 1 | import autogen 2 | 3 | config_list = autogen.config_list_from_json( 4 | "OAI_CONFIG_LIST", 5 | filter_dict={ 6 | "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"], 7 | }, 8 | ) 9 | 10 | llm_config = {"config_list": config_list, "seed": 42} 11 | user_proxy = autogen.UserProxyAgent( 12 | name="User_proxy", 13 | system_message="A human admin.", 14 | code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"}, 15 | human_input_mode="TERMINATE" 16 | ) 17 | coder = autogen.AssistantAgent( 18 | name="Coder", 19 | llm_config=llm_config, 20 | ) 21 | pm = autogen.AssistantAgent( 22 | name="Product_manager", 23 | system_message="Creative in software product ideas.", 24 | llm_config=llm_config, 25 | ) 26 | groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12) 27 | manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config) 28 | 29 | user_proxy.initiate_chat(manager, message="Find a latest paper about gpt-4 on arxiv and find its potential applications in software.") 30 | # type exit to terminate the chat 31 | -------------------------------------------------------------------------------- /ai.gpt: -------------------------------------------------------------------------------- 1 | # This Python example uses Azure Speech Recognition for speech collection, Elevenlabs for speech synthesis, and OpenAI to interpret the text. 
2 | # If you want to try this out, please change the API keys 3 | # You also need to install the MPV library https://github.com/rossy/mpv-install/blob/master/README.md if you want to enable audio playback 4 | 5 | import azure.cognitiveservices.speech as speechsdk 6 | import openai 7 | import elevenlabs 8 | import json 9 | import requests 10 | 11 | from elevenlabs import set_api_key 12 | 13 | # Initialize your API keys 14 | set_api_key("") ## Elevenlabs API Key 15 | openai.api_key = '' ## OpenAI Key 16 | 17 | # Set up the Azure Speech SDK 18 | speech_key, service_region = "", "eastus" ## Azure Speech API Key 19 | speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region) 20 | speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config) 21 | 22 | def get_current_weather(location, unit="fahrenheit"): 23 | """Get the current weather in a given location.""" 24 | api_key = '' # Replace with your API key from OpenWeatherMap 25 | base_url = "http://api.openweathermap.org/data/2.5/weather" 26 | units = "metric" if unit == "celsius" else "imperial" # map the requested unit to OpenWeatherMap's units parameter 27 | 28 | 29 | params = { 30 | 'q': location, 31 | 'appid': api_key, 32 | 'units': units 33 | } 34 | 35 | response = requests.get(base_url, params=params) 36 | print(params) 37 | print(response) 38 | 39 | if response.status_code == 200: 40 | data = response.json() 41 | main = data['main'] 42 | weather = data['weather'][0] 43 | 44 | weather_info = { 45 | "location": location, 46 | "temperature": str(main['temp']), 47 | "unit": unit, 48 | "forecast": [weather['description']] 49 | } 50 | return json.dumps(weather_info) 51 | else: 52 | return json.dumps({"error": f"Could not fetch weather for {location}. Error {response.status_code}: {response.text}"}) 53 | 54 | def handle_function_call(response_message): 55 | available_functions = { 56 | "get_current_weather": get_current_weather, 57 | } 58 | function_name = response_message["function_call"]["name"] 59 | function_to_call = available_functions[function_name] 60 | function_args = json.loads(response_message["function_call"]["arguments"]) 61 | function_response = function_to_call( 62 | location=function_args.get("location"), 63 | unit=function_args.get("unit", "fahrenheit"), 64 | ) 65 | return function_name, function_response 66 | 67 | messages = [{"role": "system", "content": "You are an intelligent assistant."}] 68 | functions = [ 69 | { 70 | "name": "get_current_weather", 71 | "description": "Get the current weather in a given location", 72 | "parameters": { 73 | "type": "object", 74 | "properties": { 75 | "location": {"type": "string", "description": "The city and state, e.g. 
San Francisco, CA"}, 76 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, 77 | }, 78 | "required": ["location"], 79 | }, 80 | } 81 | ] 82 | 83 | while True: 84 | print("Talk now") 85 | result = speech_recognizer.recognize_once() 86 | 87 | message = format(result.text) 88 | if message: 89 | messages.append({"role": "user", "content": message}) 90 | chat = openai.ChatCompletion.create( 91 | model="gpt-3.5-turbo", 92 | messages=messages, 93 | functions=functions, 94 | function_call="auto" 95 | ) 96 | reply = chat.choices[0].message.content 97 | 98 | # Handle function calls from the assistant 99 | if chat.choices[0].message.get("function_call"): 100 | function_name, function_response = handle_function_call(chat.choices[0].message) 101 | messages.append({"role": "function", "name": function_name, "content": function_response}) 102 | 103 | second_response = openai.ChatCompletion.create( 104 | model="gpt-3.5-turbo-0613", 105 | messages=messages, 106 | ) 107 | response_content = second_response.choices[0].message.content 108 | print(f"ChatGPT: {response_content}") 109 | messages.append({"role": "assistant", "content": response_content}) 110 | 111 | audio_stream = elevenlabs.generate(text=response_content, voice="Matthew", stream=True) 112 | output = elevenlabs.stream(audio_stream) 113 | else: 114 | # If no function call, just handle the original GPT-3 response 115 | print(f"ChatGPT: {reply}") 116 | messages.append({"role": "assistant", "content": reply}) 117 | 118 | audio_stream = elevenlabs.generate(text=reply, voice="Matthew", stream=True) 119 | output = elevenlabs.stream(audio_stream) 120 | -------------------------------------------------------------------------------- /capture.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import time 3 | from PIL import Image 4 | import numpy as np 5 | import os 6 | 7 | # Folder 8 | folder = "frames" 9 | 10 | # Create the frames folder if it doesn't exist 11 | frames_dir = os.path.join(os.getcwd(), folder) 12 | os.makedirs(frames_dir, exist_ok=True) 13 | 14 | # Initialize the webcam 15 | cap = cv2.VideoCapture(0) 16 | 17 | # Check if the webcam is opened correctly 18 | if not cap.isOpened(): 19 | raise IOError("Cannot open webcam") 20 | 21 | # Wait for the camera to initialize and adjust light levels 22 | time.sleep(2) 23 | 24 | while True: 25 | ret, frame = cap.read() 26 | if ret: 27 | # Convert the frame to a PIL image 28 | pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) 29 | 30 | # Resize the image 31 | max_size = 250 32 | ratio = max_size / max(pil_img.size) 33 | new_size = tuple([int(x*ratio) for x in pil_img.size]) 34 | resized_img = pil_img.resize(new_size, Image.LANCZOS) 35 | 36 | # Convert the PIL image back to an OpenCV image 37 | frame = cv2.cvtColor(np.array(resized_img), cv2.COLOR_RGB2BGR) 38 | 39 | # Save the frame as an image file 40 | print("📸 Say cheese! 
Saving frame.") 41 | path = f"{folder}/frame.jpg" 42 | cv2.imwrite(path, frame) 43 | else: 44 | print("Failed to capture image") 45 | 46 | # Wait for 2 seconds 47 | time.sleep(2) 48 | 49 | # Release the camera and close all windows 50 | cap.release() 51 | cv2.destroyAllWindows() 52 | -------------------------------------------------------------------------------- /chainlit.py: -------------------------------------------------------------------------------- 1 | # pip install langchain openai google-search-results 2 | 3 | import os 4 | import chainlit as cl 5 | from dotenv import load_dotenv 6 | import openai 7 | import langchain 8 | 9 | os.environ["OPENAI_API_KEY"] ="" 10 | os.environ["SERPAPI_API_KEY"] ="" 11 | 12 | from langchain import OpenAI, LLMMathChain, SerpAPIWrapper 13 | from langchain.agents import initialize_agent, Tool, AgentExecutor 14 | from langchain.chat_models import ChatOpenAI 15 | 16 | @cl.on_chat_start 17 | def start(): 18 | llm = ChatOpenAI(temperature=0, streaming=True) 19 | llm1 = OpenAI(temperature=0, streaming=True) 20 | search = SerpAPIWrapper() 21 | llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True) 22 | 23 | tools = [ 24 | Tool( 25 | name="Search", 26 | func=search.run, 27 | description="useful for when you need to answer questions about current events. You should ask targeted questions", 28 | ), 29 | Tool( 30 | name="Calculator", 31 | func=llm_math_chain.run, 32 | description="useful for when you need to answer questions about math", 33 | ), 34 | ] 35 | agent = initialize_agent( 36 | tools, llm1, agent="chat-zero-shot-react-description", verbose=True 37 | ) 38 | cl.user_session.set("agent", agent) 39 | 40 | 41 | @cl.on_message 42 | async def main(message): 43 | agent = cl.user_session.get("agent") # type: AgentExecutor 44 | cb = cl.LangchainCallbackHandler(stream_final_answer=True) 45 | 46 | await cl.make_async(agent.run)(message, callbacks=[cb]) -------------------------------------------------------------------------------- /chatgpt-basic.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | 4 | # Load your API key from an environment variable or secret management service 5 | openai.api_key = "OPENAIKEY" 6 | HELICONE_API_KEY="HELICONEAPI" 7 | from helicone.openai_proxy import openai 8 | 9 | response = openai.ChatCompletion.create( 10 | model="gpt-4", 11 | messages=[ 12 | { 13 | "role": "system", 14 | "content": "You are an AI assistant that helps the users with getting answers to their questions." 15 | }, 16 | { 17 | "role": "user", 18 | "content": "What is the best way to get started with OpenAI?" 19 | } 20 | ], 21 | max_tokens=256, 22 | top_p=1, 23 | frequency_penalty=0, 24 | presence_penalty=0, 25 | user="alice@bob.com", 26 | cache=True, 27 | properties={"conversation_id": 12}, 28 | rate_limit_policy={"quota": 100, "time_window": 60, "segment": "user"} 29 | ) 30 | 31 | print(response) -------------------------------------------------------------------------------- /cloudgpt.md: -------------------------------------------------------------------------------- 1 | ## LLM Features 2 | 3 | Note that FM refers to foundation language models; most providers also support third-party LLMs such as LLaMA 2. 4 | 5 | * References to Bison refer to one of the codenames for PaLM. 
6 | 7 | Last Update: 12/07/2024 8 | 9 | | Features | Google | Microsoft | Amazon Web Services | 10 | |----------------------------------|---------------------------|------------------------|----------------------| 11 | | **LLM Service/Runtime** | Vertex AI | Azure OpenAI | Bedrock | 12 | | **LLM Models available** | PaLM, LLaMa2, Falcon, Claude2* | GPT, LLaMa3, Falcon, Phi-3 | Titan, Claude2, Cohere | 13 | | **LLM Models Code** | Code-Bison | GPT-4o | | 14 | | **LLM Models Security** | Sec-PaLM | Fine-tuned GPT-4 | | 15 | | **LLM Models Catalog** | Model Garden | Model Catalog | Model Providers | 16 | | **LLM Token Size FM** | 32k (PaLM2) | 128k (GPT4) | 8k (Titan) | 17 | | **LLM Integration framework** | Vertex AI Extensions | Microsoft Semantic Kernel | | 18 | | **LLM Safety filter** | | Azure AI Content Safety | | 19 | | **LLM Fine-tuning support** | Code-bison(PaLM), text-bison(PaLM) | GPT-3.5 supports Fine-tuning | | 20 | | **LLM Agent** | Vertex AI Conversation | Power Virtual agents | Bedrock Agents, Amazon Lex | 21 | 22 | ## Machine Learning Features 23 | 24 | | Features | Google | Microsoft | Amazon Web Services | 25 | |----------------------------------|---------------------------|------------------------|----------------------| 26 | | **ML Service** | Vertex AI Workbench | Azure Machine Learning | Amazon Sagemaker | 27 | | **ML Model Catalogue 3.party** | | Huggingface | | 28 | | **Vector Search** | Vertex AI Vector Search | Azure Cognitive Search (Preview) | Amazon OpenSearch, Amazon Kendra | 29 | 30 | ## Text and Speech Features 31 | 32 | | Features | Google | Microsoft | Amazon Web Services | 33 | |----------------------------------|---------------------------|------------------------|----------------------| 34 | | **Text-to-image** | Imagen | DALL-E | | 35 | | **Speech-to-text** | Chirp | Azure Speech Recognition, Whisper | Amazon Transcribe | 36 | | **Text-to-speech** | Vertex AI | Azure Text to speech, Whisper | Amazon Polly | 37 | 38 | ## Additional Services 39 | 40 | | Features | Google | Microsoft | Amazon Web Services | 41 | |----------------------------------|---------------------------|------------------------|----------------------| 42 | | **Vector database** | Cloud SQL (Pgvector), AlloyDB, Vertex AI Vector Search | Azure Cosmos DB | Amazon RDS (Pgvector) | 43 | | **Embedding** | Text embedding API Gecko | Ada OpenAI Embedding | Titan Embeddings | 44 | | **Integration services - Langchain** | Vertex AI, Google Search | Azure Cognitive Search | | 45 | | **Code assistant based AI** | Duet AI | Github Copilot | Amazon Code Whisperer | 46 | | **Collaboration GPT** | Duet AI | Microsoft Copilot | | 47 | | **Digital Watermarking** | Synthid (Image) | | | 48 | | **Security Powered LLM** | Security Command Center AI | Microsoft Security Copilot | | 49 | -------------------------------------------------------------------------------- /copilot.md: -------------------------------------------------------------------------------- 1 | ## Overview of different Copilot, ChatGPT and Bing features from Microsoft 2 | 3 | This is meant to illustrate the differences between the different GPT enabled services that Microsoft has such as CoPilot and Bing Chat but also how it compared to ChatGPT. 4 | It shows the different features, privacy and security mechanisms which apply to tne end-user. 
5 | 6 | ### NB: Windows Copilot is not included since it uses Bing Chat underneath 7 | 8 | 9 | | Features | ChatGPT (and ChatGPT Plus) | Microsoft Copilot | Microsoft Copilot Enterprise | Bing Enterprise Chat Powered by M365 Copilot | Copilot Chat / Business Chat (in Teams) | 10 | |---------------------------------------------------------------------------------------------|------------------------------------------|------------------------------------------|------------------------------------------|-----------------------------------------------|------------------------------------------------| 11 | | **Description** | Commercial ChatGPT service | Formerly known as Bing Chat | Formerly known as Bing Chat Enterprise | Available as part of Microsoft 365 Copilot | Available as part of Microsoft 365 Copilot | 12 | | **Data grounding for Microsoft 365 data** | No (Access to OneDrive) | No | No | Yes | Yes | 13 | | **License and cost** | Free (20$ Per user for plus) | Free | Free if you have existing licenses (A3/E3/A5/E5) (5$) | 30$ Per user Per month | 30$ Per user per month | 14 | | **Access to M365 Personal Data** | No (Access to OneDrive) | No | No | Yes | Yes | 15 | | **Support for 3.party data sources** | Via Plugins | Via Plugins | No | Graph Data Connectors | Graph Data Connectors | 16 | | **Chat History** | Yes (can be disabled) | Yes | No (per session) | No (per session) | Yes | 17 | | **User and business data is protected and won’t leak outside the organization** | No (data can be leaked via 3 party plugins) | No (data can be leaked via 3 party plugins) | Yes | Yes | Yes | 18 | | **Language Model** | GPT3-5 and GPT4, GPT4-Turbo and GPT-4o | GPT3-5 and GPT4 | GPT3-5 and GPT4 | GPT3-5 and GPT4 | GPT3-5 and GPT4 | 19 | | **Data processed** | US | US/EU (Worldwide) | US/EU (Worldwide) | Within geo region such as EU for EU Customers | Within geo region such as EU for EU Customers | 20 | | **Uses Internet Search** | Yes (Supported via functions) | Yes | Yes | Yes (Enabled by default) | Yes (Enabled by default) | 21 | | **Support Plugins** | Yes (ChatGPT Plus) | Yes | No | Yes | Yes (ChatGPT Plugins, Power Platform Integrations and Teams Message Extensions | 22 | | **"When was the movie released?"** | The movie was released on July 21, 2023. | 21-Jul-23 | 21-Jul-23 | The movie was released on July 21, 2023. | The movie was released on July 21, 2023. | 23 | | **"When is my next meeting?**" | No Answer | No Answer | No Answer | Today with my team | Today with my team | 24 | | "**Summarize /ABC.doc stored in Microsoft 365"** | No (available using plugins) | No | No | Yes | Yes | 25 | | **Voice search** | Yes | Yes | Yes | No | No | 26 | 27 | 1* Grounding means that data is indexed and made available for the language model. This is utilized using a Search engine on top which search trough the data and makes it available. 
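To make the grounding footnote above concrete, here is a minimal, illustrative Python sketch (not how any of the Copilot services are actually implemented): a search step picks the most relevant indexed documents for a question, and those documents are placed into the prompt so the language model answers from organizational data rather than from its training data alone. The documents and the naive keyword scoring are invented for illustration.

```python
# Illustrative sketch of "grounding": retrieve indexed content first,
# then hand it to the language model together with the user question.
# The documents and the keyword scoring below are made-up examples.

documents = {
    "vacation-policy.docx": "Employees get 25 vacation days per year.",
    "expense-policy.docx": "Expenses above 500 NOK require manager approval.",
    "onboarding.docx": "New hires receive a laptop on their first day.",
}

def search(query: str, top_k: int = 2) -> list[str]:
    """Toy search engine: rank documents by how many words they share with the query."""
    query_words = set(query.lower().split())
    scored = sorted(
        documents.items(),
        key=lambda item: len(query_words & set(item[1].lower().split())),
        reverse=True,
    )
    return [f"{name}: {text}" for name, text in scored[:top_k]]

def build_grounded_prompt(question: str) -> str:
    """The model only ever sees the data that the search step surfaced."""
    context = "\n".join(search(question))
    return f"Answer using only this company data:\n{context}\n\nQuestion: {question}"

print(build_grounded_prompt("How many vacation days do employees get?"))
```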
28 | -------------------------------------------------------------------------------- /eldenmark_2024_final 1.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/msandbu/gpt-ai/f31508021a877a964abab9ab770618acc9f96f64/eldenmark_2024_final 1.pptx -------------------------------------------------------------------------------- /ex-langchain.py: -------------------------------------------------------------------------------- 1 | # pip install langchain openai google-search-results 2 | 3 | import os 4 | import openai 5 | import langchain 6 | 7 | os.environ["OPENAI_API_KEY"] ="" 8 | os.environ["SQL_SERVER_USERNAME"] = "" 9 | os.environ["SQL_SERVER_ENDPOINT"] = "" 10 | os.environ["SQL_SERVER_PASSWORD"] = "" 11 | os.environ["SQL_SERVER_DATABASE"] = "" 12 | 13 | from sqlalchemy import create_engine 14 | from sqlalchemy.engine.url import URL 15 | from langchain.sql_database import SQLDatabase 16 | 17 | db_config = { 18 | 'drivername': 'mssql+pyodbc', 19 | 'username': os.environ["SQL_SERVER_USERNAME"] + '@' + os.environ["SQL_SERVER_ENDPOINT"], 20 | 'password': os.environ["SQL_SERVER_PASSWORD"], 21 | 'host': os.environ["SQL_SERVER_ENDPOINT"], 22 | 'port': 1433, 23 | 'database': os.environ["SQL_SERVER_DATABASE"], 24 | 'query': {'driver': 'ODBC Driver 17 for SQL Server'} 25 | } 26 | 27 | db_url = URL.create(**db_config) 28 | db = SQLDatabase.from_uri(db_url) 29 | 30 | from langchain.agents import load_tools 31 | from langchain.agents import initialize_agent 32 | from langchain.agents import AgentType 33 | from langchain.llms import OpenAI 34 | from langchain.agents.agent_toolkits import SQLDatabaseToolkit 35 | from langchain.sql_database import SQLDatabase 36 | from langchain.agents import AgentExecutor 37 | from langchain.agents.agent_types import AgentType 38 | from langchain.agents import create_sql_agent 39 | 40 | 41 | llm = OpenAI(temperature=0) 42 | toolkit = SQLDatabaseToolkit(db=db, llm=llm) 43 | 44 | agent_executor = create_sql_agent( 45 | llm=llm, 46 | toolkit=toolkit, 47 | verbose=True, 48 | agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, 49 | ) 50 | 51 | agent_executor.run("Which user has ID number 5?") 52 | -------------------------------------------------------------------------------- /ex-langchainsqlstreamlit.py: -------------------------------------------------------------------------------- 1 | import os 2 | #from dotenv import load_dotenv 3 | import openai 4 | import langchain 5 | 6 | os.environ["OPENAI_API_KEY"] ="" 7 | os.environ["SQL_SERVER_USERNAME"] = "" 8 | os.environ["SQL_SERVER_ENDPOINT"] = "" 9 | os.environ["SQL_SERVER_PASSWORD"] = "" 10 | os.environ["SQL_SERVER_DATABASE"] = "" 11 | 12 | from sqlalchemy import create_engine 13 | from sqlalchemy.engine.url import URL 14 | from langchain.sql_database import SQLDatabase 15 | 16 | db_config = { 17 | 'drivername': 'mssql+pyodbc', 18 | 'username': os.environ["SQL_SERVER_USERNAME"] + '@' + os.environ["SQL_SERVER_ENDPOINT"], 19 | 'password': os.environ["SQL_SERVER_PASSWORD"], 20 | 'host': os.environ["SQL_SERVER_ENDPOINT"], 21 | 'port': 1433, 22 | 'database': os.environ["SQL_SERVER_DATABASE"], 23 | 'query': {'driver': 'ODBC Driver 17 for SQL Server'} 24 | } 25 | 26 | db_url = URL.create(**db_config) 27 | db = SQLDatabase.from_uri(db_url) 28 | 29 | from langchain.agents import load_tools 30 | from langchain.agents import initialize_agent 31 | from langchain.agents import AgentType 32 | from langchain.llms import OpenAI 33 | from langchain.agents.agent_toolkits 
import SQLDatabaseToolkit 34 | from langchain.sql_database import SQLDatabase 35 | from langchain.agents import AgentExecutor 36 | from langchain.agents.agent_types import AgentType 37 | from langchain.agents import create_sql_agent 38 | #from langchain.callbacks import StreamlitCallbackHandler 39 | import streamlit as st 40 | 41 | # Page title 42 | st.set_page_config(page_title='🦜🔗 Ask the SQLSaturday App') 43 | st.title('📎Ask the SQLSaturda Oslo DB with Clippy!') 44 | 45 | 46 | def generate_response(input_query): 47 | llm = OpenAI(temperature=0) 48 | toolkit = SQLDatabaseToolkit(db=db, llm=llm) 49 | agent_executor = create_sql_agent( 50 | llm=llm, 51 | toolkit=toolkit, 52 | verbose=True, 53 | agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, 54 | ) 55 | response = agent_executor.run(input_query) 56 | return st.success(response) 57 | 58 | question_list = [ 59 | 'How many rows are there?', 60 | 'What kind of tables are here?', 61 | 'How many are called John?', 62 | 'Other'] 63 | query_text = st.selectbox('Select an example query:', question_list) 64 | openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (query_text)) 65 | 66 | # App logic 67 | if query_text == 'Other': 68 | query_text = st.text_input('Enter your query:', placeholder = 'Enter query here ...') 69 | if not openai_api_key.startswith('sk-'): 70 | st.warning('Please enter your OpenAI API key!', icon='⚠') 71 | if openai_api_key.startswith('sk-'): 72 | st.header('Output') 73 | generate_response(query_text) 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /expertslivedk/agentsmith.py: -------------------------------------------------------------------------------- 1 | import autogen 2 | 3 | config_list = autogen.config_list_from_json( 4 | "OAI_CONFIG_LIST", 5 | filter_dict={ 6 | "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"], 7 | }, 8 | ) 9 | 10 | llm_config = {"config_list": config_list, "seed": 42} 11 | user_proxy = autogen.UserProxyAgent( 12 | name="User_proxy", 13 | system_message="A human admin.", 14 | code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"}, 15 | human_input_mode="TERMINATE" 16 | ) 17 | coder = autogen.AssistantAgent( 18 | name="Coder", 19 | llm_config=llm_config, 20 | ) 21 | pm = autogen.AssistantAgent( 22 | name="Product_manager", 23 | system_message="Creative in software product ideas.", 24 | llm_config=llm_config, 25 | ) 26 | groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12) 27 | manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config) 28 | 29 | user_proxy.initiate_chat(manager, message="Find a latest paper about gpt-4 on arxiv and find its potential applications in software.") 30 | # type exit to terminate the chat 31 | -------------------------------------------------------------------------------- /expertslivedk/david.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | import base64 4 | import json 5 | import time 6 | import simpleaudio as sa 7 | import errno 8 | from elevenlabs import generate, play, voices 9 | from elevenlabs import set_api_key 10 | set_api_key("APIKEYFORELEVENLABS") 11 | 12 | client = OpenAI() 13 | 14 | 15 | 16 | def play_audio(text): 17 | audio = generate(text=text, voice="Gltr1is83rrQkB5Q6m2S", model="eleven_turbo_v2") 18 | 19 | unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") 20 | 
dir_path = os.path.join("narration", unique_id) 21 | os.makedirs(dir_path, exist_ok=True) 22 | file_path = os.path.join(dir_path, "audio.wav") 23 | 24 | with open(file_path, "wb") as f: 25 | f.write(audio) 26 | 27 | play(audio) 28 | 29 | 30 | 31 | def analyze_image(script): 32 | response = client.chat.completions.create( 33 | model="gpt-4", 34 | messages=[ 35 | { 36 | "role": "system", 37 | "content": """ 38 | You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary. 39 | Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it! 40 | """, 41 | }, 42 | ] 43 | + script, 44 | max_tokens=500, 45 | ) 46 | response_text = response.choices[0].message.content 47 | return response_text 48 | 49 | 50 | 51 | def main(): 52 | script = [] 53 | 54 | while True: 55 | 56 | # generate the next narration line 57 | print("👀 David is watching...") 58 | analysis = analyze_image(script=script) 59 | 60 | print("🎙️ David says:") 61 | print(analysis) 62 | 63 | play_audio(analysis) 64 | 65 | script = script + [{"role": "assistant", "content": analysis}] 66 | 67 | # wait for 5 seconds 68 | time.sleep(5) 69 | 70 | 71 | if __name__ == "__main__": 72 | main() 73 | -------------------------------------------------------------------------------- /expertslivedk/el-autogen.py: -------------------------------------------------------------------------------- 1 | import autogen 2 | 3 | config_list = autogen.config_list_from_json( 4 | "OAI_CONFIG_LIST", 5 | filter_dict={ 6 | "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"], 7 | }, 8 | ) 9 | 10 | llm_config = {"config_list": config_list, "seed": 42} 11 | user_proxy = autogen.UserProxyAgent( 12 | name="User_proxy", 13 | system_message="A human admin.", 14 | code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"}, 15 | human_input_mode="TERMINATE" 16 | ) 17 | coder = autogen.AssistantAgent( 18 | name="Coder", 19 | llm_config=llm_config, 20 | ) 21 | pm = autogen.AssistantAgent( 22 | name="Product_manager", 23 | system_message="Creative in software product ideas.", 24 | llm_config=llm_config, 25 | ) 26 | groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12) 27 | manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config) 28 | 29 | user_proxy.initiate_chat(manager, message="Write a Python script that can collect weather information using REST API.") 30 | # type exit to terminate the chat 31 | -------------------------------------------------------------------------------- /expertslivedk/el-kodetake.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ["OPENAI_API_KEY"] = "" 3 | os.environ["SERPAPI_API_KEY"] = "" 4 | import streamlit as st 5 | from openai import OpenAI 6 | 7 | client = OpenAI() 8 | from collections import deque 9 | from typing import Dict, List, Optional, Any 10 | #do this to load the env variables 11 | 12 | 13 | # Definer Streamlit layout 14 | st.title("Kode tåge") 15 | language = st.selectbox("Velg Språk", ["Python", "JavaScript", "PowerShell"]) 16 | code_input = st.text_area("Last opp kode som skal analyseres, her er noen eksempler https://github.com/das-lab/mpsd/tree/main/malicious_pure") 17 | 18 | 19 | def explain_code(input_code, language): 20 | model_engine = "gpt-3.5-turbo" # Change to the desired OpenAI model 21 | message = [ 22 | { 23 | "role": "system", 24 | "content": "You are a helpful assistant 
to help desribe code to the user" 25 | }, 26 | { 27 | "role": "user", 28 | "content": f"Forklar hva følgende {language} kode gjør for noe: \n\n{input_code}" 29 | } 30 | ] 31 | response = client.chat.completions.create(model=model_engine, 32 | messages=message, 33 | max_tokens=1024, 34 | n=1, 35 | stop=None, 36 | temperature=0.7) 37 | return response.choices[0].message.content 38 | 39 | 40 | # Temperature and token slider 41 | temperature = st.sidebar.slider( 42 | "Temperature", 43 | min_value=0.0, 44 | max_value=1.0, 45 | value=0.5, 46 | step=0.1 47 | ) 48 | tokens = st.sidebar.slider( 49 | "Tokens", 50 | min_value=64, 51 | max_value=2048, 52 | value=256, 53 | step=64 54 | ) 55 | # Define Streamlit app behavior 56 | if st.button("Forklar"): 57 | output_text = explain_code(code_input, language) 58 | st.write("Kodetåke:", output_text) -------------------------------------------------------------------------------- /expertslivedk/el-langchain.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | 4 | from langchain.chat_models import AzureChatOpenAI 5 | from langchain.llms.openai import OpenAI 6 | from langchain.chains import LLMChain 7 | from langchain.prompts import PromptTemplate 8 | from langchain.memory import ConversationBufferWindowMemory 9 | from langchain.prompts.chat import ( 10 | ChatPromptTemplate, 11 | SystemMessagePromptTemplate, 12 | HumanMessagePromptTemplate, 13 | ) 14 | 15 | os.environ["AZURE_OPENAI_API_KEY"] = "" 16 | os.environ["AZURE_OPENAI_ENDPOINT"] = "" 17 | os.environ["OPENAI_API_VERSION"] = "2024-02-15-preview" 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | llm = AzureChatOpenAI(deployment_name="trd", temperature=0.7) 27 | system_message = "You are an AI assistant that tells jokes." 28 | 29 | system_message_prompt = SystemMessagePromptTemplate.from_template(system_message) 30 | human_template="{text}" 31 | human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) 32 | chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) 33 | 34 | chain = LLMChain(llm=llm, prompt=chat_prompt) 35 | result = chain.run(f"Tell me a dad joke about norwegians") 36 | print(result) 37 | 38 | 39 | -------------------------------------------------------------------------------- /expertslivedk/el-langchainagent.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | 4 | from langchain.chat_models import AzureChatOpenAI 5 | from langchain.llms.openai import OpenAI 6 | from langchain.chains import LLMChain 7 | from langchain.prompts import PromptTemplate 8 | from langchain.memory import ConversationBufferWindowMemory 9 | from langchain.prompts.chat import ( 10 | ChatPromptTemplate, 11 | SystemMessagePromptTemplate, 12 | HumanMessagePromptTemplate, 13 | ) 14 | 15 | os.environ["AZURE_OPENAI_API_KEY"] = "" 16 | os.environ["AZURE_OPENAI_ENDPOINT"] = "" 17 | os.environ["OPENAI_API_VERSION"] = "2024-02-15-preview" 18 | os.environ["SERPAPI_API_KEY"] ="" 19 | 20 | 21 | from langchain.agents import load_tools 22 | from langchain.agents import initialize_agent 23 | from langchain.agents import AgentType 24 | from langchain.llms import OpenAI 25 | 26 | llm = AzureChatOpenAI(deployment_name="trd", temperature=0.7) 27 | tools = load_tools(["serpapi"], llm=llm) 28 | agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) 29 | 30 | agent.run("Can you find the current weather in Oslo? 
and only specify the temprature") -------------------------------------------------------------------------------- /expertslivedk/jarvissql.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | import openai 4 | import langchain 5 | import azure.cognitiveservices.speech as speechsdk 6 | import elevenlabs 7 | from elevenlabs import set_api_key 8 | import json 9 | import requests 10 | 11 | from langchain.agents.agent_toolkits import SQLDatabaseToolkit 12 | from langchain.sql_database import SQLDatabase 13 | from langchain.agents import AgentExecutor 14 | from langchain.agents import create_sql_agent 15 | from langchain import LLMMathChain, OpenAI, SQLDatabase, SerpAPIWrapper 16 | from langchain.agents import initialize_agent, Tool 17 | from langchain.agents import AgentType 18 | from langchain.chat_models import ChatOpenAI 19 | from langchain.callbacks.streaming_stdout_final_only import ( 20 | FinalStreamingStdOutCallbackHandler, 21 | ) 22 | 23 | set_api_key("") ## elevenlabs 24 | os.environ["OPENAI_API_KEY"] ="sk-" 25 | os.environ["SQL_SERVER_USERNAME"] = "" 26 | os.environ["SQL_SERVER_ENDPOINT"] = ".database.windows.net" 27 | os.environ["SQL_SERVER_PASSWORD"] = "" 28 | os.environ["SQL_SERVER_DATABASE"] = "" 29 | os.environ["SERPAPI_API_KEY"] ="" 30 | 31 | speech_key, service_region = "SpeechKEY", "westeurope" 32 | speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region) 33 | speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config) 34 | 35 | from sqlalchemy import create_engine 36 | from sqlalchemy.engine.url import URL 37 | 38 | 39 | db_config = { 40 | 'drivername': 'mssql+pyodbc', 41 | 'username': os.environ["SQL_SERVER_USERNAME"] + '@' + os.environ["SQL_SERVER_ENDPOINT"], 42 | 'password': os.environ["SQL_SERVER_PASSWORD"], 43 | 'host': os.environ["SQL_SERVER_ENDPOINT"], 44 | 'port': 1433, 45 | 'database': os.environ["SQL_SERVER_DATABASE"], 46 | 'query': {'driver': 'ODBC Driver 17 for SQL Server'} 47 | } 48 | 49 | from langchain.agents import create_sql_agent 50 | 51 | 52 | llm = OpenAI(streaming=True,temperature=0) 53 | search = SerpAPIWrapper() 54 | db_url = URL.create(**db_config) 55 | db = SQLDatabase.from_uri(db_url) 56 | llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613") 57 | toolkit = SQLDatabaseToolkit(db=db, llm=llm) 58 | db_chain = create_sql_agent( 59 | llm=llm, 60 | toolkit=toolkit, 61 | verbose=True, 62 | agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, 63 | ) 64 | tools = [ 65 | Tool( 66 | name = "Search", 67 | func=search.run, 68 | description="useful for when you need to answer questions about current events. 
You should ask targeted questions" 69 | ), 70 | Tool( 71 | name="FooBar-DB", 72 | func=db_chain.run, 73 | description="useful to answer questions about John in the database or create databases tables" 74 | ) 75 | ] 76 | 77 | while True: 78 | print("Talk now") 79 | result = speech_recognizer.recognize_once() 80 | print("Recognized: {}".format(result.text)) 81 | message = format(result.text) 82 | agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, handle_parsing_errors=True) 83 | response = agent( 84 | { 85 | "input": result.text 86 | } 87 | ) 88 | response["output"] 89 | print(response["output"]) 90 | audio_stream = elevenlabs.generate(text=response["output"],voice="Matthew", stream=True) 91 | output = elevenlabs.stream(audio_stream) 92 | 93 | -------------------------------------------------------------------------------- /expertslivedk/narrator.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | import base64 4 | import json 5 | import time 6 | import simpleaudio as sa 7 | import errno 8 | from elevenlabs import generate, play, voices 9 | from elevenlabs import set_api_key 10 | set_api_key("b8b07774d13e8a6517240190442588dd") 11 | 12 | client = OpenAI() 13 | 14 | 15 | def encode_image(image_path): 16 | while True: 17 | try: 18 | with open(image_path, "rb") as image_file: 19 | return base64.b64encode(image_file.read()).decode("utf-8") 20 | except IOError as e: 21 | if e.errno != errno.EACCES: 22 | # Not a "file in use" error, re-raise 23 | raise 24 | # File is being written to, wait a bit and retry 25 | time.sleep(0.1) 26 | 27 | 28 | def play_audio(text): 29 | audio = generate(text=text, voice="Gltr1is83rrQkB5Q6m2S", model="eleven_turbo_v2") 30 | 31 | unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") 32 | dir_path = os.path.join("narration", unique_id) 33 | os.makedirs(dir_path, exist_ok=True) 34 | file_path = os.path.join(dir_path, "audio.wav") 35 | 36 | with open(file_path, "wb") as f: 37 | f.write(audio) 38 | 39 | play(audio) 40 | 41 | 42 | def generate_new_line(base64_image): 43 | return [ 44 | { 45 | "role": "user", 46 | "content": [ 47 | {"type": "text", "text": "Describe this image"}, 48 | { 49 | "type": "image_url", 50 | "image_url": f"data:image/jpeg;base64,{base64_image}", 51 | }, 52 | ], 53 | }, 54 | ] 55 | 56 | 57 | def analyze_image(base64_image, script): 58 | response = client.chat.completions.create( 59 | model="gpt-4-vision-preview", 60 | messages=[ 61 | { 62 | "role": "system", 63 | "content": """ 64 | You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary and mention that the person looks really nervous. 65 | Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it! 
66 | """, 67 | }, 68 | ] 69 | + script 70 | + generate_new_line(base64_image), 71 | max_tokens=500, 72 | ) 73 | response_text = response.choices[0].message.content 74 | return response_text 75 | 76 | 77 | def main(): 78 | script = [] 79 | 80 | while True: 81 | # path to your image 82 | image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") 83 | 84 | # getting the base64 encoding 85 | base64_image = encode_image(image_path) 86 | 87 | # analyze posture 88 | print("👀 David is watching...") 89 | analysis = analyze_image(base64_image, script=script) 90 | 91 | print("🎙️ David says:") 92 | print(analysis) 93 | 94 | play_audio(analysis) 95 | 96 | script = script + [{"role": "assistant", "content": analysis}] 97 | 98 | # wait for 5 seconds 99 | time.sleep(5) 100 | 101 | 102 | if __name__ == "__main__": 103 | main() 104 | -------------------------------------------------------------------------------- /expertslivedk/narrator_capture.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import time 3 | from PIL import Image 4 | import numpy as np 5 | import os 6 | 7 | # Folder 8 | folder = "frames" 9 | 10 | # Create the frames folder if it doesn't exist 11 | frames_dir = os.path.join(os.getcwd(), folder) 12 | os.makedirs(frames_dir, exist_ok=True) 13 | 14 | # Initialize the webcam 15 | cap = cv2.VideoCapture(0) 16 | 17 | # Check if the webcam is opened correctly 18 | if not cap.isOpened(): 19 | raise IOError("Cannot open webcam") 20 | 21 | # Wait for the camera to initialize and adjust light levels 22 | time.sleep(2) 23 | 24 | while True: 25 | ret, frame = cap.read() 26 | if ret: 27 | # Convert the frame to a PIL image 28 | pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) 29 | 30 | # Resize the image 31 | max_size = 250 32 | ratio = max_size / max(pil_img.size) 33 | new_size = tuple([int(x*ratio) for x in pil_img.size]) 34 | resized_img = pil_img.resize(new_size, Image.LANCZOS) 35 | 36 | # Convert the PIL image back to an OpenCV image 37 | frame = cv2.cvtColor(np.array(resized_img), cv2.COLOR_RGB2BGR) 38 | 39 | # Save the frame as an image file 40 | print("📸 Say cheese! Saving frame.") 41 | path = f"{folder}/frame.jpg" 42 | cv2.imwrite(path, frame) 43 | else: 44 | print("Failed to capture image") 45 | 46 | # Wait for 2 seconds 47 | time.sleep(2) 48 | 49 | # Release the camera and close all windows 50 | cap.release() 51 | cv2.destroyAllWindows() 52 | -------------------------------------------------------------------------------- /expertslivedk/readme.md: -------------------------------------------------------------------------------- 1 | Contains scripts and content from Experts-live Denmark. 2 | 3 | You just need to change the scripts with your own API keys from different services. 
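As a small illustration of what "change the scripts with your own API keys" can look like in practice, the keys can also be kept out of the source entirely and read from environment variables; several scripts in this repository already import python-dotenv. A minimal sketch, assuming a local `.env` file and key names of your own choosing (the `ELEVENLABS_API_KEY` name below is just an example):

```python
# .env (keep this file out of version control):
#   OPENAI_API_KEY=sk-...
#   ELEVENLABS_API_KEY=...

import os
from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # reads the .env file into the process environment

openai_key = os.environ["OPENAI_API_KEY"]         # raises KeyError if missing
elevenlabs_key = os.getenv("ELEVENLABS_API_KEY")  # returns None if missing
```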
4 | -------------------------------------------------------------------------------- /genai.md: -------------------------------------------------------------------------------- 1 | | Features | Microsoft Copilot | Google Gemini | AWS Q | 2 | |------------------------------------|-----------------------------------------------------|------------------------------------------------------|--------------------------------------------------------| 3 | | **LLM** | OpenAI GPT v4 | Gemini | Most likely Anthropic Claude | 4 | | **Price** | $20 per user per month for Copilot Pro, $30 per user per month for Copilot 365 | $20 per user per month for Gemini Advanced | $20 per user per month for Amazon Q Business Pro | 5 | | **Context size** | 128,000 tokens ~ 100,000 words | 1 million tokens ~ 750,000 words | 200,000 tokens ~ 150,000 words | 6 | | **Internet search integration** | Bing Search | Google Search | No | 7 | | **File types supported** | PDF, DOCX, XLSX, PPTX, RTF, TXT, CSV, LOG, INI, CONFIG, GIF, JFIF, PJPEG, JPEG, JPG, PJP, PNG, WEBP, WAV, HTML, CSS, MD, RMD, TEX, LATEX, IPYNB, JSON, TOML, YAML, YML | TXT, DOC, DOCX, PDF, RTF, DOT, DOTX, HWP, HWPX, CSV, TSV, XLS, XLSX | PDF, HTML, XML, XSLT, MD, CSV, XLS, XLSX, JSON, RTF, PPT, PPTX, DOCX, TXT | 8 | | **File size support** | 10 MB per file | 100 MB per file | 50 MB per file | 9 | | **API support** | No | No | Yes | 10 | | **GPT Builders** | Copilot AI Agents (Part of Copilot Studio aimed at businesses) | Yes, Gems | Yes, Q Apps | 11 | | **Integration with Collaboration** | Yes, integrated in Office Apps with Pro | Yes, integrated with Google Workspace with Advanced | No, but has various data integrations | 12 | | **Language Support** | 26 Different languages | 33 Different languages | Only supported using English | 13 | | **Text-to-image** | Yes, DALL-E 3 | Yes, Imagen 3 | No | 14 | | **Text-to-voice** | Yes | Yes (10 different voices) | No | 15 | | **Voice-to-text** | Yes | Yes | No | 16 | | **Other integrations** | Klarna, Suno, OpenTable, Shop | YouTube, Google Maps, Google Hotels | Various AWS and third-party services (AEM, Alfresco, Amazon RDS, Box, Confluence, Dropbox, GitHub, Jira, Service Now, Slack, ZenDesk, SharePoint, Microsoft 365 etc.) | 17 | | **Benchmarks (MMLU)** | 88.4 (GPT-4o) | 81.9 (Gemini 1.5) | 86.8% (Claude 3 Opus) | 18 | | **Benchmarks (NIHS)** | 90% | 99.7% | 99% | 19 | -------------------------------------------------------------------------------- /gptagents.md: -------------------------------------------------------------------------------- 1 | # AI Agents Comparison 2 | 3 | | Name | Copilot Studio | Elevenlabs Agents | Replit Agents | AWS Bedrock Agents | Datastrax Astra | Phidata | Copilot SharePoint Agents | Google Agentspace | Google Vertex AI Builder | Autogen | LangGraph | 4 | |-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------| 5 | | Development | SaaS Portal | SaaS portal or using SDK | SaaS Portal or mobile app | SaaS portal | SaaS portal | SDK | SaaS portal | SaaS | SaaS / API | SDK | SDK | 6 | | Description | Automated Software Development | Low Code AI Builder | | | | | Agents for SharePoint content | Google Agentspace Enterprise brings AI agents and AI-powered search to enterprises. 
| | An open-source programming framework for building AI agents and facilitating cooperation among multiple agents to solve tasks | A library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows | 7 | | Delivery Model | SaaS | SaaS | SaaS | SaaS | SaaS | Self-hosted | SaaS | SaaS | Self-hosted | Self-hosted | Self-hosted / SaaS | 8 | | LLMs and Customization | Azure OpenAI (not customizable) | Gemini, GPT, Claude, Custom LLMs | AI21, Claude, Cohere, Meta, Mistral | Amazon Bedrock, Anthropic, Azure OpenAI, Groq, NVIDIA, OpenAI, Custom | OpenAI, OpenAI Like, Anthropic Claude, AWS Bedrock Claude, Azure, Cohere, DeepSeek, Fireworks, Gemini - AI Studio, Gemini - VertexAI, Groq, HuggingFace, Mistral, Nvidia, Ollama, OpenRouter, Sambanova, Together, xAI | Azure OpenAI (not customizable) | Google Gemini | Google Gemini | OpenAI, Azure OpenAI or OpenAI compatible (v0.2) | | | 9 | | Customizable System Prompt | No (only system prompt) | Yes | No | Yes | Yes | Yes | No | No | Yes | Yes | Yes | 10 | | Data Sources | Bing Search, Azure OpenAI, SharePoint, Custom files | Custom files, URL, or text | Web, Amazon S3, Confluence (preview), Salesforce (preview), SharePoint (preview) | API Request, Directory, File, SQL Query (Beta), URL, Webhook | Arxiv, Combined, CSV, Docx, Document, JSON, LangChain, LlamaIndex, PDF, PDF Url, Text, Website, Wikipedia | SharePoint | Google Search, Confluence Cloud, Confluence Data Center On-premises, Jira Cloud, Jira Data Center On-premises, Salesforce, ServiceNow, SharePoint Online, Slack, Dropbox, Box, OneDrive | Website, BigQuery, Cloud Storage, API, Cloud SQL, Spanner, Bigtable, Firestore, AlloyDB, Google Drive, Gmail, Google Sites, Google Calendar, Google Groups | | | | 11 | | RAG / Vector Stores | Amazon OpenSearch | Amazon Neptune | Amazon Aurora, Pinecone | Redis Enterprise Cloud | MongoDB Atlas | Astra DB, Astra DB Graph, Cassandra, Cassandra Graph, Chroma DB, Clickhouse, Couchbase, Elasticsearch, FAISS, Hyper-Converged, Milvus, MongoDB Atlas, OpenSearch, PGVector, Pinecone, Qdrant, Redis | PgVector, SingleStore, LanceDB, Pinecone, Qdrant, ChromaDB | Microsoft Graph | | | 12 | | Speech Capabilities | Yes | Yes (primary interface) | No | No | Yes | No | No | Yes | No | No | No | 13 | | Custom Speech Models | No | Yes | No | No | Yes (integration with ElevenLabs) | No | No | No | No | No | No | 14 | | Multimodality | Yes | Yes (however interface is audio) | No | Yes | Yes | No | Yes | Yes | Yes | Yes | Yes | 15 | | Multilingual | Yes | Yes | No | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | 16 | | Cost | TBD | TBD | TBD | TBD | TBD | TBD | TBD | TBD | TBD | TBD | TBD | 17 | | Actions / Functions | OpenAPI spec or Lambda Function | Astra DB, Astra DB CQL, Bing Search API, Calculator, DuckDuckGo Search, Exa Search Beta, Glean Search API, Google Search API, Google Serper API, Python REPL, RetrieverTool, Search API, Serp Search API, Tavily AI Search, Wikidata API, Wikipedia API, WolframAlpha API | Airflow, Apify, Arxiv, AWS Lambda, BaiduSearch, Calculator, Cal.com, Composio, Crawl4AI, CSV, Dalle, DuckDb, DuckDuckGo, Email, Exa, Fal, File, Firecrawl, Giphy, Github, Google Search, Hacker News, Jina Reader, Jira, Linear, Lumalabs, MLX Transcribe, ModelsLabs | | | | | Jira Cloud, Workday, Gmail, Google Calendar, Outlook email, Outlook calendar | OpenAPI, Data store, Function | | | 18 | | Publishing Method | Website, Teams, Slack, Email, Mobile-app | Web Widget | SharePoint and Teams | | | | | Agentspace portal and API | 
API | | | 19 | | Security Capabilities | Amazon Bedrock Guardrails | Prompt Security | | | | | | | | | | 20 | | Memory | Yes | Yes | | Yes | Yes | | | | | Yes | Yes | 21 | | Monitoring / Tracing | Yes | Limited | | Yes | Yes | | Yes | Yes | Yes | Yes | Yes | 22 | | Multiagent Support | No | No | No | Yes | Yes | No | No | No | No | Yes | Yes | 23 | | Interaction with OS | No | No | No | No | Yes | No | No | No | No | Yes | Yes | 24 | | Remote Execution Environment | Limited to Code Interpreter | No | Limited to Code Interpreter | Limited to Code Interpreter | Runs where framework is installed | No | No | Limited to Code Interpreter | Runs where framework is installed | Runs where framework is installed | Runs where framework is installed | 25 | 26 | -------------------------------------------------------------------------------- /jarvissql.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | import openai 4 | import langchain 5 | import azure.cognitiveservices.speech as speechsdk 6 | import elevenlabs 7 | import json 8 | import requests 9 | 10 | from langchain.agents.agent_toolkits import SQLDatabaseToolkit 11 | from langchain.sql_database import SQLDatabase 12 | from langchain.agents import AgentExecutor 13 | from langchain.agents import create_sql_agent 14 | from langchain import LLMMathChain, OpenAI, SQLDatabase, SerpAPIWrapper 15 | from langchain.agents import initialize_agent, Tool 16 | from langchain.agents import AgentType 17 | from langchain.chat_models import ChatOpenAI 18 | from langchain.callbacks.streaming_stdout_final_only import ( 19 | FinalStreamingStdOutCallbackHandler, 20 | ) 21 | 22 | os.environ["OPENAI_API_KEY"] ="" 23 | os.environ["SQL_SERVER_USERNAME"] = "" 24 | os.environ["SQL_SERVER_ENDPOINT"] = "" 25 | os.environ["SQL_SERVER_PASSWORD"] = "" 26 | os.environ["SQL_SERVER_DATABASE"] = "" 27 | os.environ["SERPAPI_API_KEY"] ="" 28 | 29 | speech_key, service_region = "", "eastus" 30 | speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region) 31 | speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config) 32 | 33 | from sqlalchemy import create_engine 34 | from sqlalchemy.engine.url import URL 35 | 36 | 37 | db_config = { 38 | 'drivername': 'mssql+pyodbc', 39 | 'username': os.environ["SQL_SERVER_USERNAME"] + '@' + os.environ["SQL_SERVER_ENDPOINT"], 40 | 'password': os.environ["SQL_SERVER_PASSWORD"], 41 | 'host': os.environ["SQL_SERVER_ENDPOINT"], 42 | 'port': 1433, 43 | 'database': os.environ["SQL_SERVER_DATABASE"], 44 | 'query': {'driver': 'ODBC Driver 17 for SQL Server'} 45 | } 46 | 47 | from langchain.agents import create_sql_agent 48 | 49 | 50 | llm = OpenAI(streaming=True,temperature=0) 51 | search = SerpAPIWrapper() 52 | db_url = URL.create(**db_config) 53 | db = SQLDatabase.from_uri(db_url) 54 | llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613") 55 | toolkit = SQLDatabaseToolkit(db=db, llm=llm) 56 | db_chain = create_sql_agent( 57 | llm=llm, 58 | toolkit=toolkit, 59 | verbose=True, 60 | agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, 61 | ) 62 | tools = [ 63 | Tool( 64 | name = "Search", 65 | func=search.run, 66 | description="useful for when you need to answer questions about current events. 
You should ask targeted questions" 67 | ), 68 | Tool( 69 | name="FooBar-DB", 70 | func=db_chain.run, 71 | description="useful to answer questions about John in the database" 72 | ) 73 | ] 74 | 75 | while True: 76 | print("Talk now") 77 | result = speech_recognizer.recognize_once() 78 | print("Recognized: {}".format(result.text)) 79 | message = format(result.text) 80 | agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True,) 81 | response = agent( 82 | { 83 | "input": result.text 84 | } 85 | ) 86 | response["output"] 87 | print(response["output"]) 88 | audio_stream = elevenlabs.generate(text=response["output"],voice="Matthew", stream=True) 89 | output = elevenlabs.stream(audio_stream) 90 | 91 | -------------------------------------------------------------------------------- /kodeanalytiker.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ["OPENAI_API_KEY"] = "" 3 | os.environ["SERPAPI_API_KEY"] = "" 4 | import streamlit as st 5 | import openai 6 | from collections import deque 7 | from typing import Dict, List, Optional, Any 8 | #do this to load the env variables 9 | 10 | 11 | # Definer Streamlit layout 12 | st.title("Kodetåke") 13 | language = st.selectbox("Velg Språk", ["Python", "JavaScript", "PowerShell"]) 14 | code_input = st.text_area("Last opp kode som skal analyseres") 15 | 16 | 17 | def explain_code(input_code, language): 18 | model_engine = "gpt-3.5-turbo" # Change to the desired OpenAI model 19 | message = [ 20 | { 21 | "role": "system", 22 | "content": "You are a helpful assistant to help desribe code to the user, always reply in Norwegian back to the user. Always use Norwegian" 23 | }, 24 | { 25 | "role": "user", 26 | "content": f"Forklar hva følgende {language} kode gjør for noe: \n\n{input_code}" 27 | } 28 | ] 29 | response = openai.ChatCompletion.create( 30 | model=model_engine, 31 | messages=message, 32 | max_tokens=1024, 33 | n=1, 34 | stop=None, 35 | temperature=0.7, 36 | ) 37 | return response.choices[0].message['content'] 38 | 39 | 40 | # Temperature and token slider 41 | temperature = st.sidebar.slider( 42 | "Temperature", 43 | min_value=0.0, 44 | max_value=1.0, 45 | value=0.5, 46 | step=0.1 47 | ) 48 | tokens = st.sidebar.slider( 49 | "Tokens", 50 | min_value=64, 51 | max_value=2048, 52 | value=256, 53 | step=64 54 | ) 55 | # Define Streamlit app behavior 56 | if st.button("Forklar"): 57 | output_text = explain_code(code_input, language) 58 | st.write("Kodetåke:", output_text) 59 | -------------------------------------------------------------------------------- /langchain-langsmith.py: -------------------------------------------------------------------------------- 1 | # pip install langchain openai google-search-results 2 | 3 | import os 4 | from dotenv import load_dotenv 5 | import openai 6 | import langchain 7 | 8 | os.environ["OPENAI_API_KEY"] ="" 9 | os.environ["SERPAPI_API_KEY"] ="" 10 | os.environ["LANGCHAIN_TRACING_V2"]="true" 11 | os.environ["LANGCHAIN_ENDPOINT"]="https://api.smith.langchain.com" 12 | os.environ["LANGCHAIN_API_KEY"]="" 13 | os.environ["LANGCHAIN_PROJECT"]="" 14 | 15 | 16 | from langchain.agents import load_tools 17 | from langchain.agents import initialize_agent 18 | from langchain.agents import AgentType 19 | from langchain.llms import OpenAI 20 | 21 | llm = OpenAI(temperature=0) 22 | tools = load_tools(["serpapi", "llm-math"], llm=llm) 23 | agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) 24 | 25 | 
agent.run("How many Teslas have been sold in 2023 in Norway?") -------------------------------------------------------------------------------- /langchain-pdf.py: -------------------------------------------------------------------------------- 1 | # pip install langchain openai google-search-results 2 | 3 | import os 4 | from dotenv import load_dotenv 5 | import openai 6 | import langchain 7 | import getpass 8 | 9 | os.environ["OPENAI_API_KEY"] ="OPENAPIKEY" 10 | 11 | from langchain.agents import load_tools 12 | from langchain.agents import initialize_agent 13 | from langchain.agents import AgentType 14 | from langchain.llms import OpenAI 15 | from langchain.document_loaders import PyPDFLoader 16 | from langchain.vectorstores import FAISS 17 | from langchain.embeddings.openai import OpenAIEmbeddings 18 | 19 | loader = PyPDFLoader("PDFFILE.PDF") 20 | pages = loader.load_and_split() 21 | faiss_index = FAISS.from_documents(pages, OpenAIEmbeddings()) 22 | docs = faiss_index.similarity_search("How will the community be engaged?", k=2) 23 | for doc in docs: 24 | print(str(doc.metadata["page"]) + ":", doc.page_content[:300]) -------------------------------------------------------------------------------- /mvpdagen-25-10.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/msandbu/gpt-ai/f31508021a877a964abab9ab770618acc9f96f64/mvpdagen-25-10.pptx -------------------------------------------------------------------------------- /narrator.py: -------------------------------------------------------------------------------- 1 | import os 2 | from openai import OpenAI 3 | import base64 4 | import json 5 | import time 6 | import simpleaudio as sa 7 | import errno 8 | from elevenlabs import generate, play, voices 9 | from elevenlabs import set_api_key 10 | set_api_key("") 11 | 12 | client = OpenAI() 13 | 14 | 15 | def encode_image(image_path): 16 | while True: 17 | try: 18 | with open(image_path, "rb") as image_file: 19 | return base64.b64encode(image_file.read()).decode("utf-8") 20 | except IOError as e: 21 | if e.errno != errno.EACCES: 22 | # Not a "file in use" error, re-raise 23 | raise 24 | # File is being written to, wait a bit and retry 25 | time.sleep(0.1) 26 | 27 | 28 | def play_audio(text): 29 | audio = generate(text=text, voice="Gltr1is83rrQkB5Q6m2S", model="eleven_turbo_v2") 30 | 31 | unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") 32 | dir_path = os.path.join("narration", unique_id) 33 | os.makedirs(dir_path, exist_ok=True) 34 | file_path = os.path.join(dir_path, "audio.wav") 35 | 36 | with open(file_path, "wb") as f: 37 | f.write(audio) 38 | 39 | play(audio) 40 | 41 | 42 | def generate_new_line(base64_image): 43 | return [ 44 | { 45 | "role": "user", 46 | "content": [ 47 | {"type": "text", "text": "Describe this image"}, 48 | { 49 | "type": "image_url", 50 | "image_url": f"data:image/jpeg;base64,{base64_image}", 51 | }, 52 | ], 53 | }, 54 | ] 55 | 56 | 57 | def analyze_image(base64_image, script): 58 | response = client.chat.completions.create( 59 | model="gpt-4-vision-preview", 60 | messages=[ 61 | { 62 | "role": "system", 63 | "content": """ 64 | You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary and mention that the person looks really nervous. 65 | Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it! 
66 | """, 67 | }, 68 | ] 69 | + script 70 | + generate_new_line(base64_image), 71 | max_tokens=500, 72 | ) 73 | response_text = response.choices[0].message.content 74 | return response_text 75 | 76 | 77 | def main(): 78 | script = [] 79 | 80 | while True: 81 | # path to your image 82 | image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") 83 | 84 | # getting the base64 encoding 85 | base64_image = encode_image(image_path) 86 | 87 | # analyze posture 88 | print("👀 David is watching...") 89 | analysis = analyze_image(base64_image, script=script) 90 | 91 | print("🎙️ David says:") 92 | print(analysis) 93 | 94 | play_audio(analysis) 95 | 96 | script = script + [{"role": "assistant", "content": analysis}] 97 | 98 | # wait for 5 seconds 99 | time.sleep(5) 100 | 101 | 102 | if __name__ == "__main__": 103 | main() 104 | -------------------------------------------------------------------------------- /secagent.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ["OPENAI_API_KEY"] = "" 3 | os.environ["SERPAPI_API_KEY"] = "" 4 | import streamlit as st 5 | from collections import deque 6 | from typing import Dict, List, Optional, Any 7 | 8 | from langchain import LLMChain, OpenAI, PromptTemplate 9 | from langchain.embeddings import OpenAIEmbeddings 10 | from langchain.llms import BaseLLM 11 | from langchain.vectorstores.base import VectorStore 12 | from pydantic import BaseModel, Field 13 | from langchain.chains.base import Chain 14 | from langchain.vectorstores import FAISS 15 | from langchain.docstore import InMemoryDocstore 16 | 17 | st.title('SikkerSøkSiri') 18 | 19 | # Define your embedding model 20 | embeddings_model = OpenAIEmbeddings() 21 | # Initialize the vectorstore as empty 22 | import faiss 23 | 24 | embedding_size = 1536 25 | index = faiss.IndexFlatL2(embedding_size) 26 | vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}) 27 | 28 | class TaskCreationChain(LLMChain): 29 | """Chain to generates tasks.""" 30 | 31 | @classmethod 32 | def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain: 33 | """Get the response parser.""" 34 | task_creation_template = ( 35 | "You are an task creation AI that uses the result of an execution agent" 36 | " to create new tasks with the following objective: {objective}," 37 | " The last completed task has the result: {result}." 38 | " This result was based on this task description: {task_description}." 39 | " These are incomplete tasks: {incomplete_tasks}." 40 | " Based on the result, create new tasks to be completed" 41 | " by the AI system that do not overlap with incomplete tasks." 42 | " Return the tasks as an array." 43 | ) 44 | prompt = PromptTemplate( 45 | template=task_creation_template, 46 | input_variables=[ 47 | "result", 48 | "task_description", 49 | "incomplete_tasks", 50 | "objective", 51 | ], 52 | ) 53 | return cls(prompt=prompt, llm=llm, verbose=verbose) 54 | 55 | class TaskPrioritizationChain(LLMChain): 56 | """Chain to prioritize tasks.""" 57 | 58 | @classmethod 59 | def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain: 60 | """Get the response parser.""" 61 | task_prioritization_template = ( 62 | "You are an AI agent focusing on finding security information and tasked with cleaning the formatting of and reprioritizing" 63 | " the following tasks: {task_names}." 64 | " Consider the ultimate objective of your team: {objective}." 65 | " Do not remove any tasks. Return the result as a numbered list, like:" 66 | " #. 
First task" 67 | " #. Second task" 68 | " Start the task list with number {next_task_id}." 69 | ) 70 | prompt = PromptTemplate( 71 | template=task_prioritization_template, 72 | input_variables=["task_names", "next_task_id", "objective"], 73 | ) 74 | return cls(prompt=prompt, llm=llm, verbose=verbose) 75 | 76 | from langchain.agents import ZeroShotAgent, Tool, AgentExecutor 77 | from langchain import OpenAI, SerpAPIWrapper, LLMChain 78 | 79 | todo_prompt = PromptTemplate.from_template( 80 | "You are a AI Security virtual assistant who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}" 81 | ) 82 | todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt) 83 | search = SerpAPIWrapper() 84 | tools = [ 85 | Tool( 86 | name="Search", 87 | func=search.run, 88 | description="useful for when you need to answer questions about current events", 89 | ), 90 | Tool( 91 | name="TODO", 92 | func=todo_chain.run, 93 | description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!", 94 | ), 95 | ] 96 | 97 | 98 | prefix = """You are an Security AI agent who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.""" 99 | suffix = """Question: {task} 100 | {agent_scratchpad}""" 101 | prompt = ZeroShotAgent.create_prompt( 102 | tools, 103 | prefix=prefix, 104 | suffix=suffix, 105 | input_variables=["objective", "task", "context", "agent_scratchpad"], 106 | ) 107 | 108 | def get_next_task( 109 | task_creation_chain: LLMChain, 110 | result: Dict, 111 | task_description: str, 112 | task_list: List[str], 113 | objective: str, 114 | ) -> List[Dict]: 115 | """Get the next task.""" 116 | incomplete_tasks = ", ".join(task_list) 117 | response = task_creation_chain.run( 118 | result=result, 119 | task_description=task_description, 120 | incomplete_tasks=incomplete_tasks, 121 | objective=objective, 122 | ) 123 | new_tasks = response.split("\n") 124 | return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()] 125 | 126 | def prioritize_tasks( 127 | task_prioritization_chain: LLMChain, 128 | this_task_id: int, 129 | task_list: List[Dict], 130 | objective: str, 131 | ) -> List[Dict]: 132 | """Prioritize tasks.""" 133 | task_names = [t["task_name"] for t in task_list] 134 | next_task_id = int(this_task_id) + 1 135 | response = task_prioritization_chain.run( 136 | task_names=task_names, next_task_id=next_task_id, objective=objective 137 | ) 138 | new_tasks = response.split("\n") 139 | prioritized_task_list = [] 140 | for task_string in new_tasks: 141 | if not task_string.strip(): 142 | continue 143 | task_parts = task_string.strip().split(".", 1) 144 | if len(task_parts) == 2: 145 | task_id = task_parts[0].strip() 146 | task_name = task_parts[1].strip() 147 | prioritized_task_list.append({"task_id": task_id, "task_name": task_name}) 148 | return prioritized_task_list 149 | 150 | def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]: 151 | """Get the top k tasks based on the query.""" 152 | results = vectorstore.similarity_search_with_score(query, k=k) 153 | if not results: 154 | return [] 155 | sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True)) 156 | return [str(item.metadata["task"]) for item in sorted_results] 157 | 158 | 159 | def execute_task( 160 | vectorstore, execution_chain: 
LLMChain, objective: str, task: str, k: int = 5 161 | ) -> str: 162 | """Execute a task.""" 163 | context = _get_top_tasks(vectorstore, query=objective, k=k) 164 | return execution_chain.run(objective=objective, context=context, task=task) 165 | 166 | class BabyAGI(Chain, BaseModel): 167 | """Controller model for the BabyAGI agent.""" 168 | 169 | task_list: deque = Field(default_factory=deque) 170 | task_creation_chain: TaskCreationChain = Field(...) 171 | task_prioritization_chain: TaskPrioritizationChain = Field(...) 172 | execution_chain: AgentExecutor = Field(...) 173 | task_id_counter: int = Field(1) 174 | vectorstore: VectorStore = Field(init=False) 175 | max_iterations: Optional[int] = None 176 | 177 | class Config: 178 | """Configuration for this pydantic object.""" 179 | 180 | arbitrary_types_allowed = True 181 | 182 | def add_task(self, task: Dict): 183 | self.task_list.append(task) 184 | 185 | def print_task_list(self): 186 | print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") 187 | for t in self.task_list: 188 | print(str(t["task_id"]) + ": " + t["task_name"]) 189 | 190 | def print_next_task(self, task: Dict): 191 | print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") 192 | print(str(task["task_id"]) + ": " + task["task_name"]) 193 | return (str(task["task_id"]) + ": " + task["task_name"]) 194 | 195 | def print_task_result(self, result: str): 196 | print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") 197 | return(result) 198 | 199 | @property 200 | def input_keys(self) -> List[str]: 201 | return ["objective"] 202 | 203 | @property 204 | def output_keys(self) -> List[str]: 205 | return [] 206 | 207 | @st.cache_data 208 | def _call(_self, inputs: Dict[str, Any]) -> Dict[str, Any]: 209 | result_list = [] 210 | """Run the agent.""" 211 | objective = inputs["objective"] 212 | first_task = inputs.get("first_task", "Make a todo list") 213 | _self.add_task({"task_id": 1, "task_name": first_task}) 214 | num_iters = 0 215 | while True: 216 | if _self.task_list: 217 | _self.print_task_list() 218 | 219 | # Step 1: Pull the first task 220 | task = _self.task_list.popleft() 221 | _self.print_next_task(task) 222 | # st.write('**Task:** \n') 223 | # st.write(_self.print_next_task(task)) 224 | 225 | # Step 2: Execute the task 226 | result = execute_task( 227 | _self.vectorstore, _self.execution_chain, objective, task["task_name"] 228 | ) 229 | this_task_id = int(task["task_id"]) 230 | _self.print_task_result(result) 231 | st.write('**Result:** \n') 232 | st.write(_self.print_task_result(result)) 233 | result_list.append(result) 234 | 235 | # Step 3: Store the result in Pinecone 236 | result_id = f"result_{task['task_id']}" 237 | _self.vectorstore.add_texts( 238 | texts=[result], 239 | metadatas=[{"task": task["task_name"]}], 240 | ids=[result_id], 241 | ) 242 | 243 | # Step 4: Create new tasks and reprioritize task list 244 | new_tasks = get_next_task( 245 | _self.task_creation_chain, 246 | result, 247 | task["task_name"], 248 | [t["task_name"] for t in _self.task_list], 249 | objective, 250 | ) 251 | for new_task in new_tasks: 252 | _self.task_id_counter += 1 253 | new_task.update({"task_id": _self.task_id_counter}) 254 | _self.add_task(new_task) 255 | _self.task_list = deque( 256 | prioritize_tasks( 257 | _self.task_prioritization_chain, 258 | this_task_id, 259 | list(_self.task_list), 260 | objective, 261 | ) 262 | ) 263 | num_iters += 1 264 | if _self.max_iterations is not None and num_iters == _self.max_iterations: 265 | print( 
266 | "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m" 267 | ) 268 | st.success('Task Completed!', icon="✅") 269 | break 270 | 271 | # Create a temporary file to hold the text 272 | with open('output.txt', 'w') as f: 273 | for item in result_list: 274 | f.write(item) 275 | f.write("\n\n") 276 | 277 | return {} 278 | 279 | @classmethod 280 | def from_llm( 281 | cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = False, **kwargs 282 | ) -> "BabyAGI": 283 | """Initialize the BabyAGI Controller.""" 284 | task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose) 285 | task_prioritization_chain = TaskPrioritizationChain.from_llm( 286 | llm, verbose=verbose 287 | ) 288 | llm_chain = LLMChain(llm=llm, prompt=prompt) 289 | tool_names = [tool.name for tool in tools] 290 | agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names) 291 | agent_executor = AgentExecutor.from_agent_and_tools( 292 | agent=agent, tools=tools, verbose=True 293 | ) 294 | return cls( 295 | task_creation_chain=task_creation_chain, 296 | task_prioritization_chain=task_prioritization_chain, 297 | execution_chain=agent_executor, 298 | vectorstore=vectorstore, 299 | **kwargs, 300 | ) 301 | 302 | 303 | def get_text(): 304 | input_text = st.text_input("Type in your prompt below", key="input") 305 | return input_text 306 | 307 | user_input = get_text() 308 | 309 | OBJECTIVE = user_input 310 | llm = OpenAI(temperature=0) 311 | # Logging of LLMChains 312 | verbose = False 313 | # If None, will keep on going forever. Customize the number of loops you want it to go through. 314 | max_iterations: Optional[int] = 3 315 | baby_agi = BabyAGI.from_llm( 316 | llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations 317 | ) 318 | 319 | if (user_input): 320 | baby_agi({"objective": OBJECTIVE}) 321 | 322 | # Download the file using Streamlit's download_button() function 323 | st.download_button( 324 | label='Download Results', 325 | data=open('output.txt', 'rb').read(), 326 | file_name='output.txt', 327 | mime='text/plain' 328 | ) 329 | 330 | # baby_agi({"objective": OBJECTIVE}) 331 | -------------------------------------------------------------------------------- /streamlit.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from langchain.llms import OpenAI 3 | 4 | st.title('Fagfrokost ChatGPT Demo') 5 | 6 | openai_api_key = st.sidebar.text_input('Legg inn OpenAI nøkler') 7 | 8 | def generate_response(input_text): 9 | llm = OpenAI(temperature=0.1, openai_api_key=openai_api_key) 10 | st.info(llm(input_text)) 11 | 12 | with st.form('my_form'): 13 | text = st.text_area('Enter text:', 'Legg inn ditt spørsmål her') 14 | submitted = st.form_submit_button('Submit') 15 | if not openai_api_key.startswith('sk-'): 16 | st.warning('Please enter your OpenAI API key!', icon='⚠') 17 | if submitted and openai_api_key.startswith('sk-'): 18 | generate_response(text) --------------------------------------------------------------------------------