├── .gitignore
├── 02-groupchat
├── .env
└── main.py
├── 06-logging
├── OAI_CONFIG_LIST.json
└── main.py
├── 08-dalle
├── OAI_CONFIG_LIST.json
└── dalle.py
├── 12-txt2img
├── test.png
└── main.py
├── 03-snake
├── OAI_CONFIG_LIST.json
├── main.py
└── code
│ ├── snake_game.py
│ └── snake_game_updated.py
├── 01-twoway-chat
├── OAI_CONFIG_LIST.json
├── main.py
└── coding
│ └── stock_price_chart.py
├── 05-nested-chats
├── OAI_CONFIG_LIST.json
└── main.py
├── 07-vision
├── OAI_CONFIG_LIST.json
└── vision.py
├── 13-simple-image
├── OAI_CONFIG_LIST.json
├── filename_859705.png
└── image-generation.py
├── 10-function-calling
├── OAI_CONFIG_LIST.json
└── main.py
├── 14-reddit-newsletter
├── OAI_CONFIG_LIST.json
├── reddit.md
└── main.py
├── 04-sequence_chat
├── OAI_CONFIG_LIST.json
└── main.py
├── README.md
├── requirements.txt
├── 09-lmstudio
└── main.py
└── 11-tools
└── tools.py
/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea/
2 |
--------------------------------------------------------------------------------
/02-groupchat/.env:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=sk-proj-1111
--------------------------------------------------------------------------------
/06-logging/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/08-dalle/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "dall-e-3",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/12-txt2img/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tylerprogramming/autogen-beginner-course/HEAD/12-txt2img/test.png
--------------------------------------------------------------------------------
/03-snake/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/01-twoway-chat/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/05-nested-chats/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/07-vision/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-4-vision-preview",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/13-simple-image/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/10-function-calling/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/14-reddit-newsletter/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | }
6 | ]
--------------------------------------------------------------------------------
/13-simple-image/filename_859705.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tylerprogramming/autogen-beginner-course/HEAD/13-simple-image/filename_859705.png
--------------------------------------------------------------------------------
/04-sequence_chat/OAI_CONFIG_LIST.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "gpt-3.5-turbo",
4 | "api_key": "sk-proj-1111"
5 | },
6 | {
7 | "model": "gpt-4",
8 | "api_key": "sk-proj-1111"
9 | }
10 | ]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## AutoGen Beginner Course
2 |
3 | - Install PyCharm Community Edition IDE (if you don't already have one)
4 | - Install Anaconda (if you don't want to use venv)
5 | - The Reddit project is a bonus — try it out!
6 |
7 | Join my [Discord](https://discord.gg/Db6e8KkHww) community for any questions!
8 |
9 | To install the requirements, run this in your terminal:
10 | `pip install -r requirements.txt`
11 |
12 | Bonus Project Reddit URL: [Create App](https://old.reddit.com/prefs/apps/)
--------------------------------------------------------------------------------
/12-txt2img/main.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
4 | headers = {"Authorization": "Bearer your_token"}
5 |
6 |
7 | def query(payload):
8 | response = requests.post(API_URL, headers=headers, json=payload)
9 | return response.content
10 |
11 |
12 | image_bytes = query({
13 | "inputs": "Astronaut riding a horse",
14 | })
15 | # You can access the image with PIL.Image for example
16 | import io
17 | from PIL import Image
18 |
19 | image = Image.open(io.BytesIO(image_bytes))
20 | image.save("test.png")
21 |
--------------------------------------------------------------------------------
/01-twoway-chat/main.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | 
3 | 
4 | def main():  # two-way chat demo: a human-in-the-loop user proxy talks to one assistant agent
5 |     config_list = autogen.config_list_from_json(
6 |         env_or_file="OAI_CONFIG_LIST.json"  # model + api_key entries come from this JSON file
7 |     )
8 | 
9 |     assistant = autogen.AssistantAgent(
10 |         name="Assistant",
11 |         llm_config={
12 |             "config_list": config_list
13 |         }
14 |     )
15 | 
16 |     user_proxy = autogen.UserProxyAgent(
17 |         name="user",
18 |         human_input_mode="ALWAYS",  # prompt the human for input on every turn
19 |         code_execution_config={
20 |             "work_dir": "coding",  # generated code is written and run under ./coding
21 |             "use_docker": False  # execute locally rather than in a Docker container
22 |         }
23 |     )
24 | 
25 |     user_proxy.initiate_chat(assistant, message="Plot a chart of META and TESLA stock price change.")
26 | 
27 | 
28 | if __name__ == "__main__":
29 |     main()
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | annotated-types==0.6.0
2 | anyio==4.3.0
3 | beautifulsoup4==4.12.3
4 | certifi==2024.2.2
5 | charset-normalizer==3.3.2
6 | contourpy==1.2.1
7 | cycler==0.12.1
8 | diskcache==5.6.3
9 | distro==1.9.0
10 | docker==7.0.0
11 | FLAML==2.1.2
12 | fonttools==4.51.0
13 | h11==0.14.0
14 | httpcore==1.0.5
15 | httpx==0.27.0
16 | idna==3.7
17 | kiwisolver==1.4.5
18 | matplotlib==3.8.4
19 | numpy==1.26.4
20 | openai==1.20.0
21 | packaging==24.0
22 | pandas==2.2.2
23 | pillow==10.3.0
24 | pyautogen==0.2.26
25 | pydantic==2.7.1
26 | pydantic_core==2.18.2
27 | pygame==2.5.2
28 | pyparsing==3.1.2
29 | python-dateutil==2.9.0.post0
30 | python-dotenv==1.0.1
31 | pytz==2024.1
32 | regex==2024.4.16
33 | requests==2.31.0
34 | six==1.16.0
35 | sniffio==1.3.1
36 | soupsieve==2.5
37 | termcolor==2.4.0
38 | tiktoken==0.6.0
39 | tqdm==4.66.2
40 | typing_extensions==4.11.0
41 | tzdata==2024.1
42 | urllib3==2.2.1
43 |
--------------------------------------------------------------------------------
/09-lmstudio/main.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | 
3 | 
4 | def main():  # chat with a locally-served model (LM Studio) through its OpenAI-compatible API
5 |     phi2 = {
6 |         "config_list": [
7 |             {
8 |                 "model": "TheBloke/phi-2-GGUF",
9 |                 "base_url": "http://localhost:1234/v1",  # LM Studio's local OpenAI-compatible endpoint
10 |                 "api_key": "lm-studio",  # placeholder key for the local server
11 |             },
12 |         ],
13 |         "cache_seed": None,  # disable response caching
14 |         "max_tokens": 1024
15 |     }
16 | 
17 |     phil = autogen.ConversableAgent(
18 |         "Phil (Phi-2)",
19 |         llm_config=phi2,
20 |         system_message="""
21 |         Your name is Phil and you are a comedian.
22 |         """,
23 |     )
24 |     # Create the agent that represents the user in the conversation.
25 |     user_proxy = autogen.UserProxyAgent(
26 |         "user_proxy",
27 |         code_execution_config=False,  # this proxy never executes code
28 |         default_auto_reply="...",  # keeps the conversation going without human input
29 |         human_input_mode="NEVER"
30 |     )
31 | 
32 |     user_proxy.initiate_chat(phil, message="Tell me a joke!")
33 | 
34 | 
35 | if __name__ == "__main__":
36 |     main()
37 | 
--------------------------------------------------------------------------------
/08-dalle/dalle.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | 
3 | 
4 | def main():  # ask a DALL-E 3 backed assistant to generate an image
5 |     # If you have created an OAI_CONFIG_LIST.json file in the current working directory, that file will be used.
6 |     config_list = autogen.config_list_from_json(
7 |         env_or_file="OAI_CONFIG_LIST.json",
8 |         filter_dict={
9 |             "model": ["dall-e-3"]  # keep only the dall-e-3 entry from the config
10 |         }
11 |     )
12 | 
13 |     # Create the agent that uses the LLM.
14 |     assistant = autogen.AssistantAgent(
15 |         "assistant",
16 |         llm_config={"config_list": config_list}
17 |     )
18 | 
19 |     # Create the agent that represents the user in the conversation.
20 |     user_proxy = autogen.UserProxyAgent(
21 |         "user_proxy",
22 |         code_execution_config={"work_dir": "coding", "use_docker": False}  # run generated code locally under ./coding
23 |     )
24 | 
25 |     user_proxy.initiate_chat(
26 |         assistant,
27 |         message="Create an image of a robot holding a sign saying 'AutoGen' with the background of wallstreet."
28 |     )
29 | 
30 | 
31 | if __name__ == "__main__":
32 |     main()
33 | 
--------------------------------------------------------------------------------
/01-twoway-chat/coding/stock_price_chart.py:
--------------------------------------------------------------------------------
1 | # filename: stock_price_chart.py
2 | import pandas as pd
3 | import matplotlib.pyplot as plt
4 | import requests
5 |
6 | # Function to fetch stock price data from Alpha Vantage API
7 | def fetch_stock_data(symbol):
8 | api_key = 'YOUR_API_KEY' # Replace 'YOUR_API_KEY' with your actual API Key
9 | url = f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={api_key}'
10 | response = requests.get(url)
11 | data = response.json()['Time Series (Daily)']
12 | df = pd.DataFrame(data).T
13 | df.index = pd.to_datetime(df.index)
14 | df['close'] = pd.to_numeric(df['4. close'])
15 | return df['close']
16 |
17 | # Fetch stock price data for NVDA and TESLA
18 | nvda_stock_data = fetch_stock_data('NVDA')
19 | tesla_stock_data = fetch_stock_data('TSLA')
20 |
21 | # Plot the stock price change
22 | plt.figure(figsize=(14, 7))
23 | plt.plot(nvda_stock_data.index, nvda_stock_data.values, label='NVDA')
24 | plt.plot(tesla_stock_data.index, tesla_stock_data.values, label='TESLA')
25 | plt.title('NVDA vs TESLA Stock Price Change')
26 | plt.xlabel('Date')
27 | plt.ylabel('Stock Price (USD)')
28 | plt.legend()
29 | plt.grid(True)
30 | plt.show()
--------------------------------------------------------------------------------
/03-snake/main.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | 
3 | 
4 | config_list = autogen.config_list_from_json(
5 |     env_or_file="OAI_CONFIG_LIST.json",
6 | )
7 | 
8 | llm_config = {
9 |     "cache_seed": 47,  # fixed seed so repeated runs can reuse cached LLM responses
10 |     "temperature": 0,  # deterministic completions
11 |     "config_list": config_list,
12 |     "timeout": 120,  # seconds per LLM request
13 | }
14 | user_proxy = autogen.UserProxyAgent(  # executes the coder's code without human input
15 |     name="User",
16 |     system_message="Executor. Execute the code written by the coder and suggest updates if there are errors.",
17 |     human_input_mode="NEVER",
18 |     code_execution_config={
19 |         "last_n_messages": 3,  # scan the last 3 messages for code blocks to run
20 |         "work_dir": "code",  # generated files land in ./code
21 |         "use_docker": False,
22 |     },
23 | )
24 | 
25 | coder = autogen.AssistantAgent(
26 |     name="Coder",
27 |     llm_config=llm_config,
28 |     system_message="""
29 |     If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line.
30 |     Coder. Your job is to write complete code. You primarily are a game programmer. Make sure to save the code to disk.
31 |     """,
32 | )
33 | 
34 | pm = autogen.AssistantAgent(
35 |     name="Product_manager",
36 |     system_message="Help plan out to create games.",
37 |     llm_config=llm_config,
38 | )
39 | 
40 | group_chat = autogen.GroupChat(
41 |     agents=[user_proxy, coder, pm], messages=[], max_round=15  # hard stop after 15 rounds
42 | )
43 | manager = autogen.GroupChatManager(groupchat=group_chat, llm_config=llm_config)
44 | 
45 | user_proxy.initiate_chat(
46 |     manager,
47 |     message=
48 |     """
49 |     I would like to create a snake game in Python! Make sure the game ends when the player hits the side of the screen.
50 |     """,
51 | )
52 | 
--------------------------------------------------------------------------------
/13-simple-image/image-generation.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import random
3 | import io
4 | import autogen
5 | from PIL import Image
6 | from typing import Annotated
7 |
8 | # api and headers
9 | API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
10 | headers = {"Authorization": "Bearer your_token"}
11 |
12 |
13 | # the function to take in prompt and convert to image and save to a file
14 | def create_image(message: Annotated[str, "The response from the LLM"]) -> str:
15 | response = requests.post(API_URL, headers=headers, json=message)
16 | image_bytes = response.content
17 |
18 | random_number = random.randint(1, 1000000)
19 | file_name = "filename_" + str(random_number) + ".png"
20 |
21 | Image.open(io.BytesIO(image_bytes)).save(file_name)
22 | return message
23 |
24 |
25 | # the llm_config
26 | llm_config = {
27 | "config_list": autogen.config_list_from_json(
28 | env_or_file="OAI_CONFIG_LIST.json",
29 | ),
30 | "temperature": 0.5,
31 | "seed": 41
32 | }
33 |
34 | # Create the agent workflow
35 | assistant = autogen.AssistantAgent(
36 | name="Assistant",
37 | system_message="You are a helpful AI assistant. "
38 | "Return 'TERMINATE' when the task is done.",
39 | llm_config=llm_config,
40 |
41 | )
42 | user_proxy = autogen.UserProxyAgent(
43 | name="User",
44 | is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
45 | human_input_mode="NEVER",
46 | code_execution_config={
47 | "use_docker": False
48 | }
49 | )
50 |
51 |
52 | assistant.register_for_llm(name="create_image", description="Create an image from a text description")(create_image)
53 | user_proxy.register_for_execution(name="create_image")(create_image)
54 |
55 |
56 | user_proxy.initiate_chat(
57 | assistant, message="""
58 | Create an image of a professional futbol player.
59 | """
60 | )
61 |
--------------------------------------------------------------------------------
/14-reddit-newsletter/reddit.md:
--------------------------------------------------------------------------------
1 | # A Glimpse Into the AI Frontier
2 |
3 | ---
4 |
5 | ## Let OpenAI know in the Unsubscribe survey, that Claude 3 Opus is taking the lead!
6 |
7 | Have been using both in parallel for over a week now and I naturally started not opening ChatGPT anymore, but only using Opus 3 - It's actually crazy how much better it is, if you use it for a while and go back.
8 |
9 | If you come to the same conclusion and cancel your subscription as well, let them know in the survey and let's hope, they hurry up with their next model release!
10 |
11 | **Author:** TheBanq
12 |
13 | **Check it out:** [Link Here](https://www.reddit.com/r/OpenAI/comments/1br60oz/let_openai_know_in_the_unsubscribe_survey_that/)
14 |
15 | ---
16 |
17 | ## Film Bros AI : creates Short Films in 20 seconds [Parody]
18 |
19 | **Author:** CreativeAIgency
20 |
21 | **Check it out:** [Link Here](https://youtu.be/BIcqKT75-mk)
22 |
23 | ---
24 |
25 | ## 100% AI-generated Podcast by GPT and Claude
26 |
27 | I wrote a program to have Claude and GPT chat with each other - 100% AI-generated podcast - the result is fascinating!
28 |
29 | Here is an unedited podcast between a female painter (Claude Sonnet) and a male musician (GPT 3.5 turbo): [Link](https://soundcloud.com/yummymushroom/music-and-painting-ai-podcast)
30 |
31 | I also generated a Chinese version (same personas but the conversations are totally different) - this version is even better - the artists talked about how other forms of art inspired them such as dancing and they even discussed how to collaborate and started brainstorming! The TTS for the Chinese version is not as natural as the English version but still quite awesome. The content is actually interesting.
32 |
33 | The Chinese version: [Link](https://soundcloud.com/yummymushroom/ai-podcast-chinese)
34 |
35 | Enjoy!
36 |
37 | **Author:** Ordinary_Ad_404
38 |
39 | **Check it out:** [Link Here](https://www.reddit.com/r/OpenAI/comments/1br44xo/100_aigenerated_podcast_by_gpt_and_claude/)
--------------------------------------------------------------------------------
/05-nested-chats/main.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | 
3 | config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST.json")
4 | llm_config = {"config_list": config_list}
5 | 
6 | task = """Write a concise but engaging blogpost about Meta."""
7 | 
8 | writer = autogen.AssistantAgent(
9 |     name="Writer",
10 |     llm_config={"config_list": config_list},
11 |     system_message="""
12 |     You are a professional writer, known for your insightful and engaging articles.
13 |     You transform complex concepts into compelling narratives.
14 |     You should improve the quality of the content based on the feedback from the user.
15 |     """,
16 | )
17 | 
18 | user_proxy = autogen.UserProxyAgent(
19 |     name="User",
20 |     human_input_mode="NEVER",
21 |     is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,  # stop once a reply contains TERMINATE
22 |     code_execution_config={
23 |         "last_n_messages": 1,  # only scan the last message for code blocks
24 |         "work_dir": "my_code",
25 |         "use_docker": False,
26 |     }
27 | )
28 | 
29 | critic = autogen.AssistantAgent(
30 |     name="Critic",
31 |     llm_config={"config_list": config_list},
32 |     system_message="""
33 |     You are a critic, known for your thoroughness and commitment to standards.
34 |     Your task is to scrutinize content for any harmful elements or regulatory violations, ensuring
35 |     all materials align with required guidelines.
36 |     For code
37 |     """,
38 | )
39 | 
40 | 
41 | def reflection_message(recipient, messages, sender, config):  # autogen nested-chat message-callback signature; builds the critique prompt from the writer's last message
42 |     print("Reflecting...")
43 |     return f"Reflect and provide critique on the following writing. \n\n {recipient.chat_messages_for_summary(sender)[-1]['content']}"
44 | 
45 | 
46 | user_proxy.register_nested_chats(  # each writer reply triggers a one-turn side chat with the critic
47 |     [
48 |         {
49 |             "recipient": critic,
50 |             "message": reflection_message,
51 |             "summary_method": "last_msg",
52 |             "max_turns": 1  # a single critique pass per trigger
53 |         }
54 |     ],
55 |     trigger=writer
56 | )
57 | 
58 | user_proxy.initiate_chat(recipient=writer, message=task, max_turns=2, summary_method="last_msg")
59 | 
60 | 
61 | 
62 | 
63 | 
64 | 
65 | 
66 | 
67 | 
68 | 
69 | 
70 | 
--------------------------------------------------------------------------------
/11-tools/tools.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | from typing import Annotated
3 | import datetime
4 |
5 |
6 | def get_weather(location: Annotated[str, "The location"]) -> str:
7 | if location == "Florida":
8 | return "It's hot in Florida!"
9 | elif location == "Maine":
10 | return "It's cold in Maine"
11 | else:
12 | return f"I don't know this place {location}"
13 |
14 |
15 | def get_time(timezone: Annotated[str, "The timezone we are in"]) -> str:
16 | now = datetime.datetime.now()
17 |
18 | # Get the time with timezone information
19 | current_time = now.strftime("%Y-%m-%d %H:%M:%S %Z")
20 |
21 | return f"Current time in your {timezone}: {current_time}"
22 |
23 |
24 | # Let's first define the assistant agent that suggests tool calls.
25 | assistant = autogen.ConversableAgent(
26 | name="Assistant",
27 | system_message="You are a helpful AI assistant. "
28 | "Return 'TERMINATE' when the task is done.",
29 | llm_config={
30 | "config_list": [
31 | {
32 | "model": "gpt-4",
33 | "api_key": "sk-proj-1111"
34 | }
35 | ]
36 | }
37 | )
38 |
39 | # The user proxy agent is used for interacting with the assistant agent
40 | # and executes tool calls.
41 | user_proxy = autogen.ConversableAgent(
42 | name="User",
43 | is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
44 | human_input_mode="NEVER",
45 | )
46 |
47 | # Register the tool signature with the assistant agent.
48 | assistant.register_for_llm(name="get_weather", description="Get the current weather for a specific location")(get_weather)
49 | assistant.register_for_llm(name="get_time", description="The IANA time zone name, e.g. America/Los_Angeles")(get_time)
50 |
51 |
52 | # Register the tool function with the user proxy agent.
53 | user_proxy.register_for_execution(name="get_weather")(get_weather)
54 | user_proxy.register_for_execution(name="get_time")(get_time)
55 |
56 | user_proxy.initiate_chat(
57 | assistant,
58 | message="What time is it in Florida?"
59 | )
60 |
61 |
--------------------------------------------------------------------------------
/14-reddit-newsletter/main.py:
--------------------------------------------------------------------------------
1 | from langchain_community.document_loaders.reddit import RedditPostsLoader
2 | import autogen
3 | 
4 | # https://www.reddit.com/prefs/apps/
5 | loader = RedditPostsLoader(
6 |     client_id="-client-id",  # placeholder: your Reddit app's client id
7 |     client_secret="-secret-key",  # placeholder: your Reddit app's secret
8 |     user_agent="extractor by u/tyler_programming",
9 |     categories=["new"],  # Note: Categories can be only of following value - "controversial" "hot" "new" "rising" "top"
10 |     mode="subreddit",
11 |     search_queries=[
12 |         "openai"
13 |     ],  # List of subreddits to load posts from
14 |     number_posts=3,  # Default value is 10
15 | )
16 | 
17 | documents = loader.load()
18 | 
19 | config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST.json")
20 | llm_config = {"config_list": config_list, "seed": 45}  # fixed seed for reproducible/cached responses
21 | 
22 | writer = autogen.AssistantAgent(
23 |     name="Writer",
24 |     llm_config={"config_list": config_list},
25 |     system_message="""
26 |     You won't change the information given, just parse the page_content from the reddit post. No code will
27 |     be written.
28 |     """,
29 | )
30 | 
31 | user_proxy = autogen.UserProxyAgent(
32 |     name="User",
33 |     human_input_mode="NEVER",
34 |     max_consecutive_auto_reply=3,  # cap the automatic back-and-forth at 3 replies
35 |     code_execution_config=False
36 | )
37 | 
38 | user_proxy.initiate_chat(
39 |     recipient=writer,
40 |     message=f"""I need you to extract the page_content and url from each of {documents},
41 |     with each document extracted separate from each other. Make sure this is formatted with Markdown. Get it ready
42 |     for an email, but don't add or change what is in the documents. Make sure to use the FULL page_content
43 |     from the document.
44 | 
45 |     Create a newsletter from this information with:
46 | 
47 |     [Newsletter Title Here] - make sure to create a catchy title
48 | 
49 |     The format for markdown should be:
50 | 
51 |     Title of the document
52 |     The Page Content
53 |     The Author
54 |     The url
55 | """,
56 |     max_turns=2,
57 |     summary_method="last_msg"
58 | )
59 | 
--------------------------------------------------------------------------------
/10-function-calling/main.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 | 
3 | import autogen
4 | from typing_extensions import Annotated
5 | 
6 | config_list = autogen.config_list_from_json(
7 |     env_or_file="OAI_CONFIG_LIST.json",
8 |     filter_dict={
9 |         "model": ["gpt-3.5-turbo"]  # only use gpt-3.5-turbo entries from the config
10 |     }
11 | )
12 | 
13 | llm_config = {
14 |     "config_list": config_list,
15 |     "timeout": 120  # seconds per LLM request
16 | }
17 | 
18 | currency_bot = autogen.AssistantAgent(
19 |     name="currency_bot",
20 |     system_message="For currency exchange tasks, only use the functions you have been provided with. Reply TERMINATE "
21 |                    "when the task is done.",
22 |     llm_config=llm_config
23 | )
24 | 
25 | user_proxy = autogen.UserProxyAgent(
26 |     name="user_proxy",
27 |     is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),  # end when a reply ends with TERMINATE
28 |     human_input_mode="NEVER",
29 |     max_consecutive_auto_reply=5,
30 |     code_execution_config=False
31 | )
32 | 
33 | CurrencySymbol = Literal["USD", "EUR"]
34 | 
35 | 
36 | def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:
37 |     if base_currency == quote_currency:
38 |         return 1.0
39 |     elif base_currency == "USD" and quote_currency == "EUR":
40 |         return 1 / 1.09  # hard-coded snapshot rate, not live data
41 |     elif base_currency == "EUR" and quote_currency == "USD":
42 |         return 1 / 1.1  # hard-coded snapshot rate, not live data
43 |     else:
44 |         raise ValueError(f"Unknown currencies: {base_currency}, {quote_currency}")
45 | 
46 | 
47 | @user_proxy.register_for_execution()
48 | @currency_bot.register_for_llm(description="Currency exchange calculator")
49 | def currency_calculator(
50 |     base_amount: Annotated[float, "Amount of currency in base_currency"],
51 |     base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD",
52 |     quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR"
53 | ) -> str:
54 |     quote_amount = exchange_rate(base_currency, quote_currency) * base_amount
55 |     return f"{quote_amount} - {quote_currency}"
56 | 
57 | 
58 | user_proxy.initiate_chat(
59 |     currency_bot,
60 |     message="Can you give me the answer to 2 + 2?"  # NOTE(review): off-topic prompt for a currency bot — possibly a deliberate demo that the tool is NOT called; confirm intent
61 | )
62 | 
--------------------------------------------------------------------------------
/04-sequence_chat/main.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | 
3 | config_list = autogen.config_list_from_json(
4 |     env_or_file="OAI_CONFIG_LIST.json",
5 |     filter_dict={
6 |         "model": ["gpt-3.5-turbo"]  # the config also holds a gpt-4 entry; it is filtered out here
7 |     },
8 | )
9 | 
10 | llm_config = {
11 |     "config_list": config_list,
12 |     "timeout": 120,  # seconds per LLM request
13 | }
14 | 
15 | assistant_quote1 = autogen.AssistantAgent(
16 |     name="assistant1",
17 |     system_message="You are an assistant agent who gives quotes. Return 'TERMINATE' when the task is done.",
18 |     llm_config=llm_config,
19 | )
20 | 
21 | assistant_quote2 = autogen.AssistantAgent(
22 |     name="assistant2",
23 |     system_message="You are another assistant agent who gives quotes. Return 'TERMINATE' when the task is done.",
24 |     llm_config=llm_config,
25 |     max_consecutive_auto_reply=1  # answer once, then stop auto-replying
26 | )
27 | 
28 | assistant_create_new = autogen.AssistantAgent(
29 |     name="assistant3",
30 |     system_message="You will create a new quote based on others. Return 'TERMINATE' when the task is done.",
31 |     llm_config=llm_config,
32 |     max_consecutive_auto_reply=1  # answer once, then stop auto-replying
33 | )
34 | 
35 | user_proxy = autogen.UserProxyAgent(
36 |     name="user_proxy",
37 |     is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
38 |     human_input_mode="NEVER",
39 |     max_consecutive_auto_reply=10,
40 |     code_execution_config=False
41 | )
42 | 
43 | user_proxy.initiate_chats(  # run the three chats in order; each chat's summary is carried into the next
44 |     [
45 |         {
46 |             "recipient": assistant_quote1,
47 |             "message": "give a quote from a famous author",
48 |             "clear_history": True,
49 |             "silent": False,
50 |             "summary_method": "reflection_with_llm"  # an LLM writes the summary passed to the next chat
51 |         },
52 |         {
53 |             "recipient": assistant_quote2,
54 |             "message": "give another quote from a famous author",
55 |             "clear_history": True,
56 |             "silent": False,
57 |             "summary_method": "reflection_with_llm"
58 |         },
59 |         {
60 |             "recipient": assistant_create_new,
61 |             "message": "based on the previous quotes, come up with your own!",
62 |             "clear_history": True,
63 |             "silent": False,
64 |             "summary_method": "reflection_with_llm"
65 |         }
66 |     ]
67 | )
68 | 
--------------------------------------------------------------------------------
/07-vision/vision.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
3 |
4 | image_goldendoodle = "https://th.bing.com/th/id/R.422068ce8af4e15b0634fe2540adea7a?rik=y4OcXBE%2fqutDOw&pid=ImgRaw&r=0"
5 | image_corgi = "https://cdn.pixabay.com/photo/2019/08/19/07/45/corgi-4415649_1280.jpg"
6 | image_luigi_yoshi_mario = "https://cdn.pixabay.com/photo/2016/07/30/14/25/mario-1557240_1280.jpg"
7 | image_super_nintendo = "https://cdn.pixabay.com/photo/2018/04/26/06/59/technology-3351286_1280.jpg"
8 | image_mitochondria = "https://cdn.pixabay.com/photo/2021/07/18/05/36/cell-6474673_1280.jpg"
9 | image_waldo = "https://i.stack.imgur.com/reNlF.jpg"
10 |
11 | config_list_4v = autogen.config_list_from_json(
12 | "OAI_CONFIG_LIST.json",
13 | filter_dict={
14 | "model": ["gpt-4-vision-preview"],
15 | },
16 | )
17 |
18 | image_agent = MultimodalConversableAgent(
19 | name="image-explainer",
20 | llm_config={"config_list": config_list_4v, "temperature": 0.5, "max_tokens": 500}
21 | )
22 |
23 | user_proxy = autogen.UserProxyAgent(
24 | name="User_proxy",
25 | system_message="A human admin.",
26 | human_input_mode="NEVER",
27 | max_consecutive_auto_reply=0,
28 | code_execution_config=False
29 | )
30 |
31 | # Example 1
32 | # user_proxy.initiate_chat(
33 | # image_agent,
34 | # message=f"""
35 | # Can you describe this image in detail?
36 | # """
37 | # )
38 |
39 | # Example 2
40 | # user_proxy.initiate_chat(
41 | # image_agent,
42 | # message=f"""What is this picture of and describe everything in it?
""")
43 | #
44 | # user_proxy.send(
45 | # message=f"""
46 | # What dog is this a picture of?
47 | #
48 | # Which of these dogs tends to bark more, this one or the previous dog image?
49 | # """,
50 | # recipient=image_agent
51 | # )
52 |
53 | # Example 3
54 | user_proxy.initiate_chat(
55 | image_agent,
56 | message=f"""
57 | What is this picture of and describe everything in it?
58 | """)
59 |
60 | user_proxy.send(
61 | message=f"""
62 | What game is displayed here?
63 |
64 | Among all of these characters, which one has sold the most amount of games? Can you also give some figures for all
65 | characters shown?
66 | """,
67 | recipient=image_agent
68 | )
69 |
--------------------------------------------------------------------------------
/02-groupchat/main.py:
--------------------------------------------------------------------------------
1 | import autogen
2 | import dotenv
3 |
4 | dotenv.load_dotenv()
5 |
6 | config_list = autogen.config_list_from_dotenv(
7 | ".env",
8 | {"gpt-3.5-turbo": "OPENAI_API_KEY"}
9 | )
10 |
11 | llm_config = {
12 | "cache_seed": 43, # change the cache_seed for different trials
13 | "temperature": 0,
14 | "config_list": config_list,
15 | "timeout": 120, # in seconds
16 | }
# Human-in-the-loop admin: talks to the planner, approves plan execution, and
# runs engineer-produced code locally (work_dir ./code, no Docker).
user_proxy = autogen.UserProxyAgent(
    name="Admin",
    system_message="A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved "
                   "by this admin.",
    code_execution_config={
        "work_dir": "code",
        "use_docker": False
    },
    # Only asks the human for input when the conversation is about to end.
    human_input_mode="TERMINATE",
)
# Writes python/shell code (saved to disk) according to the approved plan.
engineer = autogen.AssistantAgent(
    name="Engineer",
    llm_config=llm_config,
    system_message="""Engineer. You follow an approved plan. Make sure you save code to disk. You write python/shell
    code to solve tasks. Wrap the code in a code block that specifies the script type and the name of the file to
    save to disk.""",
)
# Non-coding domain expert: categorizes papers from printed abstracts.
scientist = autogen.AssistantAgent(
    name="Scientist",
    llm_config=llm_config,
    system_message="""Scientist. You follow an approved plan. You are able to categorize papers after seeing their
    abstracts printed. You don't write code.""",
)
# Proposes and revises the plan until the Admin approves it.
planner = autogen.AssistantAgent(
    name="Planner",
    system_message="""Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval.
The plan may involve an engineer who can write code and a scientist who doesn't write code.
Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.
""",
    llm_config=llm_config,
)

# Reviews plans, claims and code from the other agents.
critic = autogen.AssistantAgent(
    name="Critic",
    system_message="Critic. Double check plan, claims, code from other agents and provide feedback. Check whether the "
                   "plan includes adding verifiable info such as source URL.",
    llm_config=llm_config,
)
# Wire all five agents into one round-robin-style group chat (capped at 12
# rounds) driven by a manager agent, then kick it off with the research task.
group_chat = autogen.GroupChat(
    agents=[user_proxy, engineer, scientist, planner, critic],
    messages=[],
    max_round=12,
)
manager = autogen.GroupChatManager(groupchat=group_chat, llm_config=llm_config)

task = """
Find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.
"""
user_proxy.initiate_chat(manager, message=task)
66 |
--------------------------------------------------------------------------------
/06-logging/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | import autogen
3 | import pandas as pd
4 | import sqlite3
5 |
# set up the llm_configuration (model + key come from OAI_CONFIG_LIST.json)
llm_config = {
    "config_list": autogen.config_list_from_json(
        env_or_file="OAI_CONFIG_LIST.json",
    ),
    "temperature": 0
}

# Start logging: every chat completion is recorded into logs.db (SQLite).
logging_session_id = autogen.runtime_logging.start(config={"dbname": "logs.db"})
print("Started Logging session ID: " + str(logging_session_id))

# Create an agent workflow and run it
assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    code_execution_config=False,
    human_input_mode="NEVER",  # fully automated: never prompt a human
    # End the chat once the assistant's reply contains "TERMINATE".
    is_termination_msg=lambda msg: "TERMINATE" in msg["content"],
)

user_proxy.initiate_chat(
    assistant, message="What is the height of the Sears Tower? Only respond with the answer and terminate"
)

# Stop logging so the rows below are fully flushed to logs.db.
autogen.runtime_logging.stop()
33 |
34 |
# create function to get log
def get_log(dbname="logs.db", table="chat_completions"):
    """Fetch logged chat completions from the runtime-logging SQLite database.

    Returns a list of row dicts keyed by column name (request, response,
    cost, start_time, end_time).

    NOTE: ``table`` is interpolated directly into the SQL string (SQLite
    cannot parameterize identifiers) — only pass trusted table names.
    """
    # The original file defined both get_log and str_to_dict TWICE; the
    # second definitions shadowed the first, so the duplicates were dead
    # code and have been consolidated here.
    con = sqlite3.connect(dbname)
    try:
        query = f"SELECT request, response, cost, start_time, end_time from {table}"
        cursor = con.execute(query)
        rows = cursor.fetchall()
        column_names = [description[0] for description in cursor.description]
        return [dict(zip(column_names, row)) for row in rows]
    finally:
        # Close even when the query raises (the original leaked the
        # connection on error).
        con.close()


def str_to_dict(s):
    """Deserialize a JSON string (as stored in the log) into Python objects."""
    return json.loads(s)
69 |
70 |
# Load the logged rows into pandas and replace the raw JSON columns with the
# human-readable pieces (token count, first request message, reply text).
log_data = get_log()
log_data_df = pd.DataFrame(log_data)

# Parse each JSON response once and reuse it for both derived columns.
parsed_responses = log_data_df["response"].map(str_to_dict)

log_data_df["total_tokens"] = parsed_responses.map(lambda r: r["usage"]["total_tokens"])

log_data_df["request"] = log_data_df["request"].map(lambda s: str_to_dict(s)["messages"][0]["content"])

log_data_df["response"] = parsed_responses.map(lambda r: r["choices"][0]["message"]["content"])

print(log_data_df)
85 |
--------------------------------------------------------------------------------
/03-snake/code/snake_game.py:
--------------------------------------------------------------------------------
# filename: snake_game.py
# Classic snake built on the stdlib turtle module; all game state lives in
# module-level globals (head, food, segments, score, delay).

import turtle
import time
import random

delay = 0.1  # seconds between frames (constant game speed)
score = 0

# Set up the screen
win = turtle.Screen()
win.title("Snake Game")
win.bgcolor("black")
win.setup(width=600, height=600)
win.tracer(0)  # Turns off the screen updates; win.update() redraws manually

# Snake head
head = turtle.Turtle()
head.speed(0)
head.shape("square")
head.color("white")
head.penup()
head.goto(0, 0)
# "direction" is a custom attribute attached to the turtle; values are
# "stop"/"up"/"down"/"left"/"right" and drive move() below.
head.direction = "stop"

# Snake food
food = turtle.Turtle()
food.speed(0)
food.shape("circle")
food.color("red")
food.penup()
food.goto(0, 100)

# Body segments trailing the head; grows by one per food eaten.
segments = []
35 |
36 | # Functions
def go_up():
    """Turn the snake upward unless it is currently heading down."""
    if head.direction == "down":
        return
    head.direction = "up"
40 |
def go_down():
    """Turn the snake downward unless it is currently heading up."""
    if head.direction == "up":
        return
    head.direction = "down"
44 |
def go_left():
    """Turn the snake left unless it is currently heading right."""
    if head.direction == "right":
        return
    head.direction = "left"
48 |
def go_right():
    """Turn the snake right unless it is currently heading left."""
    if head.direction == "left":
        return
    head.direction = "right"
52 |
def move():
    """Advance the head one 20-pixel step in its current travel direction."""
    step = {
        "up": (0, 20),
        "down": (0, -20),
        "left": (-20, 0),
        "right": (20, 0),
    }.get(head.direction)
    if step is None:
        # "stop" (or any unrecognized direction): stay put, same as the
        # original chain of ifs matching nothing.
        return
    dx, dy = step
    if dx:
        head.setx(head.xcor() + dx)
    else:
        head.sety(head.ycor() + dy)
69 |
# Keyboard bindings (WASD steer the snake)
win.listen()
win.onkeypress(go_up, "w")
win.onkeypress(go_down, "s")
win.onkeypress(go_left, "a")
win.onkeypress(go_right, "d")

# Main game loop: runs until the window is closed (turtle.Terminator).
while True:
    try:
        win.update()

        # Border collision: pause briefly, re-center and halt the head.
        if head.xcor() > 290 or head.xcor() < -290 or head.ycor() > 290 or head.ycor() < -290:
            time.sleep(1)
            head.goto(0, 0)
            # Fix: use the lowercase "stop" sentinel used everywhere else in
            # this file (the original assigned "Stop", which only behaved
            # correctly because move() never compares against the stop value).
            head.direction = "stop"

            # Hide the segments off-screen (turtle objects cannot be deleted)
            for segment in segments:
                segment.goto(1000, 1000)

            # Clear the segments list
            segments.clear()

            # Reset the score
            score = 0

        # Food collision: relocate food, grow the snake, bump the score.
        if head.distance(food) < 20:
            # Move the food to a random spot
            x = random.randint(-290, 290)
            y = random.randint(-290, 290)
            food.goto(x, y)

            # Add a body segment; it is positioned on the next iteration.
            new_segment = turtle.Turtle()
            new_segment.speed(0)
            new_segment.shape("square")
            new_segment.color("grey")
            new_segment.penup()
            segments.append(new_segment)

            # Increase the score (NOTE(review): never rendered on screen).
            score += 10

        # Move the end segments first in reverse order so each one takes the
        # position of the segment ahead of it.
        for index in range(len(segments) - 1, 0, -1):
            x = segments[index - 1].xcor()
            y = segments[index - 1].ycor()
            segments[index].goto(x, y)

        # Move segment 0 to where the head is
        if len(segments) > 0:
            x = head.xcor()
            y = head.ycor()
            segments[0].goto(x, y)

        move()

        time.sleep(delay)

    except turtle.Terminator:
        # Window was closed: leave the loop cleanly.
        break

win.mainloop()
--------------------------------------------------------------------------------
/03-snake/code/snake_game_updated.py:
--------------------------------------------------------------------------------
# filename: snake_game_updated.py
# Pygame re-implementation of snake on a fixed-size cell grid.

import pygame
import random

# Initialize Pygame
pygame.init()

# Constants
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
GRID_SIZE = 20  # pixels per cell; snake segments and food are one cell each
GRID_WIDTH = SCREEN_WIDTH // GRID_SIZE
GRID_HEIGHT = SCREEN_HEIGHT // GRID_SIZE
FPS = 10  # frames (and snake steps) per second

# Colors (RGB)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
22 |
# Snake class
class Snake:
    """The player-controlled snake, stored as a list of (x, y) pixel
    positions on the grid, head first."""

    def __init__(self):
        self.length = 1
        self.positions = [((SCREEN_WIDTH // 2), (SCREEN_HEIGHT // 2))]
        self.direction = random.choice([(0, -1), (0, 1), (-1, 0), (1, 0)])
        self.color = GREEN

    def get_head_position(self):
        """Return the (x, y) of the head segment."""
        return self.positions[0]

    def turn(self, point):
        """Set a new direction; ignore a direct 180° reversal once the
        snake has more than one segment."""
        if self.length > 1 and (point[0] * -1, point[1] * -1) == self.direction:
            return
        else:
            self.direction = point

    def move(self):
        """Advance one grid cell, wrapping at screen edges; reset the snake
        if the new head lands on its own body."""
        cur = self.get_head_position()
        x, y = self.direction
        # BUG FIX: the original line opened an extra unmatched "(" which made
        # the entire file a SyntaxError.
        new = ((cur[0] + (x * GRID_SIZE)) % SCREEN_WIDTH, (cur[1] + (y * GRID_SIZE)) % SCREEN_HEIGHT)
        if len(self.positions) > 2 and new in self.positions[2:]:
            self.reset()
        else:
            self.positions.insert(0, new)
            if len(self.positions) > self.length:
                self.positions.pop()

    def reset(self):
        """Shrink back to a single segment at screen center with a random
        direction."""
        self.length = 1
        self.positions = [((SCREEN_WIDTH // 2), (SCREEN_HEIGHT // 2))]
        self.direction = random.choice([(0, -1), (0, 1), (-1, 0), (1, 0)])

    def draw(self, surface):
        """Render every segment as a filled grid square on *surface*."""
        for p in self.positions:
            pygame.draw.rect(surface, self.color, (p[0], p[1], GRID_SIZE, GRID_SIZE))

    def handle_keys(self):
        """Consume pending pygame events: quit keys and arrow-key turns.

        NOTE(review): pygame.quit() here does not exit the program — the next
        frame's drawing calls will raise pygame.error; consider sys.exit().
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_q:
                    pygame.quit()
                elif event.key == pygame.K_UP:
                    self.turn((0, -1))
                elif event.key == pygame.K_DOWN:
                    self.turn((0, 1))
                elif event.key == pygame.K_LEFT:
                    self.turn((-1, 0))
                elif event.key == pygame.K_RIGHT:
                    self.turn((1, 0))
75 |
# Food class
class Food:
    """A single food pellet drawn as a red grid-aligned square."""

    def __init__(self):
        self.position = (0, 0)
        self.color = RED
        self.randomize_position()

    def randomize_position(self):
        """Relocate the pellet to a uniformly random grid cell."""
        col = random.randint(0, GRID_WIDTH - 1)
        row = random.randint(0, GRID_HEIGHT - 1)
        self.position = (col * GRID_SIZE, row * GRID_SIZE)

    def draw(self, surface):
        """Render the pellet onto *surface*."""
        x, y = self.position
        pygame.draw.rect(surface, self.color, (x, y, GRID_SIZE, GRID_SIZE))
88 |
# Main function
def main():
    """Run the game loop: draw, advance the snake, handle food and walls."""
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    clock = pygame.time.Clock()
    snake = Snake()
    food = Food()
    score = 0  # NOTE(review): tracked but never rendered and never reset

    while True:
        screen.fill(BLACK)
        snake.handle_keys()
        snake.move()
        snake.draw(screen)
        food.draw(screen)
        pygame.display.update()
        clock.tick(FPS)

        head_x, head_y = snake.get_head_position()
        # NOTE(review): Snake.move() wraps positions modulo the screen size,
        # so this out-of-bounds check can never fire — confirm whether
        # wrap-around or wall-death is the intended rule.
        if head_x >= SCREEN_WIDTH or head_x < 0 or head_y >= SCREEN_HEIGHT or head_y < 0:
            snake.reset()

        # Eating: grow by one, bump score, respawn the food.
        if head_x == food.position[0] and head_y == food.position[1]:
            snake.length += 1
            score += 1
            # NOTE(review): the new food position may land on the snake body.
            food.randomize_position()

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------