├── 006_bedtime_story_teller ├── .env.example ├── requirements.txt ├── README.md ├── app.py └── utils.py ├── 004_speech_synthesis ├── .env.example ├── images │ ├── joe-rogan-profile.jpg │ └── jordan-peterson-profile.jpg ├── elevenlabs_guide.md ├── requirements.txt ├── app.py └── utils.py ├── 003_timetable_scheduler ├── requirements.txt ├── students.csv ├── subjects.csv └── timetable_scheduler.ipynb ├── 00x_cstmr_srv_returns ├── orders.db └── reqs.md ├── 005_youtube_content_ideas_from_trending_post ├── requirements.txt ├── main.py ├── extract_post_content.py ├── README.md └── generate_youtube_content_ideas.py ├── README.md ├── .gitignore ├── 001_ed_tech_quiz ├── utils.py └── app.py ├── requirements.txt └── 002_youtube_content_ideas_ai └── youtube_traction.ipynb /006_bedtime_story_teller/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | ELEVEN_API_KEY= -------------------------------------------------------------------------------- /004_speech_synthesis/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY="OPEN AI API KEY HERE" 2 | ELEVEN_API_KEY="Eleven Labs API KEY HERE" -------------------------------------------------------------------------------- /003_timetable_scheduler/requirements.txt: -------------------------------------------------------------------------------- 1 | jupyter==1.0.0 2 | openai==0.27.8 3 | python-dotenv==1.0.0 4 | langchain==0.0.239 -------------------------------------------------------------------------------- /00x_cstmr_srv_returns/orders.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cogent-Labs-Inc/langchain-series/HEAD/00x_cstmr_srv_returns/orders.db -------------------------------------------------------------------------------- /006_bedtime_story_teller/requirements.txt: -------------------------------------------------------------------------------- 1 | elevenlabs==0.2.21 2 | langchain==0.0.247 3 | python-dotenv==1.0.0 4 | streamlit==1.25.0 5 | openai==0.27.8 -------------------------------------------------------------------------------- /004_speech_synthesis/images/joe-rogan-profile.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cogent-Labs-Inc/langchain-series/HEAD/004_speech_synthesis/images/joe-rogan-profile.jpg -------------------------------------------------------------------------------- /004_speech_synthesis/images/jordan-peterson-profile.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cogent-Labs-Inc/langchain-series/HEAD/004_speech_synthesis/images/jordan-peterson-profile.jpg -------------------------------------------------------------------------------- /005_youtube_content_ideas_from_trending_post/requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.30.0 2 | openai==0.27.6 3 | python-dotenv==1.0.0 4 | langchain==0.0.149 5 | streamlit==1.25.0 6 | lxml==4.9.3 7 | -------------------------------------------------------------------------------- /004_speech_synthesis/elevenlabs_guide.md: -------------------------------------------------------------------------------- 1 | # Eleven Labs 2 | 3 | Visit the following link to get your Eleven Labs API key: 
[https://docs.elevenlabs.io/api-reference/quick-start/authentication](https://docs.elevenlabs.io/api-reference/quick-start/authentication) -------------------------------------------------------------------------------- /003_timetable_scheduler/students.csv: -------------------------------------------------------------------------------- 1 | StudentName,Subject1,Subject2,Subject3,Considerations 2 | John Doe,Mathematics,Science,History,Avoid classes on Mondays and Fridays 3 | Jane Smith,English,Mathematics,Science,Prefer afternoon sessions on Tuesdays and Thursdays 4 | Michael Johnson,History,Chemistry,Science,No classes on Wednesdays -------------------------------------------------------------------------------- /005_youtube_content_ideas_from_trending_post/main.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | from extract_post_content import extract_hackernews_page_content 4 | from generate_youtube_content_ideas import generate_youtube_ideas_from_content 5 | 6 | 7 | def main(): 8 | """ 9 | Function to execute the script 10 | """ 11 | st.title("Youtube Viral Video Ideas Generator") 12 | 13 | url = st.text_input("Enter the URL:") 14 | 15 | if url: 16 | with st.spinner("Processing..."): 17 | page_content = extract_hackernews_page_content(url) 18 | response = generate_youtube_ideas_from_content(page_content) 19 | 20 | st.markdown(response) 21 | else: 22 | st.warning("Please enter a URL first.") 23 | 24 | 25 | if __name__ == "__main__": 26 | main() 27 | -------------------------------------------------------------------------------- /003_timetable_scheduler/subjects.csv: -------------------------------------------------------------------------------- 1 | SubjectName,Number of Classes per week,AvailableClass1,AvailableClass2,AvailableClass3,AvailableClass4,AvailableClass5 2 | Mathematics,3,Monday 9:00 AM - 10:30 AM,Tuesday 1:00 PM - 2:30 PM,Tuesday 3:00 PM - 4:30 PM,Thursday 1:00 PM - 2:30 PM,Thursday 3:00 PM - 4:30 PM 3 | Science,3,Tuesday 9:00 AM - 10:30 AM,Tuesday 1:00 PM - 2:30 PM,Thursday 11:00 AM - 12:30 PM,Friday 1:00 PM - 2:30 PM,Friday 3:00 PM - 4:30 PM 4 | History,2,Monday 11:00 AM - 12:30 PM,Monday 3:00 PM - 4:30 PM,Wednesday 1:00 PM - 2:30 PM,Wednesday 3:00 PM - 4:30 PM,Friday 3:00 PM - 4:30 PM 5 | English,3,Tuesday 9:00 AM - 10:30 AM,Tuesday 1:00 PM - 2:30 PM,Wednesday 9:00 AM - 10:30 AM,Thursday 1:00 PM - 2:30 PM,Thursday 3:00 PM - 4:30 PM 6 | Chemistry,2,Monday 1:00 PM - 2:30 PM,Tuesday 1:00 PM - 2:30 PM,Wednesday 3:00 PM - 4:30 PM,Thursday 11:00 AM - 12:30 PM,Thursday 9:00 AM - 10:30 AM -------------------------------------------------------------------------------- /006_bedtime_story_teller/README.md: -------------------------------------------------------------------------------- 1 | # Bedtime Storyteller 2 | 3 | Bedtime storyteller is a generative AI Streamlit app developed using Langchain, OpenAI apis, and ElevenLabs. It creates 4 | bedtime stories for children according to their specific requirements. The stories are designed in such a way that the 5 | children are part of the story and the story reflects their interests befittingly. 
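
For a quick sense of the flow before the setup steps, the app boils down to three calls into `utils.py` (the full implementation is in `app.py`/`utils.py`). The snippet below is only a minimal sketch: the child data is made up, and it assumes you run it from this directory with the API keys from `.env.example` loaded.

```python
from dotenv import load_dotenv

from utils import generate_audio, generate_bedtime_story, get_voice_by_name

load_dotenv()  # expects OPENAI_API_KEY and ELEVEN_API_KEY, as in .env.example

# Made-up example input, a trimmed-down version of the dict app.py assembles from its form fields.
children_data = {
    "children": [
        {"Name": "Ava", "Gender": "Female", "Age": 6, "Interests": ["space"],
         "Superpowers loved": ["flying"], "Challenges/Fears": "dark rooms"},
        {"Name": "Leo", "Gender": "Male", "Age": 8, "Interests": ["dinosaurs"],
         "Superpowers loved": ["invisibility"], "Challenges/Fears": "thunder"},
    ],
    "relation between children": "siblings",
}

story = generate_bedtime_story(children_data)  # LangChain + OpenAI chat model -> story text
voice = get_voice_by_name("Dorothy")           # ElevenLabs voice used for the female narrator
audio_bytes = generate_audio(story, voice)     # ElevenLabs text-to-speech -> audio bytes
```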
6 | 7 | ### Steps to Execute: 8 | 9 | - Create the virtual environment using the command 10 | 11 | `python -m venv env-name` 12 | 13 | - Activate the virtual environment using the command 14 | 15 | `source env-name/bin/activate` 16 | 17 | - Navigate to the app directory 18 | 19 | `cd 006_bedtime_story_teller/` 20 | 21 | - Install the requirements using the command 22 | 23 | `pip install -r requirements.txt` 24 | 25 | - Run the Streamlit app using 26 | 27 | `streamlit run app.py` 28 | 29 | Before running the app, create a `.env` file, containing the API keys, in the same format as `.env.example` 30 | 31 | ### How it Works? 32 | 33 | - Gets the children data from the user 34 | - Generates a customized bedtime story using Langchain 35 | - Generates audio form of the story with ElevenLabs 36 | 37 | ### POC Data 38 | 39 | - The app is designed for two children, for the POC. -------------------------------------------------------------------------------- /005_youtube_content_ideas_from_trending_post/extract_post_content.py: -------------------------------------------------------------------------------- 1 | import lxml.html 2 | import requests 3 | 4 | 5 | def refactor_extracted_comments(comments: list) -> str: 6 | """ 7 | Refactors and cleans the extracted comments 8 | 9 | :param comments: comments being refactored 10 | :return: refactored comments 11 | """ 12 | comments = "".join(comments) 13 | lines = comments.split("\n") 14 | cleaned_lines = [line.strip() for line in lines] 15 | 16 | refactored_lines = [line for line in cleaned_lines if line] 17 | refactored_text = "\n".join(refactored_lines).replace("\n\n", "\n").replace("reply", "\nreply:") 18 | 19 | return refactored_text 20 | 21 | 22 | def extract_hackernews_page_content(url: str) -> dict: 23 | """ 24 | Extracts the page content from the specified url 25 | 26 | :param url: url for which content is extracted 27 | :return: extracted page content 28 | """ 29 | response = requests.get(url=url) 30 | 31 | page = lxml.html.fromstring(response.text) 32 | title_text = page.xpath("//span[@class='titleline']//a/text()") 33 | description_text = page.xpath("//div[@class='toptext']/text()") 34 | comments = page.xpath("//div[@class='comment']//descendant-or-self::*/text()") 35 | 36 | refactored_comments = refactor_extracted_comments(comments) 37 | 38 | return { 39 | "title": "".join(title_text), 40 | "description": "".join(description_text), 41 | "comments": refactored_comments 42 | } 43 | -------------------------------------------------------------------------------- /005_youtube_content_ideas_from_trending_post/README.md: -------------------------------------------------------------------------------- 1 | # Viral Youtube Video Ideas Generator 2 | 3 | It is a Generative AI Streamlit app that uses Langchain and OpenAI API to generate viral youtube ideas for the 4 | extracted content of Hacker news post. 5 | 6 | ## Stack Details 7 | - Python = 3.10 8 | 9 | ### Steps to Execute: 10 | - Create the virtual environment using the command 11 | ``` 12 | python3 -m venv your-env-name` 13 | ``` 14 | - Activate the virtual environment using the command 15 | ``` 16 | source env_name/bin/activate 17 | ``` 18 | - Navigate to the app directory 19 | ``` 20 | cd 005_youtube_content_ideas_from_trending_post/` 21 | ``` 22 | - Install the requirements using the command 23 | ``` 24 | pip install -r requirements.txt 25 | ``` 26 | - Execute the Streamlit app on local host using the command 27 | ``` 28 | streamlit run main.py 29 | ``` 30 | 31 | ### How it Works? 
32 | - When a user inputs url of a Hacker news post, it extracts the page title, description, comments and replies of the post. 33 | - A method refactors the extracted comments so that they are understandable by the LLM (Large Language Model). 34 | - In the final step the extracted content is used to generate the prompt, which is passed to the chat model, which in turn 35 | returns viral YouTube ideas with their description and content script 36 | 37 | ### POC Data 38 | - For the POC of getting viral YouTube videos content, we are using the following URL of Hacker news post 39 | 40 | `https://news.ycombinator.com/item?id=36811026` 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Langchain-Series 2 | 3 | The "langchain-series" project is a collection of diverse little apps that demonstrate the practical applications of AI chatbots in real-world scenarios. Each app within the series showcases the capabilities of chatbot technologies in different domains, providing users with interactive and engaging experiences. 4 | 5 | ## Stack Details 6 | 7 | Python = 3.10 8 | 9 | ## Installation 10 | 11 | 1. Clone the repository to your local machine: 12 | 2. Create a virtual environment using the following command: 13 | ``` 14 | python3 -m venv your-env-name 15 | ``` 16 | 3. Activate the virtual environment: 17 | - On Linux or macOS: 18 | ``` 19 | source env_name/bin/activate 20 | ``` 21 | - On Windows: 22 | ``` 23 | env_name\Scripts\activate 24 | ``` 25 | 26 | 4. Install the project dependencies by running the following command: 27 | ``` 28 | pip install -r requirements.txt 29 | ``` 30 | 31 | ## Setting Up Environment Variables 32 | 33 | To use the project, you need to set up your environment variables. Follow the steps below to create a `.env` file in the base project directory and add your OpenAI API key. 34 | 35 | 1. Create a new file named `.env` in the base project directory. 36 | 37 | 2. Open the `.env` file in a text editor and add the following line: 38 | ``` 39 | OPENAI_API_KEY=your-api-key-goes-here 40 | ``` 41 | ## Usage 42 | 43 | 1. Navigate to the project you want to run: `cd project_name` 44 | 45 | 2. 
Run the application using the following command: 46 | ``` 47 | streamlit run app.py 48 | ``` -------------------------------------------------------------------------------- /004_speech_synthesis/requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp==3.8.5 2 | aiosignal==1.3.1 3 | altair==5.0.1 4 | asttokens==2.2.1 5 | async-timeout==4.0.2 6 | attrs==23.1.0 7 | backcall==0.2.0 8 | backports.zoneinfo==0.2.1 9 | blinker==1.6.2 10 | cachetools==5.3.1 11 | certifi==2023.7.22 12 | charset-normalizer==3.2.0 13 | click==8.1.6 14 | dataclasses-json==0.5.13 15 | decorator==5.1.1 16 | elevenlabs==0.2.21 17 | executing==1.2.0 18 | frozenlist==1.4.0 19 | gitdb==4.0.10 20 | GitPython==3.1.32 21 | greenlet==2.0.2 22 | idna==3.4 23 | importlib-metadata==6.8.0 24 | importlib-resources==6.0.0 25 | ipython==8.12.2 26 | jedi==0.18.2 27 | Jinja2==3.1.2 28 | jsonschema==4.18.4 29 | jsonschema-specifications==2023.7.1 30 | langchain==0.0.240 31 | langsmith==0.0.14 32 | markdown-it-py==3.0.0 33 | MarkupSafe==2.1.3 34 | marshmallow==3.20.1 35 | matplotlib-inline==0.1.6 36 | mdurl==0.1.2 37 | multidict==6.0.4 38 | mypy-extensions==1.0.0 39 | numexpr==2.8.4 40 | numpy==1.24.4 41 | openai==0.27.8 42 | openapi-schema-pydantic==1.2.4 43 | packaging==23.1 44 | pandas==2.0.3 45 | parso==0.8.3 46 | pexpect==4.8.0 47 | pickleshare==0.7.5 48 | Pillow==9.5.0 49 | pkgutil_resolve_name==1.3.10 50 | prompt-toolkit==3.0.39 51 | protobuf==4.23.4 52 | ptyprocess==0.7.0 53 | pure-eval==0.2.2 54 | pyarrow==12.0.1 55 | pydantic==1.10.12 56 | pydeck==0.8.0 57 | Pygments==2.15.1 58 | Pympler==1.0.1 59 | python-dateutil==2.8.2 60 | python-dotenv==1.0.0 61 | pytz==2023.3 62 | pytz-deprecation-shim==0.1.0.post0 63 | PyYAML==6.0.1 64 | referencing==0.30.0 65 | regex==2023.6.3 66 | requests==2.31.0 67 | rich==13.4.2 68 | rpds-py==0.9.2 69 | six==1.16.0 70 | smmap==5.0.0 71 | SQLAlchemy==2.0.19 72 | stack-data==0.6.2 73 | streamlit==1.25.0 74 | tenacity==8.2.2 75 | tiktoken==0.4.0 76 | toml==0.10.2 77 | toolz==0.12.0 78 | tornado==6.3.2 79 | tqdm==4.65.0 80 | traitlets==5.9.0 81 | typing-inspect==0.9.0 82 | typing_extensions==4.7.1 83 | tzdata==2023.3 84 | tzlocal==4.3.1 85 | urllib3==2.0.4 86 | validators==0.20.0 87 | watchdog==3.0.0 88 | wcwidth==0.2.6 89 | yarl==1.9.2 90 | zipp==3.16.2 91 | -------------------------------------------------------------------------------- /006_bedtime_story_teller/app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from dotenv import load_dotenv 3 | 4 | from utils import generate_child_fields, get_voice_by_name, generate_audio, generate_bedtime_story 5 | 6 | load_dotenv() 7 | 8 | 9 | def main(): 10 | st.title("Bedtime Story Generator") 11 | 12 | st.write( 13 | "Please enter the details about the children to create a bedtime story." 
14 | ) 15 | 16 | child_1_details = generate_child_fields(1) 17 | 18 | child_2_details = generate_child_fields(2) 19 | 20 | st.write("Further details") 21 | children_relation = st.text_input("Relation between children*") 22 | 23 | children_data = { 24 | "children": [child_1_details, child_2_details], 25 | "relation between children": children_relation, 26 | } 27 | 28 | required_fields_filled = all(child_data is not None for child_data in children_data["children"]) 29 | 30 | if st.button("Generate Bedtime Story") and required_fields_filled and children_relation: 31 | st.subheader("Bedtime Story") 32 | 33 | with st.spinner("Processing..."): 34 | st.session_state.bedtime_story = generate_bedtime_story(children_data) 35 | 36 | if "bedtime_story" in st.session_state: 37 | st.write(st.session_state.bedtime_story) 38 | speaker_type = st.selectbox("Select the Speaker Voice Type", ["Male", "Female"]) 39 | 40 | if st.button("Generate Audio"): 41 | speaker = "Thomas" if speaker_type == "Male" else "Dorothy" 42 | 43 | with st.spinner("Getting Voice from ElevenLabs..."): 44 | speaker_voice = get_voice_by_name(speaker) 45 | 46 | with st.spinner("Generating audio..."): 47 | audio_bytes = generate_audio(st.session_state.bedtime_story, speaker_voice) 48 | 49 | if audio_bytes: 50 | st.success("Audio Generated") 51 | st.audio(audio_bytes, format="audio/wav") 52 | 53 | else: 54 | st.warning("Please fill in all the required fields before generating the bedtime story.") 55 | 56 | 57 | if __name__ == "__main__": 58 | main() 59 | -------------------------------------------------------------------------------- /005_youtube_content_ideas_from_trending_post/generate_youtube_content_ideas.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from langchain.chains import LLMChain 5 | from langchain.chat_models import ChatOpenAI 6 | from langchain.prompts import ( 7 | ChatPromptTemplate, 8 | SystemMessagePromptTemplate, 9 | HumanMessagePromptTemplate 10 | ) 11 | 12 | load_dotenv() 13 | 14 | chat = ChatOpenAI(openai_api_key=os.getenv("OPENAI_API_KEY")) 15 | 16 | 17 | def generate_youtube_ideas_from_content(page_content: dict) -> str: 18 | """ 19 | Generates youtube ideas for the given content 20 | 21 | :param page_content: extracted title, description and comments/replies 22 | :return: generated ideas 23 | """ 24 | human_message = """ 25 | I've the following topic 26 | {title} 27 | with the description 28 | {description} 29 | And the following are the comments on that 30 | {comments} 31 | 32 | Generate 5 different viral YouTube content ideas related to this. For each idea, please provide 33 | title, description and youtube content script (with timestamps) 34 | 35 | Output the result in the following format using markdown: 36 | 37 | Idea 1: 38 | Title: 39 | Description: 40 | YouTube content script(with timestamps): 41 | 0:00 some script content 42 | 0:30 other script content 43 | 44 | Idea 2: 45 | Title: 46 | Description: 47 | YouTube content script(with timestamps): 48 | 0:00 some script content 49 | 0:30 other script content 50 | 51 | ... 
52 | """ 53 | 54 | system_message_prompt = SystemMessagePromptTemplate.from_template("You are an expert youtube content creator.") 55 | human_message_prompt = HumanMessagePromptTemplate.from_template(human_message) 56 | 57 | chat_prompt = ChatPromptTemplate.from_messages( 58 | [ 59 | system_message_prompt, 60 | human_message_prompt 61 | ] 62 | ) 63 | 64 | chat_model = ChatOpenAI(temperature=0.2, model="gpt-3.5-turbo-16k") 65 | 66 | llm_chain = LLMChain(prompt=chat_prompt, llm=chat_model) 67 | response = llm_chain.run( 68 | title=page_content["title"], 69 | description=page_content["description"], 70 | comments=page_content["comments"] 71 | ) 72 | 73 | response = response.replace("\n", "\n\n") 74 | 75 | return response 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | .idea/ 132 | -------------------------------------------------------------------------------- /00x_cstmr_srv_returns/reqs.md: -------------------------------------------------------------------------------- 1 | # Customer Service Chatbot - Returns 2 | 3 | ## Introduction 4 | 5 | This Markdown document outlines the development plan for a customer service chatbot that handles returns for customers. The chatbot will provide information about general returns policies and track the details of customer orders from a database. The chatbot will be implemented as an AI agent with two main tools: a tool for question and answer on policy documents, and another tool for handling database operations to answer customer queries. 6 | 7 | ## Development Plan 8 | 9 | ### Step 1: Design Conversational Flow 10 | 11 | - Determine the conversation flow between the chatbot and the user. 12 | - Identify potential user intents and map them to appropriate responses. 13 | - Create a flowchart or diagram illustrating the chatbot's conversational structure. 14 | 15 | ### Step 2: Develop QA Tool for Policy Docs 16 | 17 | - Implement a question and answer tool for policy documents using natural language processing techniques. 18 | - Train the QA tool on the policy documents and associated questions and answers. 19 | - Test the QA tool with sample queries to ensure accurate responses. 20 | 21 | ### Step 3: Set Up Database 22 | 23 | - Design and set up a database to store customer order information. 24 | - Define the necessary tables and fields to track order details, including order numbers, customer information, and return status. 25 | - Populate the database with sample data for testing purposes. 26 | 27 | ### Step 4: Implement Database Operations Tool 28 | 29 | - Develop a tool that interacts with the database to retrieve order information based on the provided order number. 30 | - Implement necessary database queries to fetch order details. 31 | - Test the database operations tool to ensure correct retrieval of order information. 32 | 33 | ### Step 5: Integrate QA Tool and Database Operations Tool 34 | 35 | - Combine the QA tool for policy documents and the database operations tool into a unified chatbot interface. 36 | - Define the conversation flow to handle different user intents, such as general policy inquiries and order tracking requests. 37 | - Test the integrated chatbot to ensure smooth interactions between the two tools. 38 | 39 | ### Step 6: Evaluate and Iterate 40 | 41 | - Collect user feedback and evaluate the chatbot's performance and effectiveness. 42 | - Identify areas for improvement and iterate on the chatbot's design and functionality. 43 | - Implement updates and enhancements based on user feedback to continually enhance the customer service experience. 
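
### Appendix: Illustrative Agent Sketch

The plan above is not implemented in this folder yet (it only contains `orders.db` and this document), so the sketch below is purely illustrative of how Steps 2–5 could be wired together with LangChain's agent/tool pattern. The policy file name (`returns_policy.txt`), the example question, and the tool descriptions are assumptions; it also assumes `faiss-cpu` is installed, `OPENAI_API_KEY` is set, and a LangChain release where `SQLDatabaseChain` is still importable from `langchain.chains` (newer releases moved it to `langchain_experimental`).

```python
from dotenv import load_dotenv
from langchain.agents import AgentType, Tool, initialize_agent
from langchain.chains import RetrievalQA, SQLDatabaseChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.sql_database import SQLDatabase
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

load_dotenv()
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# Step 2: question answering over the returns policy document (file name is hypothetical).
policy_docs = TextLoader("returns_policy.txt").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(policy_docs)
policy_qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=FAISS.from_documents(chunks, OpenAIEmbeddings()).as_retriever(),
)

# Steps 3-4: database operations over the orders database; the chain introspects the schema.
db = SQLDatabase.from_uri("sqlite:///orders.db")
db_chain = SQLDatabaseChain.from_llm(llm=llm, db=db, verbose=True)

# Step 5: expose both capabilities as tools behind a single agent.
tools = [
    Tool(
        name="ReturnsPolicyQA",
        func=policy_qa.run,
        description="Answers general questions about the returns policy.",
    ),
    Tool(
        name="OrderLookup",
        func=db_chain.run,
        description="Looks up order details and return status from the orders database.",
    ),
]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)

if __name__ == "__main__":
    print(agent.run("What is the return policy for damaged items, and what is the status of order 1024?"))
```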
44 | -------------------------------------------------------------------------------- /001_ed_tech_quiz/utils.py: -------------------------------------------------------------------------------- 1 | import PyPDF2 2 | import json 3 | import traceback 4 | 5 | 6 | def parse_file(file): 7 | if file.name.endswith(".pdf"): 8 | try: 9 | pdf_reader = PyPDF2.PdfReader(file) 10 | text = "" 11 | for page in pdf_reader.pages: 12 | text += page.extract_text() 13 | return text 14 | except PyPDF2.utils.PdfReadError: 15 | raise Exception("Error reading the PDF file.") 16 | 17 | elif file.name.endswith(".txt"): 18 | return file.read().decode("utf-8") 19 | 20 | else: 21 | raise Exception( 22 | "Unsupported file format. Only PDF and TXT files are supported." 23 | ) 24 | 25 | 26 | def get_table_data(quiz_str): 27 | try: 28 | # convert the quiz from a str to dict 29 | quiz_dict = json.loads(quiz_str) 30 | quiz_table_data = [] 31 | # Iterate over the quiz dictionary and extract the required information 32 | for key, value in quiz_dict.items(): 33 | mcq = value["mcq"] 34 | options = " | ".join( 35 | [ 36 | f"{option}: {option_value}" 37 | for option, option_value in value["options"].items() 38 | ] 39 | ) 40 | correct = value["correct"] 41 | quiz_table_data.append({"MCQ": mcq, "Choices": options, "Correct": correct}) 42 | return quiz_table_data 43 | except Exception as e: 44 | traceback.print_exception(type(e), e, e.__traceback__) 45 | return False 46 | 47 | 48 | RESPONSE_JSON = { 49 | "1": { 50 | "no": "1", 51 | "mcq": "multiple choice question", 52 | "options": { 53 | "a": "choice here", 54 | "b": "choice here", 55 | "c": "choice here", 56 | "d": "choice here", 57 | }, 58 | "correct": "correct answer", 59 | }, 60 | "2": { 61 | "no": "2", 62 | "mcq": "multiple choice question", 63 | "options": { 64 | "a": "choice here", 65 | "b": "choice here", 66 | "c": "choice here", 67 | "d": "choice here", 68 | }, 69 | "correct": "correct answer", 70 | }, 71 | "3": { 72 | "no": "3", 73 | "mcq": "multiple choice question", 74 | "options": { 75 | "a": "choice here", 76 | "b": "choice here", 77 | "c": "choice here", 78 | "d": "choice here", 79 | }, 80 | "correct": "correct answer", 81 | }, 82 | } 83 | -------------------------------------------------------------------------------- /004_speech_synthesis/app.py: -------------------------------------------------------------------------------- 1 | from utils import ( 2 | generate_intro, 3 | generate_audio, 4 | get_image_path, 5 | get_voices, 6 | get_voice_by_name, 7 | ) 8 | import streamlit as st 9 | from dotenv import load_dotenv 10 | 11 | 12 | load_dotenv() 13 | 14 | 15 | def main(): 16 | st.markdown( 17 | "

<h1 style='text-align: center;'>Speech Generator</h1>

", 18 | unsafe_allow_html=True, 19 | ) 20 | 21 | # Create a form with three input fields 22 | with st.form("intro_form"): 23 | topic = st.text_input("Topic (Max 300 characters)", max_chars=300) 24 | theme = st.text_area("Theme") 25 | summary = st.text_area("Summary") 26 | show = st.selectbox( 27 | "Select podcast style", 28 | ["The Joe Rogan Experience", "The Jordan B. Peterson Podcast"], 29 | ) 30 | 31 | generate_intro_button = st.form_submit_button("Generate Intro") 32 | 33 | if generate_intro_button and all([topic, theme, summary, show]): 34 | with st.spinner("Generating Intro..."): 35 | # Call the function to generate the intro and get the output 36 | podcast_intro = generate_intro(topic, theme, summary, show) 37 | st.session_state.podcast_intro = podcast_intro 38 | elif generate_intro_button and not all([topic, theme, summary, show]): 39 | st.warning("Fill all the above fields.") 40 | 41 | if "podcast_intro" in st.session_state: 42 | podcast_intro = st.session_state.podcast_intro 43 | st.header("Podcast Introduction") 44 | st.write(podcast_intro) 45 | 46 | if "selected_speaker" not in st.session_state: 47 | st.session_state.selected_speaker = "" 48 | 49 | if "podcast_intro" in st.session_state: 50 | st.header("Speech Synthesis") 51 | speakers = get_voices() 52 | speaker = st.selectbox("Select a Speaker", speakers, index=0) 53 | 54 | # Create a button "Create" to generate audio 55 | if speaker and speaker != "": 56 | st.session_state.selected_speaker = speaker 57 | speaker_image_path = get_image_path(speaker) 58 | if speaker_image_path: 59 | st.image(speaker_image_path, caption=speaker, use_column_width=True) 60 | with st.spinner("Getting Voice from ElevenLabs..."): 61 | speaker_voice = get_voice_by_name(speaker) 62 | with st.spinner("Generating audio..."): 63 | audio_bytes = generate_audio(podcast_intro, speaker_voice) 64 | if audio_bytes: 65 | st.success("Audio Generated") 66 | st.audio(audio_bytes, format="audio/wav") 67 | else: 68 | st.error("Speaker name must be selected") 69 | 70 | 71 | if __name__ == "__main__": 72 | main() 73 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp==3.8.4 2 | aiosignal==1.3.1 3 | altair==4.2.2 4 | anyio==3.7.1 5 | appnope==0.1.3 6 | argon2-cffi==21.3.0 7 | argon2-cffi-bindings==21.2.0 8 | arrow==1.2.3 9 | asttokens==2.2.1 10 | async-timeout==4.0.2 11 | attrs==23.1.0 12 | backcall==0.2.0 13 | backports.zoneinfo==0.2.1 14 | beautifulsoup4==4.12.2 15 | bleach==6.0.0 16 | blinker==1.6.2 17 | cachetools==5.3.0 18 | certifi==2023.5.7 19 | cffi==1.15.1 20 | charset-normalizer==3.1.0 21 | click==8.1.3 22 | comm==0.1.3 23 | dataclasses-json==0.5.7 24 | debugpy==1.6.7 25 | decorator==5.1.1 26 | defusedxml==0.7.1 27 | elevenlabs==0.2.19 28 | entrypoints==0.4 29 | exceptiongroup==1.1.2 30 | executing==1.2.0 31 | fastjsonschema==2.17.1 32 | fqdn==1.5.1 33 | frozenlist==1.3.3 34 | gitdb==4.0.10 35 | GitPython==3.1.31 36 | greenlet==2.0.2 37 | idna==3.4 38 | importlib-metadata==6.6.0 39 | importlib-resources==5.12.0 40 | ipykernel==6.24.0 41 | ipython==8.12.2 42 | ipython-genutils==0.2.0 43 | isoduration==20.11.0 44 | jedi==0.18.2 45 | Jinja2==3.1.2 46 | jsonpointer==2.4 47 | jsonschema==4.17.3 48 | jupyter-events==0.6.3 49 | jupyter_client==8.3.0 50 | jupyter_core==5.3.1 51 | jupyter_server==2.7.0 52 | jupyter_server_terminals==0.4.4 53 | jupyterlab-pygments==0.2.2 54 | langchain==0.0.149 55 | markdown-it-py==2.2.0 56 | 
MarkupSafe==2.1.2 57 | marshmallow==3.19.0 58 | marshmallow-enum==1.5.1 59 | matplotlib-inline==0.1.6 60 | mdurl==0.1.2 61 | mistune==3.0.1 62 | multidict==6.0.4 63 | mypy-extensions==1.0.0 64 | nbclassic==1.0.0 65 | nbclient==0.8.0 66 | nbconvert==7.7.1 67 | nbformat==5.9.1 68 | nest-asyncio==1.5.6 69 | notebook==6.5.4 70 | notebook_shim==0.2.3 71 | numexpr==2.8.4 72 | numpy==1.24.3 73 | openai==0.27.6 74 | openapi-schema-pydantic==1.2.4 75 | overrides==7.3.1 76 | packaging==23.1 77 | pandas==2.0.1 78 | pandocfilters==1.5.0 79 | parso==0.8.3 80 | pexpect==4.8.0 81 | pickleshare==0.7.5 82 | Pillow==9.5.0 83 | pkgutil_resolve_name==1.3.10 84 | platformdirs==3.9.1 85 | prometheus-client==0.17.1 86 | prompt-toolkit==3.0.39 87 | protobuf==3.20.3 88 | psutil==5.9.5 89 | ptyprocess==0.7.0 90 | pure-eval==0.2.2 91 | pyarrow==12.0.0 92 | pycparser==2.21 93 | pydantic==1.10.7 94 | pydeck==0.8.1b0 95 | Pygments==2.15.1 96 | Pympler==1.0.1 97 | PyPDF2==3.0.1 98 | pyrsistent==0.19.3 99 | python-dateutil==2.8.2 100 | python-dotenv==1.0.0 101 | python-json-logger==2.0.7 102 | pytube==15.0.0 103 | pytz==2023.3 104 | PyYAML==6.0 105 | pyzmq==25.1.0 106 | regex==2023.5.5 107 | requests==2.30.0 108 | rfc3339-validator==0.1.4 109 | rfc3986-validator==0.1.1 110 | rich==13.3.5 111 | Send2Trash==1.8.2 112 | six==1.16.0 113 | smmap==5.0.0 114 | sniffio==1.3.0 115 | soupsieve==2.4.1 116 | SQLAlchemy==2.0.13 117 | stack-data==0.6.2 118 | streamlit==1.22.0 119 | tenacity==8.2.2 120 | terminado==0.17.1 121 | tiktoken==0.4.0 122 | tinycss2==1.2.1 123 | toml==0.10.2 124 | toolz==0.12.0 125 | tornado==6.3.2 126 | tqdm==4.65.0 127 | traitlets==5.9.0 128 | typing-inspect==0.8.0 129 | typing_extensions==4.5.0 130 | tzdata==2023.3 131 | tzlocal==5.0.1 132 | uri-template==1.3.0 133 | urllib3==2.0.2 134 | validators==0.20.0 135 | watchdog==3.0.0 136 | wcwidth==0.2.6 137 | webcolors==1.13 138 | webencodings==0.5.1 139 | websocket-client==1.6.1 140 | yarl==1.9.2 141 | youtube-transcript-api==0.6.1 142 | zipp==3.15.0 143 | -------------------------------------------------------------------------------- /004_speech_synthesis/utils.py: -------------------------------------------------------------------------------- 1 | from langchain.prompts import PromptTemplate 2 | from langchain.chains import LLMChain 3 | from langchain.chat_models import ChatOpenAI 4 | from elevenlabs import generate, Voice, voices 5 | import streamlit as st 6 | import os 7 | 8 | 9 | def generate_intro(topic, theme, summary, show): 10 | prompt_template = """ As an expert writer, your task is to create an introduction for a captivating podcast that will leave \ 11 | the listeners spellbound and eager to explore more. Your audience is diverse, and your words should resonate with \ 12 | their curiosity, emotions, and interests. Following are the steps to guide you in crafting the perfect introduction: 13 | 14 | TOPIC: {topic} 15 | 16 | THEME: {theme} 17 | 18 | SUMMARY: {summary} 19 | 20 | Looking at above TOPIC, THEME and SUMMARY, your mission is to weave a magical introduction in the style of {show} that effortlessly draws \ 21 | the listeners into the heart of the narrative. The introduction must be engaging, \ 22 | thought-provoking, and leave an indelible mark on the minds of those who tune in. 
23 | 24 | Consider the following guidelines while composing your introduction: 25 | Begin with a captivating hook: Capture the audience's attention right from the start with a mesmerizing opening line \ 26 | that sparks curiosity and creates an irresistible urge to explore further. 27 | 28 | Set the tone: Infuse the introduction with an appropriate tone that complements the podcast's theme, \ 29 | be it mysterious, uplifting, nostalgic, or thrilling. 30 | 31 | Paint vivid mental images: Use your mastery of language to create beautiful visualizations and \ 32 | paint a world that lures listeners into the realms of the podcast's subject matter. 33 | 34 | Appeal to emotions: Connect with the audience's emotions by incorporating elements that evoke \ 35 | empathy, excitement, wonder, or even nostalgia. 36 | 37 | Unveil the essence: While keeping the mystery alive, provide a glimpse of what the podcast entails, \ 38 | leaving the listeners yearning for more insights and revelations. 39 | 40 | Embrace diversity: Ensure your writing is inclusive, relatable, and appeals to a broad range \ 41 | of audiences, transcending age, culture, and background. 42 | 43 | Be concise yet impactful: Craft a concise introduction that leaves a lasting impact, \ 44 | making every word count towards creating an enchanting experience. 45 | Embrace your role as an expert writer, and let your creativity and linguistic prowess shine through \ 46 | in this introductory piece. Never break your character. The Introduction must be \ 47 | 300 words at max. The podcast introduction must be in style of {show}. 48 | 49 | Introduction: 50 | """ 51 | llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.1) 52 | PROMPT = PromptTemplate( 53 | template=prompt_template, 54 | input_variables=["topic", "theme", "summary", "show"], 55 | ) 56 | chain = LLMChain(llm=llm, prompt=PROMPT, verbose=True, output_key="introduction") 57 | 58 | response = chain({"topic": topic, "theme": theme, "summary": summary, "show": show}) 59 | return response.get("introduction") 60 | 61 | 62 | def generate_audio(intro, voice): 63 | return generate(text=intro, voice=voice, model="eleven_monolingual_v1") 64 | 65 | 66 | def get_image_path(speaker): 67 | image_path = f"./images/{speaker.lower().replace(' ', '-')}-profile.jpg" 68 | if os.path.exists(image_path): 69 | return image_path 70 | else: 71 | return None 72 | 73 | @st.cache_data(show_spinner=False) 74 | def get_voices(): 75 | speakers = [voice.name for voice in voices()] 76 | speakers.insert(0, "") 77 | return speakers 78 | 79 | 80 | def get_voice_by_name(name): 81 | return [voice for voice in voices() if voice.name == name][0] 82 | -------------------------------------------------------------------------------- /006_bedtime_story_teller/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import streamlit as st 4 | from elevenlabs import voices, generate 5 | from langchain.chains import LLMChain 6 | from langchain.chat_models import ChatOpenAI 7 | from langchain.prompts.chat import ( 8 | ChatPromptTemplate, 9 | SystemMessagePromptTemplate, 10 | HumanMessagePromptTemplate, 11 | ) 12 | 13 | 14 | def generate_bedtime_story(children_data): 15 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") 16 | chat = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7) 17 | 18 | system_message_prompt = SystemMessagePromptTemplate.from_template( 19 | """As a gifted author of bedtime stories for children, your tales consistently embody these captivating 
elements: 20 | 1. Intriguing climaxes and surprising anti-climaxes. 21 | 2. Thrilling adventures. 22 | 3. A series of engaging and challenging tasks for children. 23 | 4. Detailed descriptions of each task and the steps taken to accomplish them. 24 | 5. Stimulate problem-solving skills in young readers. 25 | 6. Introduce a multitude of imaginative creatures with varying powers. 26 | 7. Accompany the story with colorful illustrations to maintain interest and aid understanding. 27 | 8. Incorporate recurring sequences or phrases that soothe and reassure young listeners. 28 | 9. Engage young readers with elaborative descriptions that captivate their imagination. 29 | 10. Infuse positive affirmations throughout the story to encourage confidence and kindness. 30 | 11. Provide a heartwarming happy ending with valuable life lessons to inspire young minds. 31 | 12. Include a variety of problems with climaxes and anti-climaxes for added excitement. 32 | 13. Employ a pleasant tone that soothes the hearts and minds of young readers. 33 | 14. Demonstrate verbosity and innovation in storytelling to keep the narrative fresh and captivating. 34 | """ 35 | ) 36 | human_message_prompt = HumanMessagePromptTemplate.from_template( 37 | """Below are the profiles of two children: 38 | 39 | {children_data} 40 | 41 | Your enchanting task is to craft a mesmerizing detailed and descriptive bedtime story that contains these characters. 42 | You must include each child's characteristics given in the above data, into the story. 43 | 44 | Embrace the magic of storytelling and create a unique unforgettable innovative tale that imparts wisdom, ignites imagination, encourages action with tasks and problem solving, and leaves a lasting impression on their young hearts. 45 | You must describe each task or adventure they accomplish in very detail. The story must be descriptive. 46 | """ 47 | ) 48 | 49 | chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) 50 | chain = LLMChain(llm=chat, prompt=chat_prompt, verbose=True) 51 | 52 | response = chain.run(children_data=children_data) 53 | return response 54 | 55 | 56 | def generate_child_fields(child_number): 57 | st.subheader(f"Child {child_number} Details") 58 | name = st.text_input(f"C{child_number}. Name*") 59 | gender = st.selectbox(f"C{child_number}. Gender*", ["Male", "Female"]) 60 | age = st.number_input(f"C{child_number}. Age*", min_value=1, max_value=100) 61 | interests = st.text_area(f"C{child_number}. Interests (separated by commas)*") 62 | superpowers = st.text_area(f"C{child_number}. Superpowers loved (separated by commas)*") 63 | challenges_fears = st.text_input(f"C{child_number}. Challenges/Fears*") 64 | 65 | dream_destination = st.text_input(f"C{child_number}. Dream Destination") 66 | hobbies = st.text_area(f"C{child_number}. Hobbies/Activities (separated by commas)") 67 | best_person_name = st.text_input(f"C{child_number}. Best person name") 68 | best_person_relation = st.text_input(f"C{child_number}. Best person relation") 69 | favorite_book_movie = st.text_input(f"C{child_number}. Favorite Book/Movie") 70 | favorite_food = st.text_input(f"C{child_number}. 
Favorite Food") 71 | 72 | required_fields = [name, gender, age, interests, superpowers, challenges_fears] 73 | if not all(required_fields): 74 | return None 75 | 76 | return { 77 | "Name": name, 78 | "Gender": gender, 79 | "Age": age, 80 | "Interests": interests.split(","), 81 | "Superpowers loved": superpowers.split(","), 82 | "Dream Destination": dream_destination, 83 | "Challenges/Fears": challenges_fears, 84 | "Hobbies/activities": hobbies.split(","), 85 | "Best person name": best_person_name, 86 | "Relation with best person": best_person_relation, 87 | "Favorite Food": favorite_food, 88 | "Favorite Book/Movie": favorite_book_movie, 89 | } 90 | 91 | 92 | @st.cache_data(show_spinner=False) 93 | def get_voice_by_name(name): 94 | return next((voice for voice in voices() if voice.name == name), None) 95 | 96 | 97 | def generate_audio(intro, voice): 98 | return generate(text=intro, voice=voice, model="eleven_monolingual_v1") 99 | -------------------------------------------------------------------------------- /001_ed_tech_quiz/app.py: -------------------------------------------------------------------------------- 1 | import json 2 | from dotenv import load_dotenv 3 | from langchain.llms import OpenAI 4 | from langchain.chains import LLMChain 5 | from langchain.prompts import PromptTemplate 6 | from langchain.chains import SequentialChain 7 | import streamlit as st 8 | import traceback 9 | import pandas as pd 10 | from langchain.callbacks import get_openai_callback 11 | from utils import parse_file, get_table_data, RESPONSE_JSON 12 | 13 | load_dotenv() 14 | 15 | # This is an LLMChain to create 10-20 multiple choice questions from a given piece of text. 16 | llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0, max_tokens=-1) 17 | 18 | template = """ 19 | Text: {text} 20 | You are an expert MCQ maker. Given the above text, it is your job to\ 21 | create a quiz of {number} multiple choice questions for grade {grade} students in {tone} tone. 22 | Make sure that questions are not repeated and check all the questions to be conforming to the text as well. 23 | Make sure to format your response like the RESPONSE_JSON below and use it as a guide.\ 24 | Ensure to make the {number} MCQs. 25 | ### RESPONSE_JSON 26 | {response_json} 27 | """ 28 | quiz_generation_prompt = PromptTemplate( 29 | input_variables=["text", "number", "grade", "tone", "response_json"], 30 | template=template, 31 | ) 32 | quiz_chain = LLMChain( 33 | llm=llm, prompt=quiz_generation_prompt, output_key="quiz", verbose=True 34 | ) 35 | 36 | # This is an LLMChain to evaluate the multiple choice questions created by the above chain 37 | llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0) 38 | template = """You are an expert english grammarian and writer. Given a multiple choice quiz for {grade} grade students.\ 39 | You need to evaluate complexity of the questions and give a complete analysis of the quiz if the students 40 | will be able to understand the questions and answer them. Only use at max 50 words for complexity analysis. 41 | If quiz is not at par with the cognitive and analytical abilities of the students,\ 42 | update the quiz questions which need to be changed and change the tone such that it perfectly fits the students abilities. 
43 | Quiz MCQs: 44 | {quiz} 45 | Critique from an expert english writer of the above quiz:""" 46 | 47 | quiz_evaluation_prompt = PromptTemplate( 48 | input_variables=["grade", "quiz"], template=template 49 | ) 50 | review_chain = LLMChain( 51 | llm=llm, prompt=quiz_evaluation_prompt, output_key="review", verbose=True 52 | ) 53 | 54 | # This is the overall chain where we run these two chains in sequence. 55 | generate_evaluate_chain = SequentialChain( 56 | chains=[quiz_chain, review_chain], 57 | input_variables=["text", "number", "grade", "tone", "response_json"], 58 | # Here we return multiple variables 59 | output_variables=["quiz", "review"], 60 | verbose=True, 61 | ) 62 | 63 | st.title("🦜⛓️ Langchain-Series: 001-Quiz Generation for Educational Content") 64 | 65 | # Create a form using st.form 66 | with st.form("user_inputs"): 67 | # File upload 68 | uploaded_file = st.file_uploader("Upload a pdf or text file") 69 | 70 | # Input fields 71 | mcq_count = st.number_input("No of MCQs", min_value=3, max_value=20) 72 | grade = st.number_input("Insert Grade", min_value=1, max_value=10) 73 | tone = st.text_input("Insert Quiz tone", max_chars=100, placeholder="simple") 74 | 75 | button = st.form_submit_button("Create quiz") 76 | 77 | # Check if the button is clicked and all fields have inputs 78 | if button and uploaded_file is not None and mcq_count and grade and tone: 79 | with st.spinner("Loading..."): 80 | try: 81 | text = parse_file(uploaded_file) 82 | 83 | # count tokens and cost of api call 84 | with get_openai_callback() as cb: 85 | response = generate_evaluate_chain( 86 | { 87 | "text": text, 88 | "number": mcq_count, 89 | "grade": grade, 90 | "tone": tone, 91 | "response_json": json.dumps(RESPONSE_JSON), 92 | } 93 | ) 94 | except Exception as e: 95 | traceback.print_exception(type(e), e, e.__traceback__) 96 | st.error("Error") 97 | else: 98 | print(f"Total Tokens: {cb.total_tokens}") 99 | print(f"Prompt Tokens: {cb.prompt_tokens}") 100 | print(f"Completion Tokens: {cb.completion_tokens}") 101 | print(f"Total Cost (USD): ${cb.total_cost}") 102 | 103 | if isinstance(response, dict): 104 | # Extract quiz data from the response 105 | quiz = response.get("quiz", None) 106 | if quiz is not None: 107 | table_data = get_table_data(quiz) 108 | if table_data is not None: 109 | df = pd.DataFrame(table_data) 110 | df.index = df.index + 1 111 | st.table(df) 112 | # Display the review in a text box 113 | st.text_area(label="Review", value=response["review"]) 114 | else: 115 | st.error("Error in table data") 116 | else: 117 | st.write(response) 118 | -------------------------------------------------------------------------------- /003_timetable_scheduler/timetable_scheduler.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e2c4b72a", 6 | "metadata": { 7 | "collapsed": false, 8 | "jupyter": { 9 | "outputs_hidden": false 10 | } 11 | }, 12 | "source": [ 13 | "# Time Scheduler for Campus Classes\n", 14 | "\n", 15 | "Time scheduler is a `Generative-AI` based app, developed in `langchain` using OpenAI `gpt-4` model. It takes subjects and students data in CSV format and generates time schedule for each individual student." 
16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 3, 21 | "id": "a601772a", 22 | "metadata": { 23 | "ExecuteTime": { 24 | "end_time": "2023-07-25T10:18:24.478753Z", 25 | "start_time": "2023-07-25T10:18:24.476285Z" 26 | } 27 | }, 28 | "outputs": [], 29 | "source": [ 30 | "import os\n", 31 | "import openai\n", 32 | "from dotenv import load_dotenv\n", 33 | "\n", 34 | "load_dotenv()\n", 35 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\")" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 4, 41 | "id": "9399578c", 42 | "metadata": { 43 | "ExecuteTime": { 44 | "end_time": "2023-07-25T10:18:29.593723Z", 45 | "start_time": "2023-07-25T10:18:29.590850Z" 46 | }, 47 | "collapsed": false, 48 | "jupyter": { 49 | "outputs_hidden": false 50 | } 51 | }, 52 | "outputs": [], 53 | "source": [ 54 | "import csv\n", 55 | "\n", 56 | "def read_data_from_csv(filename):\n", 57 | " data = []\n", 58 | "\n", 59 | " with open(filename, 'r') as csvfile:\n", 60 | " return list(csv.DictReader(csvfile))" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 5, 66 | "id": "fcd7d1c6", 67 | "metadata": { 68 | "ExecuteTime": { 69 | "end_time": "2023-07-25T10:18:30.818170Z", 70 | "start_time": "2023-07-25T10:18:30.815866Z" 71 | }, 72 | "collapsed": false, 73 | "jupyter": { 74 | "outputs_hidden": false 75 | } 76 | }, 77 | "outputs": [], 78 | "source": [ 79 | "subjects_data = read_data_from_csv(\"subjects.csv\")\n", 80 | "students_data = read_data_from_csv(\"students.csv\")" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 6, 86 | "id": "5e939b6e", 87 | "metadata": { 88 | "ExecuteTime": { 89 | "end_time": "2023-07-25T10:20:16.288610Z", 90 | "start_time": "2023-07-25T10:20:06.973870Z" 91 | }, 92 | "collapsed": false, 93 | "jupyter": { 94 | "outputs_hidden": false 95 | } 96 | }, 97 | "outputs": [], 98 | "source": [ 99 | "from langchain.chains import LLMChain\n", 100 | "from langchain.chat_models import ChatOpenAI\n", 101 | "from langchain.prompts.chat import (\n", 102 | " ChatPromptTemplate,\n", 103 | " SystemMessagePromptTemplate,\n", 104 | " HumanMessagePromptTemplate,\n", 105 | ")\n", 106 | "\n", 107 | "chat = ChatOpenAI(model=\"gpt-4\", temperature=0)" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 9, 113 | "id": "187c7aba-775a-4f49-8ff7-e6f2b1b9c496", 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "system_message_prompt = SystemMessagePromptTemplate.from_template(\n", 118 | " \"You are an expert university class scheduler with proficiency in interpreting JSON.\"\n", 119 | ")\n", 120 | "human_message_prompt = HumanMessagePromptTemplate.from_template(\n", 121 | "\"\"\"\n", 122 | "The following subjects` data contains subject names, the total number of classes to be attended by a student, and the available class slots for each subject.\n", 123 | "{subjects_data}\n", 124 | "\n", 125 | "Here is the data for a specific student, including his registered subjects and considerations. Your task is to schedule classes for these subjects.\n", 126 | "{student}\n", 127 | "\n", 128 | "The number of classes per week for each subject must be the same as mentioned in the data. You must make sure that classes should not overlap. If there is a conflict between the student's considerations and the scheduling, ignore the consideration. 
Include a brief comment at the end regarding the extent to which the considerations have been met, specifying the subject names.\n", 129 | "\n", 130 | "The timetable should be prepared in the following format:\n", 131 | "StudentName:\n", 132 | "Subject1:\n", 133 | "Classes list\n", 134 | "Subject2:\n", 135 | "Classes list\n", 136 | "Subject3:\n", 137 | "Classes list\n", 138 | "...\n", 139 | "\n", 140 | "Remember that classes must not overlap, you can ignore considerations when needed.\n", 141 | "\"\"\"\n", 142 | ")\n", 143 | "chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])\n", 144 | "chain = LLMChain(llm=chat, prompt=chat_prompt)" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 10, 150 | "id": "c4419a4c", 151 | "metadata": { 152 | "ExecuteTime": { 153 | "end_time": "2023-07-25T10:21:58.475060Z", 154 | "start_time": "2023-07-25T10:20:29.002146Z" 155 | }, 156 | "collapsed": false, 157 | "jupyter": { 158 | "outputs_hidden": false 159 | } 160 | }, 161 | "outputs": [ 162 | { 163 | "name": "stdout", 164 | "output_type": "stream", 165 | "text": [ 166 | "Here is the class schedule for John Doe:\n", 167 | "\n", 168 | "StudentName: John Doe\n", 169 | "\n", 170 | "Subject1: Mathematics\n", 171 | "Classes list:\n", 172 | "- Tuesday 1:00 PM - 2:30 PM\n", 173 | "- Tuesday 3:00 PM - 4:30 PM\n", 174 | "- Thursday 1:00 PM - 2:30 PM\n", 175 | "\n", 176 | "Subject2: Science\n", 177 | "Classes list:\n", 178 | "- Tuesday 9:00 AM - 10:30 AM\n", 179 | "- Thursday 11:00 AM - 12:30 PM\n", 180 | "- Friday 1:00 PM - 2:30 PM\n", 181 | "\n", 182 | "Subject3: History\n", 183 | "Classes list:\n", 184 | "- Wednesday 1:00 PM - 2:30 PM\n", 185 | "- Wednesday 3:00 PM - 4:30 PM\n", 186 | "\n", 187 | "Comment: The student's consideration to avoid classes on Mondays and Fridays has been partially met. For the subject Mathematics, all classes are scheduled on Tuesday and Thursday. For the subject History, all classes are scheduled on Wednesday. However, for the subject Science, one class had to be scheduled on Friday due to the limited availability of class slots and to avoid overlap with other subjects.\n", 188 | "\n", 189 | "Please note that the schedule has been prepared to ensure that there are no overlapping classes. The student's considerations were taken into account to the extent possible, but the priority was to ensure that the required number of classes per week for each subject is met. \n", 190 | "\n", 191 | "Here is the class schedule for Jane Smith:\n", 192 | "\n", 193 | "StudentName: Jane Smith\n", 194 | "\n", 195 | "Subject1: English\n", 196 | "Classes list:\n", 197 | "- Tuesday 9:00 AM - 10:30 AM\n", 198 | "- Wednesday 9:00 AM - 10:30 AM\n", 199 | "- Thursday 1:00 PM - 2:30 PM\n", 200 | "\n", 201 | "Subject2: Mathematics\n", 202 | "Classes list:\n", 203 | "- Monday 9:00 AM - 10:30 AM\n", 204 | "- Tuesday 3:00 PM - 4:30 PM\n", 205 | "- Thursday 3:00 PM - 4:30 PM\n", 206 | "\n", 207 | "Subject3: Science\n", 208 | "Classes list:\n", 209 | "- Tuesday 1:00 PM - 2:30 PM\n", 210 | "- Thursday 11:00 AM - 12:30 PM\n", 211 | "- Friday 1:00 PM - 2:30 PM\n", 212 | "\n", 213 | "The student's considerations were to prefer afternoon sessions on Tuesdays and Thursdays. This has been met to the extent possible. For English, one class is scheduled in the afternoon on Thursday. For Mathematics, two classes are scheduled in the afternoon on Tuesday and Thursday. For Science, one class is scheduled in the afternoon on Tuesday. 
However, due to the need to avoid class overlap and meet the required number of classes per week, not all classes could be scheduled in the afternoon on these days. \n", 214 | "\n", 215 | "Here is the class schedule for Michael Johnson:\n", 216 | "\n", 217 | "StudentName: Michael Johnson\n", 218 | "\n", 219 | "Subject1: History\n", 220 | "Classes list:\n", 221 | "- Monday 11:00 AM - 12:30 PM\n", 222 | "- Monday 3:00 PM - 4:30 PM\n", 223 | "\n", 224 | "Subject2: Chemistry\n", 225 | "Classes list:\n", 226 | "- Monday 1:00 PM - 2:30 PM\n", 227 | "- Thursday 11:00 AM - 12:30 PM\n", 228 | "\n", 229 | "Subject3: Science\n", 230 | "Classes list:\n", 231 | "- Tuesday 9:00 AM - 10:30 AM\n", 232 | "- Thursday 11:00 AM - 12:30 PM\n", 233 | "- Friday 1:00 PM - 2:30 PM\n", 234 | "\n", 235 | "Comment: The student's consideration of 'No classes on Wednesdays' has been fully met. All classes have been scheduled on days other than Wednesday. \n", 236 | "\n" 237 | ] 238 | } 239 | ], 240 | "source": [ 241 | "for student in students_data:\n", 242 | " response = chain.run(subjects_data=subjects_data, student=student)\n", 243 | " print(response, '\\n')" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": null, 249 | "id": "c7e87b4e", 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [] 253 | } 254 | ], 255 | "metadata": { 256 | "kernelspec": { 257 | "display_name": "Python 3 (ipykernel)", 258 | "language": "python", 259 | "name": "python3" 260 | }, 261 | "language_info": { 262 | "codemirror_mode": { 263 | "name": "ipython", 264 | "version": 3 265 | }, 266 | "file_extension": ".py", 267 | "mimetype": "text/x-python", 268 | "name": "python", 269 | "nbconvert_exporter": "python", 270 | "pygments_lexer": "ipython3", 271 | "version": "3.10.12" 272 | } 273 | }, 274 | "nbformat": 4, 275 | "nbformat_minor": 5 276 | } 277 | -------------------------------------------------------------------------------- /002_youtube_content_ideas_ai/youtube_traction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "pycharm": { 7 | "name": "#%% md\n" 8 | } 9 | }, 10 | "source": [ 11 | "# AI in Content Generation: Youtube Ideas from historical content performance\n", 12 | "\n", 13 | "\n", 14 | "## Channel: This Week in Startups" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 20 | "pycharm": { 21 | "name": "#%% md\n" 22 | } 23 | }, 24 | "source": [ 25 | "The provided code is a class called \"Summarizer\" that uses the Langchain library to summarize YouTube videos. It has the following key functionalities:\n", 26 | "\n", 27 | "Loading Data: The \\_load_data() method utilizes the YoutubeLoader class from the Langchain library to load YouTube video data, including the video's content and metadata.\n", 28 | "\n", 29 | "Chunking Data: The create_chunks() method splits the video content into smaller text chunks using the RecursiveCharacterTextSplitter class. This helps in processing large text inputs more efficiently.\n", 30 | "\n", 31 | "Summarizing: The summarize() method initializes a summarization chain from the Langchain library using the load_summarize_chain() function. It then asynchronously runs the summarization chain on the text chunks generated earlier using the \\_chain_run() method. 
The summary result is collected and returned along with the video metadata.\n", 32 | "\n", 33 | "Overall, this code provides a convenient way to summarize YouTube videos by leveraging Langchain's text processing capabilities.\n" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 1, 39 | "metadata": { 40 | "pycharm": { 41 | "name": "#%%\n" 42 | } 43 | }, 44 | "outputs": [], 45 | "source": [ 46 | "import asyncio\n", 47 | "from langchain.chat_models import ChatOpenAI\n", 48 | "from langchain.chains.summarize import load_summarize_chain\n", 49 | "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", 50 | "from langchain.document_loaders import YoutubeLoader\n", 51 | "\n", 52 | "import pandas as pd\n", 53 | "\n", 54 | "\n", 55 | "class Summarizer:\n", 56 | " def __init__(self, url, llm):\n", 57 | " self.url = url\n", 58 | " self.llm = llm\n", 59 | " self.doc_chunks = []\n", 60 | " self.metadata = []\n", 61 | "\n", 62 | " def _load_data(self):\n", 63 | " loader = YoutubeLoader.from_youtube_url(self.url, add_video_info=True)\n", 64 | "\n", 65 | " return loader.load()\n", 66 | "\n", 67 | " def create_chunks(self):\n", 68 | " text_splitter = RecursiveCharacterTextSplitter(\n", 69 | " chunk_size=3500,\n", 70 | " chunk_overlap=20,\n", 71 | " length_function=len,\n", 72 | " )\n", 73 | " text = self._load_data()\n", 74 | " self.doc_chunks = text_splitter.create_documents(\n", 75 | " [doc.page_content for doc in text]\n", 76 | " )\n", 77 | " self.metadata = text[0].metadata\n", 78 | "\n", 79 | " return\n", 80 | "\n", 81 | " async def _chain_run(self, chain, docs):\n", 82 | " return await chain.arun(docs)\n", 83 | "\n", 84 | " async def summarize(self):\n", 85 | " summarizer_chain = load_summarize_chain(llm=self.llm, chain_type=\"map_reduce\")\n", 86 | " tasks = [self._chain_run(summarizer_chain, self.doc_chunks)]\n", 87 | " summary = await asyncio.gather(*tasks)\n", 88 | "\n", 89 | " return {\"summary\": summary[0], \"metadata\": self.metadata}" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 2, 95 | "metadata": { 96 | "pycharm": { 97 | "name": "#%%\n" 98 | } 99 | }, 100 | "outputs": [], 101 | "source": [ 102 | "import concurrent.futures\n", 103 | "import asyncio\n", 104 | "import time\n", 105 | "\n", 106 | "\n", 107 | "def process_summary(url):\n", 108 | " llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0.5)\n", 109 | " summarizer = Summarizer(url, llm)\n", 110 | " summarizer.create_chunks()\n", 111 | "\n", 112 | " return asyncio.run(summarizer.summarize())\n", 113 | "\n", 114 | "\n", 115 | "def pool_executor(urls):\n", 116 | " results = []\n", 117 | "\n", 118 | " with concurrent.futures.ThreadPoolExecutor() as executor:\n", 119 | " # Submit each URL to the executor\n", 120 | " futures = [executor.submit(process_summary, url) for url in urls]\n", 121 | "\n", 122 | " # Wait for all futures to complete\n", 123 | " for future in concurrent.futures.as_completed(futures):\n", 124 | " try:\n", 125 | " result = future.result()\n", 126 | " results.append(result)\n", 127 | " except Exception as e:\n", 128 | " print(f\"Error processing URL: {e}\")\n", 129 | "\n", 130 | " return results" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": 3, 136 | "metadata": { 137 | "pycharm": { 138 | "name": "#%%\n" 139 | } 140 | }, 141 | "outputs": [ 142 | { 143 | "name": "stderr", 144 | "output_type": "stream", 145 | "text": [ 146 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as 
it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n", 147 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n", 148 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n", 149 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n", 150 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89692 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 151 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89691 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 152 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89684 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 153 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89685 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 154 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89675 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 155 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89675 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 156 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89680 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 157 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. 
Limit: 90000 / min. Current: 89678 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 158 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89669 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 159 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89666 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 160 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89654 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 161 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89650 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 162 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89630 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 163 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89648 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 164 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89632 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 165 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89628 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 166 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89654 / min. 
Contact us through our help center at help.openai.com if you continue to have issues..\n", 167 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89137 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 168 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89153 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 169 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89102 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n" 170 | ] 171 | }, 172 | { 173 | "name": "stderr", 174 | "output_type": "stream", 175 | "text": [ 176 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89096 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 177 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89975 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 178 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89969 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 179 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89967 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 180 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89968 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 181 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89966 / min. 
Contact us through our help center at help.openai.com if you continue to have issues..\n", 182 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89963 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 183 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89963 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 184 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89962 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 185 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89955 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 186 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89953 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 187 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89948 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 188 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89771 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 189 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89730 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 190 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89728 / min. 
Contact us through our help center at help.openai.com if you continue to have issues..\n", 191 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89723 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 192 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89721 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 193 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89716 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 194 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89710 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 195 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89709 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 196 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89692 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 197 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89693 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n" 198 | ] 199 | }, 200 | { 201 | "name": "stderr", 202 | "output_type": "stream", 203 | "text": [ 204 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89692 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 205 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89689 / min. 
Contact us through our help center at help.openai.com if you continue to have issues..\n", 206 | "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89619 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 207 | "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 87531 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 208 | "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 1.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 86526 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 209 | "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 2.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 87830 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 210 | "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 87364 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 211 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89865 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 212 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89860 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 213 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89857 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 214 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89853 / min. 
Contact us through our help center at help.openai.com if you continue to have issues..\n", 215 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89851 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 216 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89832 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 217 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89793 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 218 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 8.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89709 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 219 | "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 4.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 87860 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n", 220 | "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 16.0 seconds as it raised RateLimitError: Rate limit reached for default-gpt-3.5-turbo in organization org-ZMYqvRsyrZeVCAGtcdg16fJA on tokens per min. Limit: 90000 / min. Current: 89290 / min. Contact us through our help center at help.openai.com if you continue to have issues..\n" 221 | ] 222 | } 223 | ], 224 | "source": [ 225 | "high_engagement_vids_urls = [\n", 226 | " \"https://www.youtube.com/watch?v=_8bMMqy37y8&ab_channel=ThisWeekinStartups\",\n", 227 | " \"https://www.youtube.com/watch?v=1SWEF-lyW28&ab_channel=ThisWeekinStartups\",\n", 228 | " \"https://www.youtube.com/watch?v=oc5tHbEK0IQ&ab_channel=ThisWeekinStartups\",\n", 229 | " \"https://www.youtube.com/watch?v=jrd4snFDSVA&ab_channel=ThisWeekinStartups\",\n", 230 | "]\n", 231 | "\n", 232 | "low_engagement_vids_urls = [\n", 233 | " \"https://www.youtube.com/watch?v=UeIV4KcSUlk\",\n", 234 | " \"https://www.youtube.com/watch?v=hNcLMN_bZCM\",\n", 235 | " \"https://www.youtube.com/watch?v=ANd4jPLnMAU\",\n", 236 | " \"https://www.youtube.com/watch?v=J8YnxrGEzT4\",\n", 237 | "]\n", 238 | "\n", 239 | "high_engagement_summaries = pool_executor(high_engagement_vids_urls)\n", 240 | "\n", 241 | "low_engagement_summaries = pool_executor(low_engagement_vids_urls)" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": 4, 247 | "metadata": { 248 | "pycharm": { 249 | "name": "#%%\n" 250 | } 251 | }, 252 | "outputs": [ 253 | { 254 | "data": { 255 | "text/html": [ 256 | "
\n", 257 | "\n", 270 | "\n", 271 | " \n", 272 | " \n", 273 | " \n", 274 | " \n", 275 | " \n", 276 | " \n", 277 | " \n", 278 | " \n", 279 | " \n", 280 | " \n", 281 | " \n", 282 | " \n", 283 | " \n", 284 | " \n", 285 | " \n", 286 | " \n", 287 | " \n", 288 | " \n", 289 | " \n", 290 | " \n", 291 | " \n", 292 | " \n", 293 | " \n", 294 | " \n", 295 | " \n", 296 | " \n", 297 | " \n", 298 | " \n", 299 | " \n", 300 | "
summarymetadata
0The conversation between Jason Calacanis and B...{'source': 'oc5tHbEK0IQ&ab_channel=ThisWeekinS...
1The summary covers various topics including th...{'source': '_8bMMqy37y8&ab_channel=ThisWeekinS...
2The speaker discusses various topics related t...{'source': '1SWEF-lyW28&ab_channel=ThisWeekinS...
3The summary discusses various topics including...{'source': 'jrd4snFDSVA&ab_channel=ThisWeekinS...
\n", 301 | "
" 302 | ], 303 | "text/plain": [ 304 | " summary \n", 305 | "0 The conversation between Jason Calacanis and B... \\\n", 306 | "1 The summary covers various topics including th... \n", 307 | "2 The speaker discusses various topics related t... \n", 308 | "3 The summary discusses various topics including... \n", 309 | "\n", 310 | " metadata \n", 311 | "0 {'source': 'oc5tHbEK0IQ&ab_channel=ThisWeekinS... \n", 312 | "1 {'source': '_8bMMqy37y8&ab_channel=ThisWeekinS... \n", 313 | "2 {'source': '1SWEF-lyW28&ab_channel=ThisWeekinS... \n", 314 | "3 {'source': 'jrd4snFDSVA&ab_channel=ThisWeekinS... " 315 | ] 316 | }, 317 | "execution_count": 4, 318 | "metadata": {}, 319 | "output_type": "execute_result" 320 | } 321 | ], 322 | "source": [ 323 | "high_eng_df = pd.DataFrame.from_records(high_engagement_summaries)\n", 324 | "high_eng_df" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "execution_count": 5, 330 | "metadata": { 331 | "pycharm": { 332 | "name": "#%%\n" 333 | } 334 | }, 335 | "outputs": [ 336 | { 337 | "data": { 338 | "text/html": [ 339 | "
\n", 340 | "\n", 353 | "\n", 354 | " \n", 355 | " \n", 356 | " \n", 357 | " \n", 358 | " \n", 359 | " \n", 360 | " \n", 361 | " \n", 362 | " \n", 363 | " \n", 364 | " \n", 365 | " \n", 366 | " \n", 367 | " \n", 368 | " \n", 369 | " \n", 370 | " \n", 371 | " \n", 372 | " \n", 373 | " \n", 374 | " \n", 375 | " \n", 376 | " \n", 377 | " \n", 378 | " \n", 379 | " \n", 380 | " \n", 381 | " \n", 382 | " \n", 383 | "
summarymetadata
0The summary discusses a crypto Roundtable disc...{'source': 'UeIV4KcSUlk', 'title': 'Banking cr...
1The podcast episode covers various topics incl...{'source': 'ANd4jPLnMAU', 'title': 'Coinbase c...
2The podcast episode features Coffeezilla, a Yo...{'source': 'J8YnxrGEzT4', 'title': 'Logan Paul...
3The provided text covers a range of topics inc...{'source': 'hNcLMN_bZCM', 'title': 'Bing dodge...
\n", 384 | "
" 385 | ], 386 | "text/plain": [ 387 | " summary \n", 388 | "0 The summary discusses a crypto Roundtable disc... \\\n", 389 | "1 The podcast episode covers various topics incl... \n", 390 | "2 The podcast episode features Coffeezilla, a Yo... \n", 391 | "3 The provided text covers a range of topics inc... \n", 392 | "\n", 393 | " metadata \n", 394 | "0 {'source': 'UeIV4KcSUlk', 'title': 'Banking cr... \n", 395 | "1 {'source': 'ANd4jPLnMAU', 'title': 'Coinbase c... \n", 396 | "2 {'source': 'J8YnxrGEzT4', 'title': 'Logan Paul... \n", 397 | "3 {'source': 'hNcLMN_bZCM', 'title': 'Bing dodge... " 398 | ] 399 | }, 400 | "execution_count": 5, 401 | "metadata": {}, 402 | "output_type": "execute_result" 403 | } 404 | ], 405 | "source": [ 406 | "low_eng_df = pd.DataFrame.from_records(low_engagement_summaries)\n", 407 | "low_eng_df" 408 | ] 409 | }, 410 | { 411 | "cell_type": "code", 412 | "execution_count": 6, 413 | "metadata": { 414 | "pycharm": { 415 | "name": "#%%\n" 416 | } 417 | }, 418 | "outputs": [], 419 | "source": [ 420 | "def format_summaries(data):\n", 421 | " formatted_strings = []\n", 422 | " for i, obj in enumerate(data, start=1):\n", 423 | " summary = obj[\"summary\"]\n", 424 | " views = obj[\"metadata\"][\"view_count\"]\n", 425 | " title = obj[\"metadata\"][\"title\"].split(\"|\")[0].strip()\n", 426 | "\n", 427 | " formatted_string = (\n", 428 | " f\"Video {i}\\nTitle: {title}\\nView Count: {views}\\nSummary: {summary}\\n\"\n", 429 | " )\n", 430 | " formatted_strings.append(formatted_string)\n", 431 | "\n", 432 | " result = \"\\n\".join(formatted_strings)\n", 433 | " return result\n", 434 | "\n", 435 | "\n", 436 | "high_eng_prompt = format_summaries(high_engagement_summaries)\n", 437 | "low_eng_prompt = format_summaries(low_engagement_summaries)" 438 | ] 439 | }, 440 | { 441 | "cell_type": "code", 442 | "execution_count": 7, 443 | "metadata": { 444 | "pycharm": { 445 | "name": "#%%\n" 446 | } 447 | }, 448 | "outputs": [ 449 | { 450 | "name": "stdout", 451 | "output_type": "stream", 452 | "text": [ 453 | "Video 1\n", 454 | "Title: Fireside chat with Jason Calacanis & Brad Gerstner hosted by Mubadala’s Ibrahim Ajami\n", 455 | "View Count: 100551\n", 456 | "Summary: The conversation between Jason Calacanis and Brad Gerstner covers various topics related to the technology industry, including their experiences in Silicon Valley, their respective companies and investments, and the challenges of building successful companies. They discuss the current state of the industry, the importance of innovation and perseverance, and the need for founders to stay focused on their products and customers. They also touch on the impact of AI, the potential of the metaverse, and the evolution of funding and technology. The speaker emphasizes the importance of data, the cloud, and AI in driving technological advancements and economic growth. They also discuss the role of venture capital, the importance of resilience and humility, and the benefits of in-person collaboration. The speaker encourages founders to be intellectually honest and transparent, to embrace failure as part of the process, and to continuously learn and adapt. They also highlight the importance of networking, learning from successful entrepreneurs, and building a global network. 
Overall, the speaker remains optimistic about the positive impact of technology on human progress and the potential for emerging markets to develop their own tech ecosystems.\n", 457 | "\n", 458 | "Video 2\n", 459 | "Title: ChatGPT vs Hollywood writers and the WGA strike with Lon Harris\n", 460 | "View Count: 137823\n", 461 | "Summary: The summary covers various topics including the challenges faced by writers in the entertainment industry, the potential of technology in creative writing, the impact of streaming platforms on compensation and job stability for writers, the potential of Bitcoin as a digital currency, and the changes happening with HBO Max. It also mentions the development of voice-mimicking bots for podcast advertising, concerns about excessive technology use, and recommendations for TV shows and movies.\n", 462 | "\n", 463 | "Video 3\n", 464 | "Title: Demoing Google’s MusicLM, AssemblyAI, and other AI tools with Sunny Madra\n", 465 | "View Count: 104388\n", 466 | "Summary: The speaker discusses various topics related to AI, podcasts, music creation, transcription technology, and the potential for AI personalities. They promote sponsors, introduce a guest, and discuss different AI platforms and tools. The speaker also mentions the importance of copyright and regulation in the AI industry and expresses interest in creating more content. They offer a VIP ticket to a summit event and discuss the interest surrounding it.\n", 467 | "\n", 468 | "Video 4\n", 469 | "Title: GPT-4 web browsing, plugin demos, AI layoffs + more with Sunny Madra & Vinny Lingham\n", 470 | "View Count: 92976\n", 471 | "Summary: The summary discusses various topics including the importance of incorporating social features into products, advancements in AI technology such as ChatGPT4 with web browsing capabilities, the potential impact of AI on job loss and opportunities, the use of AI tools for research and debate purposes, strategies to address issues like drug trafficking and water desalination, the potential of AI in video editing and podcasting, the benefits and limitations of AI in real estate, and the speaker's farewell and mention of future meetings.\n", 472 | "\n" 473 | ] 474 | } 475 | ], 476 | "source": [ 477 | "print(high_eng_prompt)" 478 | ] 479 | }, 480 | { 481 | "cell_type": "code", 482 | "execution_count": 8, 483 | "metadata": { 484 | "pycharm": { 485 | "name": "#%%\n" 486 | } 487 | }, 488 | "outputs": [ 489 | { 490 | "name": "stdout", 491 | "output_type": "stream", 492 | "text": [ 493 | "Video 1\n", 494 | "Title: Banking crisis impact, more Meta cuts, and GPT-4 with Sunny Madra and Vinny Lingham\n", 495 | "View Count: 20470\n", 496 | "Summary: The summary discusses a crypto Roundtable discussion with entrepreneurs Sunny and Vinnie. They cover topics such as the impact of the banking contagion on the crypto industry, potential adoption of a CBDC by the US, risk management, social media's influence on banking, and the need for startups to adapt. The summary also mentions the layoffs at Zuckerberg's company and the importance of being Sock 2 compliant for startups. It discusses the challenges faced by banks and startups in the funding environment and the potential risks of inflated valuations. The speaker emphasizes the importance of transparency and stability in the cryptocurrency market, as well as the potential of stablecoins and the need for regulation. The summary also touches on interest rates, the value of podcasts, and the changes being made by Zuckerberg at Facebook. 
It concludes with a mention of the capabilities of a new AI model called Chat GPT4 and the importance of staying updated and innovative in the tech industry.\n", 497 | "\n", 498 | "Video 2\n", 499 | "Title: Coinbase cuts 20%, Microsoft to invest $10B into OpenAI & Ascend Elements CEO Mike O’Kronley\n", 500 | "View Count: 30730\n", 501 | "Summary: The podcast episode covers various topics including extreme weather conditions, the rift between Coinbase and its employees, the Microsoft and OpenAI relationship, climate technology, and an interview with a climate founder. It also discusses the need for companies to be efficient and the potential job opportunities in other sectors. The episode is sponsored by Vanta, Mixpanel, and Squarespace. Additionally, it mentions the importance of recycling lithium-ion batteries and the increasing demand for renewable energy and electric vehicles.\n", 502 | "\n", 503 | "Video 3\n", 504 | "Title: Logan Paul & CryptoZoo’s alleged scam breakdown with Coffeezilla + OK Boomer with Em Herrera\n", 505 | "View Count: 14866\n", 506 | "Summary: The podcast episode features Coffeezilla, a YouTuber and investigative journalist, discussing the alleged scam involving crypto zoo and Logan Paul. They discuss their motivation for exposing scams and the lack of substance in many blockchain games. The episode also touches on Logan Paul's involvement in a failed crypto project and the potential legal repercussions. The speaker criticizes lawyers for their response and suggests that Logan Paul should take responsibility for any financial losses incurred by his fans. They also discuss the influence of successful influencers in promoting projects and the importance of vetting teams. The speaker briefly mentions their involvement in a lawsuit and their support for independent artists and investigative journalism. The conversation covers various topics such as founder education, fraud on FTX, and the desire for fame and success among influencers. Emily Herrera, an investor at Night Ventures, discusses her predictions for the consumer market in 2023 and the increasing influence of user opinions on purchasing decisions. The conversation highlights the importance of online reviews. The speaker also mentions the importance of personal cyber security and the need for consumer cyber security products. They discuss the prevalence of impersonators on social media platforms and the importance of password safety. 
The speaker encourages influencers to promote personal security and mentions their own podcast on the balance between wellness and technology.\n", 507 | "\n", 508 | "Video 4\n", 509 | "Title: Bing dodges $100B bullet & IVP's Tom Loverro on the looming startup collapse\n", 510 | "View Count: 20538\n", 511 | "Summary: The provided text covers a range of topics including the sharing of personal health data by companies without consent, concerns about the accuracy and misuse of AI-generated content, predictions of a potential mass extinction event for startups, the impact of deal structures on venture investments, the importance of fundraising and governance in startups, the importance of focusing on core survival during financial challenges, the influence of cognitive biases in venture investing, the significance of growth rates and stewardship of capital in startups, the extreme nature of the cryptocurrency market, the use of stock options to reward employees after layoffs, and the qualities to look for in investors and board members.\n", 512 | "\n" 513 | ] 514 | } 515 | ], 516 | "source": [ 517 | "print(low_eng_prompt)" 518 | ] 519 | }, 520 | { 521 | "cell_type": "code", 522 | "execution_count": 9, 523 | "metadata": { 524 | "pycharm": { 525 | "name": "#%%\n" 526 | } 527 | }, 528 | "outputs": [], 529 | "source": [ 530 | "from langchain.prompts import PromptTemplate\n", 531 | "from langchain.chains import LLMChain\n", 532 | "from langchain.chat_models import ChatOpenAI\n", 533 | "\n", 534 | "prompt_template = \"\"\" You are a helpful AI assistant that helps to increase the engagement of youtube videos by analyzing the scripts of old videos.\\\n", 535 | "Looking at the given videos below in High_engagement_Videos and Low_engagement_videos sections,\\\n", 536 | "come up with new ideas for next videos.\\\n", 537 | " \n", 538 | "High_Engagement_Videos:\n", 539 | "{high_engagement_videos}\n", 540 | " \n", 541 | "Low_Engagement_Videos:\n", 542 | "{low_engagement_videos}\n", 543 | "\n", 544 | "Given the above High_Engagement_Videos and Low_Engagement_Videos, generate new ideas and themes.\n", 545 | "New ideas should be related to the High_Engagement_Videos by keeping the titles and summaries of the episodes in context and must be based on\\\n", 546 | "the common patterns between the High_Engagement_Videos and the guests in those episodes.\\\n", 547 | "The new videos should not have any content from Low_Engagement_Videos.\\\n", 548 | "Make sure to not include any speaker name in your suggested video topics or themes.\n", 549 | "Make sure to return at least 10 new ideas. Your response must be a csv file which contains the following columns:\\\n", 550 | "Topic, Theme, Summary. Summary should contain points to talk on the show and must be 100 words at max. Use | as a separator\\\n", 551 | "and do not append any extra line in your csv response. Each row must have proper data and columns in each row must be three.\n", 552 | "If you don't know the answer, just say \"Hmm, I'm not sure.\"\\\n", 553 | "Don't try to make up an answer. 
\n", 554 | "\"\"\"\n", 555 | "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0.5)\n", 556 | "PROMPT = PromptTemplate(\n", 557 | " template=prompt_template,\n", 558 | " input_variables=[\"high_engagement_videos\", \"low_engagement_videos\"],\n", 559 | ")\n", 560 | "ideas_chain = LLMChain(llm=llm, prompt=PROMPT, verbose=True)" 561 | ] 562 | }, 563 | { 564 | "cell_type": "code", 565 | "execution_count": 10, 566 | "metadata": { 567 | "pycharm": { 568 | "name": "#%%\n" 569 | } 570 | }, 571 | "outputs": [ 572 | { 573 | "name": "stdout", 574 | "output_type": "stream", 575 | "text": [ 576 | "\n", 577 | "\n", 578 | "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", 579 | "Prompt after formatting:\n", 580 | "\u001b[32;1m\u001b[1;3m You are helpful AI assistant that helps to increase the engagement of youtube videos by analyzing the scripts of old videos.Looking at the given videos below in High_engagement_Videos and Low_engagement_videos sections,come up with new ideas for next videos. \n", 581 | "High_Engagement_Videos:\n", 582 | "Video 1\n", 583 | "Title: Fireside chat with Jason Calacanis & Brad Gerstner hosted by Mubadala’s Ibrahim Ajami\n", 584 | "View Count: 100551\n", 585 | "Summary: The conversation between Jason Calacanis and Brad Gerstner covers various topics related to the technology industry, including their experiences in Silicon Valley, their respective companies and investments, and the challenges of building successful companies. They discuss the current state of the industry, the importance of innovation and perseverance, and the need for founders to stay focused on their products and customers. They also touch on the impact of AI, the potential of the metaverse, and the evolution of funding and technology. The speaker emphasizes the importance of data, the cloud, and AI in driving technological advancements and economic growth. They also discuss the role of venture capital, the importance of resilience and humility, and the benefits of in-person collaboration. The speaker encourages founders to be intellectually honest and transparent, to embrace failure as part of the process, and to continuously learn and adapt. They also highlight the importance of networking, learning from successful entrepreneurs, and building a global network. Overall, the speaker remains optimistic about the positive impact of technology on human progress and the potential for emerging markets to develop their own tech ecosystems.\n", 586 | "\n", 587 | "Video 2\n", 588 | "Title: ChatGPT vs Hollywood writers and the WGA strike with Lon Harris\n", 589 | "View Count: 137823\n", 590 | "Summary: The summary covers various topics including the challenges faced by writers in the entertainment industry, the potential of technology in creative writing, the impact of streaming platforms on compensation and job stability for writers, the potential of Bitcoin as a digital currency, and the changes happening with HBO Max. It also mentions the development of voice-mimicking bots for podcast advertising, concerns about excessive technology use, and recommendations for TV shows and movies.\n", 591 | "\n", 592 | "Video 3\n", 593 | "Title: Demoing Google’s MusicLM, AssemblyAI, and other AI tools with Sunny Madra\n", 594 | "View Count: 104388\n", 595 | "Summary: The speaker discusses various topics related to AI, podcasts, music creation, transcription technology, and the potential for AI personalities. They promote sponsors, introduce a guest, and discuss different AI platforms and tools. 
The speaker also mentions the importance of copyright and regulation in the AI industry and expresses interest in creating more content. They offer a VIP ticket to a summit event and discuss the interest surrounding it.\n", 596 | "\n", 597 | "Video 4\n", 598 | "Title: GPT-4 web browsing, plugin demos, AI layoffs + more with Sunny Madra & Vinny Lingham\n", 599 | "View Count: 92976\n", 600 | "Summary: The summary discusses various topics including the importance of incorporating social features into products, advancements in AI technology such as ChatGPT4 with web browsing capabilities, the potential impact of AI on job loss and opportunities, the use of AI tools for research and debate purposes, strategies to address issues like drug trafficking and water desalination, the potential of AI in video editing and podcasting, the benefits and limitations of AI in real estate, and the speaker's farewell and mention of future meetings.\n", 601 | "\n", 602 | " \n", 603 | "Low_Engagement_Videos:\n", 604 | "Video 1\n", 605 | "Title: Banking crisis impact, more Meta cuts, and GPT-4 with Sunny Madra and Vinny Lingham\n", 606 | "View Count: 20470\n", 607 | "Summary: The summary discusses a crypto Roundtable discussion with entrepreneurs Sunny and Vinnie. They cover topics such as the impact of the banking contagion on the crypto industry, potential adoption of a CBDC by the US, risk management, social media's influence on banking, and the need for startups to adapt. The summary also mentions the layoffs at Zuckerberg's company and the importance of being Sock 2 compliant for startups. It discusses the challenges faced by banks and startups in the funding environment and the potential risks of inflated valuations. The speaker emphasizes the importance of transparency and stability in the cryptocurrency market, as well as the potential of stablecoins and the need for regulation. The summary also touches on interest rates, the value of podcasts, and the changes being made by Zuckerberg at Facebook. It concludes with a mention of the capabilities of a new AI model called Chat GPT4 and the importance of staying updated and innovative in the tech industry.\n", 608 | "\n", 609 | "Video 2\n", 610 | "Title: Coinbase cuts 20%, Microsoft to invest $10B into OpenAI & Ascend Elements CEO Mike O’Kronley\n", 611 | "View Count: 30730\n", 612 | "Summary: The podcast episode covers various topics including extreme weather conditions, the rift between Coinbase and its employees, the Microsoft and OpenAI relationship, climate technology, and an interview with a climate founder. It also discusses the need for companies to be efficient and the potential job opportunities in other sectors. The episode is sponsored by Vanta, Mixpanel, and Squarespace. Additionally, it mentions the importance of recycling lithium-ion batteries and the increasing demand for renewable energy and electric vehicles.\n", 613 | "\n", 614 | "Video 3\n", 615 | "Title: Logan Paul & CryptoZoo’s alleged scam breakdown with Coffeezilla + OK Boomer with Em Herrera\n", 616 | "View Count: 14866\n", 617 | "Summary: The podcast episode features Coffeezilla, a YouTuber and investigative journalist, discussing the alleged scam involving crypto zoo and Logan Paul. They discuss their motivation for exposing scams and the lack of substance in many blockchain games. The episode also touches on Logan Paul's involvement in a failed crypto project and the potential legal repercussions. 
The speaker criticizes lawyers for their response and suggests that Logan Paul should take responsibility for any financial losses incurred by his fans. They also discuss the influence of successful influencers in promoting projects and the importance of vetting teams. The speaker briefly mentions their involvement in a lawsuit and their support for independent artists and investigative journalism. The conversation covers various topics such as founder education, fraud on FTX, and the desire for fame and success among influencers. Emily Herrera, an investor at Night Ventures, discusses her predictions for the consumer market in 2023 and the increasing influence of user opinions on purchasing decisions. The conversation highlights the importance of online reviews. The speaker also mentions the importance of personal cyber security and the need for consumer cyber security products. They discuss the prevalence of impersonators on social media platforms and the importance of password safety. The speaker encourages influencers to promote personal security and mentions their own podcast on the balance between wellness and technology.\n", 618 | "\n", 619 | "Video 4\n", 620 | "Title: Bing dodges $100B bullet & IVP's Tom Loverro on the looming startup collapse\n", 621 | "View Count: 20538\n", 622 | "Summary: The provided text covers a range of topics including the sharing of personal health data by companies without consent, concerns about the accuracy and misuse of AI-generated content, predictions of a potential mass extinction event for startups, the impact of deal structures on venture investments, the importance of fundraising and governance in startups, the importance of focusing on core survival during financial challenges, the influence of cognitive biases in venture investing, the significance of growth rates and stewardship of capital in startups, the extreme nature of the cryptocurrency market, the use of stock options to reward employees after layoffs, and the qualities to look for in investors and board members.\n", 623 | "\n", 624 | "\n", 625 | "Given the above High_Engagement_Videos and Low_Engagement_Videos, generate new ideas and themes.\n", 626 | "New ideas should be related to the High_Engagement_Videos by keeping the titles and summaries of the episodes in context and must be based onthe common patterns between the High_Engagement_Videos and the guests in those episodes.The new videos should not have any content from Low_Engagement_Videos.Make sure to not include any speaker name in your suggested video topics or themes.\n", 627 | "Make sure to return at least 10 new ideas. Your response must be a csv file which contains the following columns:Topic, Theme, Summary. Summary should contain points to talk on the show and must be 100 words at max. Use | as a seperatorand do not append any extra line in your csv response. Each row must have proper data and columns in each row must be three.\n", 628 | "If you don't know the answer, just say \"Hmm, I'm not sure.\"Don't try to make up an answer. 
\n", 629 | "\u001b[0m\n" 630 | ] 631 | }, 632 | { 633 | "name": "stdout", 634 | "output_type": "stream", 635 | "text": [ 636 | "\n", 637 | "\u001b[1m> Finished chain.\u001b[0m\n" 638 | ] 639 | } 640 | ], 641 | "source": [ 642 | "response = ideas_chain.run(\n", 643 | " {\n", 644 | " \"high_engagement_videos\": high_eng_prompt,\n", 645 | " \"low_engagement_videos\": low_eng_prompt,\n", 646 | " }\n", 647 | ")" 648 | ] 649 | }, 650 | { 651 | "cell_type": "code", 652 | "execution_count": 27, 653 | "metadata": { 654 | "pycharm": { 655 | "name": "#%%\n" 656 | } 657 | }, 658 | "outputs": [ 659 | { 660 | "data": { 661 | "text/plain": [ 662 | "'Topic|Theme|Summary\\nAI in the Music Industry|Exploring the impact of AI on music creation and production|Discuss the role of AI in revolutionizing the music industry, including AI-powered music creation tools and platforms. Explore the benefits and limitations of using AI in music production, and the potential for AI to enhance creativity and collaboration among musicians. Highlight successful examples of AI-generated music and discuss the ethical considerations and copyright issues surrounding AI-generated music.\\nThe Future of Podcasting|Examining the potential of AI in podcasting|Discuss the role of AI in podcasting, including AI-powered transcription and editing tools. Explore how AI can improve podcast discovery and recommendation algorithms, and enhance the listener experience through personalized content. Highlight the benefits of using AI in podcast production, such as automated editing and audio enhancement. Discuss the potential challenges and ethical considerations of AI in podcasting, including the impact on human hosts and the need for transparency in AI-generated content.\\nThe Rise of Voice Assistants|Exploring the evolution and impact of voice assistants|Discuss the history and evolution of voice assistants, from Siri to Google Assistant and Alexa. Explore the impact of voice assistants on daily life, including their role in smart homes, voice search, and voice-controlled devices. Highlight the benefits and limitations of voice assistants, and discuss the future potential of AI-powered voice assistants in healthcare, customer service, and other industries. Address concerns about privacy and data security in relation to voice assistants.\\nThe Metaverse: A New Frontier|Examining the concept and potential of the metaverse|Discuss the concept of the metaverse, a virtual reality space where users can interact with a computer-generated environment and other users. Explore the potential applications of the metaverse in gaming, social media, education, and business. Discuss the challenges and opportunities of building a metaverse, including technological limitations, user adoption, and ethical considerations. Highlight successful examples of metaverse platforms and discuss the potential impact of the metaverse on society and the economy.\\nThe Future of Venture Capital|Analyzing the evolving landscape of venture capital|Discuss the current state of venture capital, including trends in funding, investment strategies, and the impact of AI and technology on the industry. Explore the challenges and opportunities for startups in accessing venture capital, and discuss the role of venture capitalists in supporting innovation and growth. 
Highlight successful examples of venture capital-backed companies and discuss the potential future developments in the venture capital industry.\\nThe Power of Networking|Exploring the importance of networking in the tech industry|Discuss the role of networking in building successful careers and businesses in the tech industry. Share tips and strategies for effective networking, including attending industry events, joining professional organizations, and leveraging social media platforms. Highlight the benefits of networking, such as access to mentorship, collaboration opportunities, and job prospects. Discuss the potential challenges and ethical considerations of networking, including the need for inclusivity and diversity in tech networking spaces.\\nThe Future of Funding in Emerging Markets|Examining the potential for tech ecosystems in emerging markets|Discuss the opportunities and challenges of developing tech ecosystems in emerging markets, including access to funding, talent, and infrastructure. Explore successful examples of tech startups in emerging markets and discuss the potential for these markets to become innovation hubs. Highlight the role of government policies and initiatives in supporting tech entrepreneurship in emerging markets. Discuss the potential impact of technology on economic growth and social development in emerging markets.\\nThe Impact of AI in Real Estate|Exploring the role of AI in the real estate industry|Discuss the applications of AI in the real estate industry, including AI-powered property valuation, market analysis, and virtual property tours. Explore how AI can improve efficiency and accuracy in real estate transactions, and discuss the potential impact of AI on job roles in the industry. Highlight successful examples of AI adoption in real estate and discuss the challenges and ethical considerations of using AI in the industry, such as data privacy and algorithm bias.\\nThe Future of Technology in Education|Examining the role of technology in transforming education|Discuss the potential of technology in improving access to education, personalized learning, and student engagement. Explore the impact of AI, virtual reality, and online platforms on traditional education models. Highlight successful examples of technology integration in education and discuss the challenges and ethical considerations of using technology in the classroom. Address concerns about the digital divide and the need for equitable access to technology in education.\\nThe Evolution of Streaming Platforms|Analyzing the changing landscape of streaming platforms|Discuss the evolution of streaming platforms, from traditional TV to on-demand services and live streaming. Explore the impact of streaming platforms on content creation, distribution, and monetization. Discuss the challenges and opportunities for content creators in the streaming industry, and the potential of AI in content recommendation and personalization. 
Highlight successful examples of streaming platforms and discuss the future trends and developments in the industry.'" 663 | ] 664 | }, 665 | "execution_count": 27, 666 | "metadata": {}, 667 | "output_type": "execute_result" 668 | } 669 | ], 670 | "source": [ 671 | "response" 672 | ] 673 | }, 674 | { 675 | "cell_type": "code", 676 | "execution_count": 28, 677 | "metadata": { 678 | "pycharm": { 679 | "name": "#%%\n" 680 | } 681 | }, 682 | "outputs": [], 683 | "source": [ 684 | "import pandas as pd\n", 685 | "from io import StringIO\n", 686 | "\n", 687 | "csv_file = StringIO(response)\n", 688 | "# Read the CSV data and create a DataFrame\n", 689 | "df = pd.read_csv(csv_file, sep=\"|\")" 690 | ] 691 | }, 692 | { 693 | "cell_type": "code", 694 | "execution_count": 29, 695 | "metadata": {}, 696 | "outputs": [ 697 | { 698 | "data": { 699 | "text/html": [ 700 | "
[pandas DataFrame HTML rendering omitted here: the stored text/html output was the escaped <div>/<table> markup for the same 10-row table (columns: Topic, Theme, Summary) that is reproduced in the text/plain output below; only stripped-tag fragments and line-number scaffolding survived in this dump]
" 787 | ], 788 | "text/plain": [ 789 | " Topic \n", 790 | "0 AI in the Music Industry \\\n", 791 | "1 The Future of Podcasting \n", 792 | "2 The Rise of Voice Assistants \n", 793 | "3 The Metaverse: A New Frontier \n", 794 | "4 The Future of Venture Capital \n", 795 | "5 The Power of Networking \n", 796 | "6 The Future of Funding in Emerging Markets \n", 797 | "7 The Impact of AI in Real Estate \n", 798 | "8 The Future of Technology in Education \n", 799 | "9 The Evolution of Streaming Platforms \n", 800 | "\n", 801 | " Theme \n", 802 | "0 Exploring the impact of AI on music creation a... \\\n", 803 | "1 Examining the potential of AI in podcasting \n", 804 | "2 Exploring the evolution and impact of voice as... \n", 805 | "3 Examining the concept and potential of the met... \n", 806 | "4 Analyzing the evolving landscape of venture ca... \n", 807 | "5 Exploring the importance of networking in the ... \n", 808 | "6 Examining the potential for tech ecosystems in... \n", 809 | "7 Exploring the role of AI in the real estate in... \n", 810 | "8 Examining the role of technology in transformi... \n", 811 | "9 Analyzing the changing landscape of streaming ... \n", 812 | "\n", 813 | " Summary \n", 814 | "0 Discuss the role of AI in revolutionizing the ... \n", 815 | "1 Discuss the role of AI in podcasting, includin... \n", 816 | "2 Discuss the history and evolution of voice ass... \n", 817 | "3 Discuss the concept of the metaverse, a virtua... \n", 818 | "4 Discuss the current state of venture capital, ... \n", 819 | "5 Discuss the role of networking in building suc... \n", 820 | "6 Discuss the opportunities and challenges of de... \n", 821 | "7 Discuss the applications of AI in the real est... \n", 822 | "8 Discuss the potential of technology in improvi... \n", 823 | "9 Discuss the evolution of streaming platforms, ... " 824 | ] 825 | }, 826 | "execution_count": 29, 827 | "metadata": {}, 828 | "output_type": "execute_result" 829 | } 830 | ], 831 | "source": [ 832 | "df" 833 | ] 834 | }, 835 | { 836 | "cell_type": "code", 837 | "execution_count": null, 838 | "metadata": {}, 839 | "outputs": [], 840 | "source": [] 841 | } 842 | ], 843 | "metadata": { 844 | "kernelspec": { 845 | "display_name": "Python 3 (ipykernel)", 846 | "language": "python", 847 | "name": "python3" 848 | }, 849 | "language_info": { 850 | "codemirror_mode": { 851 | "name": "ipython", 852 | "version": 3 853 | }, 854 | "file_extension": ".py", 855 | "mimetype": "text/x-python", 856 | "name": "python", 857 | "nbconvert_exporter": "python", 858 | "pygments_lexer": "ipython3", 859 | "version": "3.8.16" 860 | } 861 | }, 862 | "nbformat": 4, 863 | "nbformat_minor": 2 864 | } 865 | --------------------------------------------------------------------------------