├── AI Chatbot PyTorch ├── intents.json └── main.py ├── AI Hashtag Generator ├── .env ├── app.py ├── static │ └── style.css └── templates │ └── index.html ├── AI Voice Assistant Vapi └── app.py ├── Annual Report Summary ├── counter.py └── main.py ├── Anomaly Detection Time Series Data └── main.py ├── Bluetooth Chat ├── client.py └── server.py ├── CSGO Round Winner Prediction └── CSGO Prediction.ipynb ├── Crypto Demo Portfolio ├── backend │ ├── __init__.py │ ├── main.py │ ├── models.py │ └── schemas.py └── frontend │ ├── README.md │ ├── eslint.config.js │ ├── index.html │ ├── package-lock.json │ ├── package.json │ ├── public │ └── vite.svg │ ├── src │ ├── App.css │ ├── App.tsx │ ├── assets │ │ └── react.svg │ ├── components │ │ ├── Dashboard.tsx │ │ ├── Login.tsx │ │ └── Register.tsx │ ├── context │ │ └── AuthContext.tsx │ ├── index.css │ ├── main.tsx │ ├── services │ │ └── api.ts │ └── vite-env.d.ts │ ├── tsconfig.app.json │ ├── tsconfig.json │ ├── tsconfig.node.json │ └── vite.config.ts ├── Deep Reinforcement Learning - OpenAI Gym ├── intelligent_agent.py └── random_agent.py ├── Docker Crash Course ├── Flask App │ ├── Dockerfile │ ├── app.py │ ├── requirements.txt │ └── templates │ │ └── index.html └── Shopping List App │ ├── backend │ ├── Dockerfile │ ├── logs │ │ └── backend.log │ ├── main.py │ └── requirements.txt │ ├── docker-compose.yml │ └── frontend │ ├── .env │ ├── .gitignore │ ├── Dockerfile │ ├── README.md │ ├── eslint.config.js │ ├── index.html │ ├── package-lock.json │ ├── package.json │ ├── public │ └── vite.svg │ ├── src │ ├── App.css │ ├── App.tsx │ ├── assets │ │ └── react.svg │ ├── index.css │ ├── main.tsx │ └── vite-env.d.ts │ ├── tsconfig.app.json │ ├── tsconfig.json │ ├── tsconfig.node.json │ └── vite.config.ts ├── Drawdata Tutorial ├── .ipynb_checkpoints │ ├── Main-checkpoint.html │ └── Main-checkpoint.ipynb ├── .virtual_documents │ └── Main.ipynb ├── Main.html └── Main.ipynb ├── E-Commerce AI Agent ├── app.py ├── static │ └── style.css └── templates │ ├── base.html │ └── index.html ├── Fake News Detection └── Fake News Detection.ipynb ├── Financial Dashboard └── main.py ├── Intelligent AI Web Chatbot ├── Flask App │ ├── app.py │ ├── model │ │ ├── chatbot_model.keras │ │ ├── classes.pkl │ │ ├── intents.json │ │ └── words.pkl │ ├── templates │ │ └── index.html │ └── utils.py └── Preparation │ ├── chatbot.py │ ├── intents.json │ ├── model │ ├── chatbot_model.keras │ ├── classes.pkl │ └── words.pkl │ └── model_training.py ├── K-Nearest Neighbors From Scratch └── main.py ├── LLM Development ├── main.py ├── main2.py ├── main3.py └── main4.py ├── LLM Wrappers ├── LaTeX Example │ ├── app.py │ ├── static │ │ └── styles.css │ └── templates │ │ └── index.html ├── Translator Example │ ├── app.py │ ├── static │ │ └── styles.css │ └── templates │ │ └── index.html └── YouTube Example │ ├── app.py │ ├── static │ └── styles.css │ └── templates │ └── index.html ├── Laptop Price Prediction └── Laptop Price Prediction.ipynb ├── Live Face Recognition └── main.py ├── LoRA Fine-Tuning ├── Evaluation_TinyLlama_Frobinate.ipynb ├── Evaluation_TinyLlama_Math.ipynb ├── Fine-Tuning_TinyLlama_Frobinate.ipynb ├── Fine-Tuning_TinyLlama_Math.ipynb ├── frobinate.jsonl └── frobinate_test.jsonl ├── Local AI Agent ├── .env └── main.py ├── MCP Tutorial └── my-mcp-server │ ├── .python-version │ ├── README.md │ ├── main.py │ ├── pyproject.toml │ └── uv.lock ├── MonsterUI ├── counter_app.py └── todo_app.py ├── News Aggregator ├── app.py ├── static │ └── style.css └── templates │ ├── base.html │ ├── 
index.html │ └── search_results.html ├── Nuitka ├── editor.py ├── editor_qt6.py └── main.py ├── PDF Table Extraction ├── extract_camelot.py ├── extract_llmwhisperer.py ├── extract_pdfplumber.py ├── extract_py2pdf.py └── extract_tabula.py ├── Polling App Flask ├── app.py ├── polls.csv └── templates │ ├── index.html │ ├── new_poll.html │ └── show_poll.html ├── Python Interface Definitions ├── __init__.py ├── circle.py ├── circle.pyi ├── circle_module │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-39.pyc │ │ └── circle.cpython-39.pyc │ ├── circle.py │ └── circle.pyi └── main.py ├── Ragas Tutorial └── Video Notebook.ipynb ├── Sign E-Mails Certificates └── main.py ├── TTKBootstrap ├── basics_tkinter.py ├── basics_ttkbootstrap.py ├── calculator_tkinter.py ├── calculator_ttkbootstrap.py ├── demo.py ├── login_tkinter.py ├── login_ttkbootstrap.py ├── stopwatch_tkinter.py └── stopwatch_ttkbootstrap.py ├── Text Generation AI - Next Word Prediction └── Text Generation AI.ipynb ├── Todo List App Flask ├── main.py └── templates │ ├── edit.html │ └── index.html └── Weather App Django └── weather_project ├── db.sqlite3 ├── manage.py ├── weather_app ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-39.pyc │ ├── admin.cpython-39.pyc │ ├── apps.cpython-39.pyc │ ├── models.cpython-39.pyc │ ├── urls.cpython-39.pyc │ └── views.cpython-39.pyc ├── admin.py ├── apps.py ├── migrations │ ├── __init__.py │ └── __pycache__ │ │ └── __init__.cpython-39.pyc ├── models.py ├── static │ └── style.css ├── templates │ └── weather_app │ │ ├── city_weather.html │ │ └── index.html ├── tests.py ├── urls.py └── views.py └── weather_project ├── __init__.py ├── __pycache__ ├── __init__.cpython-39.pyc ├── settings.cpython-39.pyc ├── urls.cpython-39.pyc └── wsgi.cpython-39.pyc ├── asgi.py ├── settings.py ├── urls.py └── wsgi.py /AI Chatbot PyTorch/intents.json: -------------------------------------------------------------------------------- 1 | { 2 | "intents": [ 3 | { 4 | "tag": "greeting", 5 | "patterns": ["Hi", "How are you", "Is anyone there?", "Hello", "Good day", "Whats up", "Hey", "greetings"], 6 | "responses": ["Hello!", "Good to see you again!", "Hi there, how can I help?"] 7 | }, 8 | { 9 | "tag": "goodbye", 10 | "patterns": ["cya", "See you later", "Goodbye", "I am Leaving", "Have a Good day", "bye", "cao", "see ya"], 11 | "responses": ["Sad to see you go :(", "Talk to you later", "Goodbye!"] 12 | }, 13 | { 14 | "tag": "programming", 15 | "patterns": ["What is progamming?", "What is coding?", "Tell me about programming", "Tell me about coding", "What is software development?"], 16 | "responses": ["Programming, coding or software development, means writing computer code to automate tasks."] 17 | }, 18 | { 19 | "tag": "resource", 20 | "patterns": ["Where can I learn to code?", "Best way to learn to code", "How can I learn programming", "Good programming resources", 21 | "Can you recommend good coding resources?"], 22 | "responses": ["Check out the NeuralNine YouTube channel and The Python Bible series (7 in 1)."] 23 | }, 24 | { 25 | "tag": "stocks", 26 | "patterns": ["What are my stocks?", "Which stocks do I own?", "Show my stock portfolio"], 27 | "responses": ["Here are your stocks!"] 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /AI Hashtag Generator/.env: -------------------------------------------------------------------------------- 1 | OPENAI_KEY="PUT YOUR API_KEY HERE" 2 | 
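The intents.json above belongs to the AI Chatbot PyTorch project; its main.py (listed in the tree but not reproduced in this dump) is what actually trains on it. A minimal, hypothetical sketch of how a file in this format is typically flattened into training data:

```python
import json

# Load the intents file and flatten it into (pattern, tag) pairs plus a tag -> responses
# lookup. This mirrors the data layout only, not the repo's actual training code.
with open("intents.json", encoding="utf-8") as f:
    intents = json.load(f)["intents"]

training_pairs = [(pattern, intent["tag"])
                  for intent in intents
                  for pattern in intent["patterns"]]
responses = {intent["tag"]: intent["responses"] for intent in intents}

print(training_pairs[:3])  # e.g. [('Hi', 'greeting'), ('How are you', 'greeting'), ('Is anyone there?', 'greeting')]
```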
-------------------------------------------------------------------------------- /AI Hashtag Generator/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from flask import Flask, render_template, request, jsonify 4 | import base64 5 | import requests 6 | from dotenv import load_dotenv 7 | 8 | app = Flask(__name__) 9 | 10 | load_dotenv() 11 | 12 | def encode_image(image_path): 13 | with open(image_path, "rb") as image_file: 14 | return base64.b64encode(image_file.read()).decode('utf-8') 15 | 16 | @app.route('/', methods=['GET', 'POST']) 17 | def index(): 18 | base64_image = None 19 | if request.method == 'POST': 20 | image = request.files['image'] 21 | image.save("uploaded_image.jpg") 22 | 23 | base64_image = encode_image("uploaded_image.jpg") 24 | 25 | headers = { 26 | "Content-Type": "application/json", 27 | "Authorization": f"Bearer {os.getenv('OPENAI_KEY')}" 28 | } 29 | 30 | payload = { 31 | "model": "gpt-4o-mini", 32 | "messages": [ 33 | { 34 | "role": "system", 35 | "content": "You are a hashtag generation model. When you get an image as input, your response should always contain exactly 30 hashtags separated by commas." 36 | }, 37 | { 38 | "role": "user", 39 | "content": [ 40 | { 41 | "type": "text", 42 | "text": "Provide the hashtags for this image:" 43 | }, 44 | { 45 | "type": "image_url", 46 | "image_url": { 47 | "url": f"data:image/jpeg;base64,{base64_image}" 48 | } 49 | } 50 | ] 51 | } 52 | ], 53 | "max_tokens": 300 54 | } 55 | 56 | response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) 57 | 58 | hashtags = response.json().get("choices")[0].get("message").get("content").split(',') 59 | 60 | return render_template('index.html', hashtags=hashtags, base64_image=base64_image) 61 | 62 | return render_template('index.html', hashtags=None) 63 | 64 | 65 | if __name__ == '__main__': 66 | app.run(debug=True) 67 | -------------------------------------------------------------------------------- /AI Hashtag Generator/static/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: Arial, sans-serif; 3 | background-color: #f4f4f9; 4 | margin: 0; 5 | padding: 0; 6 | text-align: center; 7 | } 8 | 9 | .container { 10 | width: 80%; 11 | margin: 0 auto; 12 | padding: 20px; 13 | background-color: white; 14 | box-shadow: 0px 0px 15px rgba(0, 0, 0, 0.1); 15 | border-radius: 8px; 16 | } 17 | 18 | h1 { 19 | color: #333; 20 | margin-bottom: 20px; 21 | } 22 | 23 | .file-input { 24 | display: flex; 25 | justify-content: center; 26 | align-items: center; 27 | gap: 20px; 28 | margin-bottom: 20px; 29 | } 30 | 31 | .submit-btn { 32 | background-color: #4caf50; 33 | color: white; 34 | padding: 10px 20px; 35 | border: none; 36 | border-radius: 5px; 37 | cursor: pointer; 38 | font-size: 16px; 39 | } 40 | 41 | .submit-btn:hover { 42 | background-color: #45a049; 43 | } 44 | 45 | #image-preview-container { 46 | margin-top: 20px; 47 | } 48 | 49 | #image-preview { 50 | max-width: 300px; 51 | max-height: 300px; 52 | border: 2px solid #ddd; 53 | border-radius: 5px; 54 | margin-top: 10px; 55 | } 56 | 57 | #hashtag-container { 58 | display: flex; 59 | flex-wrap: wrap; 60 | justify-content: center; 61 | gap: 10px; 62 | margin-top: 20px; 63 | } 64 | 65 | .hashtag-btn { 66 | background-color: #f0f0f0; 67 | border: 1px solid #ccc; 68 | border-radius: 5px; 69 | padding: 10px 15px; 70 | font-size: 14px; 71 | cursor: pointer; 72 | transition: background-color 
0.3s; 73 | } 74 | 75 | .hashtag-btn:hover { 76 | background-color: #ddd; 77 | } 78 | 79 | .hashtag-btn.selected { 80 | background-color: #4caf50; 81 | color: white; 82 | border: 1px solid #45a049; 83 | } 84 | 85 | #selected-hashtags { 86 | width: 100%; 87 | padding: 10px; 88 | margin-top: 20px; 89 | font-size: 16px; 90 | border: 1px solid #ccc; 91 | border-radius: 5px; 92 | background-color: #f9f9f9; 93 | resize: none; 94 | } 95 | -------------------------------------------------------------------------------- /AI Hashtag Generator/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Hashtag Generator 7 | 8 | 9 | 10 |
11 |

Generate Hashtags

12 | 13 |
14 |
15 | 16 | 17 |
18 | 19 |
20 |

Image Preview:

21 | No Image Selected 24 |
25 |
26 | 27 | {% if hashtags %} 28 |
29 |

Select Hashtags:

30 |
31 | {% for hashtag in hashtags %} 32 | 33 | {% endfor %} 34 |
35 | 36 |

Selected Hashtags:

37 | 38 |
39 | {% endif %} 40 |
41 | 42 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /Annual Report Summary/counter.py: -------------------------------------------------------------------------------- 1 | import PyPDF2 2 | import tiktoken 3 | 4 | def load_file(path): 5 | with open(path, 'rb') as f: 6 | reader = PyPDF2.PdfReader(f) 7 | return "".join(page.extract_text() or "" for page in reader.pages) 8 | 9 | text = load_file('meta_10k.pdf') 10 | enc = tiktoken.encoding_for_model('gpt-4o') 11 | print(len(enc.encode(text))) 12 | 13 | -------------------------------------------------------------------------------- /Annual Report Summary/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from datetime import datetime 4 | from typing import List, Optional 5 | 6 | import PyPDF2 7 | import tiktoken 8 | from google import genai 9 | from dotenv import load_dotenv 10 | from pydantic import BaseModel, Field 11 | 12 | from markdown2 import markdown 13 | from weasyprint import HTML 14 | 15 | 16 | load_dotenv() 17 | 18 | 19 | def load_file(path): 20 | with open(path, 'rb') as f: 21 | reader = PyPDF2.PdfReader(f) 22 | return "".join(page.extract_text() or "" for page in reader.pages) 23 | 24 | 25 | text = load_file('meta_10k.pdf') 26 | 27 | 28 | class AnnualReport(BaseModel): 29 | company_name: str = Field(..., description="Name of the company as reported in the 10-K") 30 | cik: str = Field(..., description="Central Index Key (CIK) identifier assigned by the SEC") 31 | fiscal_year_end: datetime = Field(..., description="Fiscal year end date") 32 | filing_date: datetime = Field(..., description="Date when the 10-K was filed with the SEC") 33 | total_revenue: Optional[float] = Field(None, description="Total revenue for the fiscal year (in USD)") 34 | net_income: Optional[float] = Field(None, description="Net income (profit) for the fiscal year (in USD)") 35 | total_assets: Optional[float] = Field(None, description="Total assets at fiscal year end (in USD)") 36 | total_liabilities: Optional[float] = Field(None, description="Total liabilities at fiscal year end (in USD)") 37 | operating_cash_flow: Optional[float] = Field(None, description="Net cash provided by operating activities (in USD)") 38 | cash_and_equivalents: Optional[float] = Field(None, description="Cash and cash equivalents at fiscal year end (in USD)") 39 | num_employees: Optional[int] = Field(None, description="Number of employees reported") 40 | auditor: Optional[str] = Field(None, description="Name of the external auditor") 41 | business_description: Optional[str] = Field(None, description="Company’s business overview (Item 1)") 42 | risk_factors: Optional[List[str]] = Field(None, description="Key risk factors (Item 1A)") 43 | management_discussion: Optional[str] = Field(None, description="Management’s Discussion & Analysis (Item 7)") 44 | 45 | 46 | client = genai.Client(api_key=os.getenv('GEMINI_API_KEY')) 47 | 48 | schema_definition = json.dumps(AnnualReport.model_json_schema(), indent=2, ensure_ascii=False) 49 | 50 | prompt = f"Analyze the following annual report (10-K) and fill the data model based on it:\n\n{text}\n\n" 51 | prompt += f"The output needs to be in the following format:\n\n{schema_definition}\n\nNo extra fields allowed at all!" 
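# Note: the AnnualReport JSON schema is supplied twice: embedded in the prompt text above
# and again as `response_schema` in the request config below, so the model's JSON reply can
# be parsed directly with AnnualReport.model_validate_json().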
52 | 53 | response = client.models.generate_content( 54 | model='gemini-2.0-flash', 55 | contents=prompt, 56 | config={ 57 | 'response_mime_type': 'application/json', 58 | 'response_schema': AnnualReport 59 | } 60 | ) 61 | 62 | ar = AnnualReport.model_validate_json(response.text) 63 | 64 | print(ar) 65 | 66 | 67 | 68 | md_lines = [ 69 | f"# {ar.company_name} Annual Report {ar.fiscal_year_end.year}", 70 | f"**CIK:** {ar.cik}", 71 | f"**Fiscal Year End:** {ar.fiscal_year_end.strftime('%Y-%m-%d')}", 72 | f"**Filing Date:** {ar.filing_date.strftime('%Y-%m-%d')}", 73 | "## Financials" 74 | ] 75 | 76 | if ar.total_revenue is not None: 77 | md_lines.append(f"- **Total Revenue:** ${ar.total_revenue:,.2f}") 78 | if ar.net_income is not None: 79 | md_lines.append(f"- **Net Income:** ${ar.net_income:,.2f}") 80 | if ar.total_assets is not None: 81 | md_lines.append(f"- **Total Assets:** ${ar.total_assets:,.2f}") 82 | if ar.total_liabilities is not None: 83 | md_lines.append(f"- **Total Liabilities:** ${ar.total_liabilities:,.2f}") 84 | if ar.operating_cash_flow is not None: 85 | md_lines.append(f"- **Operating Cash Flow:** ${ar.operating_cash_flow:,.2f}") 86 | if ar.cash_and_equivalents is not None: 87 | md_lines.append(f"- **Cash & Equivalents:** ${ar.cash_and_equivalents:,.2f}") 88 | if ar.num_employees is not None: 89 | md_lines.append(f"- **Number of Employees:** {ar.num_employees}") 90 | if ar.auditor: 91 | md_lines.append(f"- **Auditor:** {ar.auditor}") 92 | 93 | if ar.business_description: 94 | md_lines += ["\n## Business Description", ar.business_description] 95 | if ar.risk_factors: 96 | md_lines += ["\n## Risk Factors"] + [f"- {rf}" for rf in ar.risk_factors] 97 | if ar.management_discussion: 98 | md_lines += ["\n## Management Discussion & Analysis", ar.management_discussion] 99 | 100 | md = "\n\n".join(md_lines) 101 | html = markdown(md) 102 | company = ar.company_name.replace(" ", "_") 103 | filename = f"annual_report_{company}_{ar.fiscal_year_end.year}.pdf" 104 | HTML(string=html).write_pdf(filename) 105 | 106 | -------------------------------------------------------------------------------- /Anomaly Detection Time Series Data/main.py: -------------------------------------------------------------------------------- 1 | # pip install adtk 2 | 3 | import pandas as pd 4 | import matplotlib.pyplot as plt 5 | import yfinance as yf 6 | 7 | from adtk.data import validate_series 8 | from adtk.visualization import plot 9 | from adtk.detector import ThresholdAD, QuantileAD, InterQuartileRangeAD, GeneralizedESDTestAD, PersistAD, VolatilityShiftAD, CustomizedDetectorHD 10 | 11 | s_train = pd.read_csv("temperature.csv", parse_dates=True, squeeze=True) 12 | s_train["Date"] = pd.to_datetime(s_train["Date"]) 13 | s_train = s_train.set_index("Date") 14 | s_train = s_train['Mean'] 15 | 16 | # s_train = yf.download("AAPL")['Close'] 17 | 18 | s_train = validate_series(s_train) 19 | print(s_train) 20 | 21 | plot(s_train) 22 | plt.show() 23 | 24 | # Threshold Anomaly Detection (manually define min max threshold) 25 | threshold_ad = ThresholdAD(high=0.75, low=-0.5) 26 | anomalies = threshold_ad.detect(s_train) 27 | plot(s_train, anomaly=anomalies, anomaly_color="red", anomaly_tag="marker") 28 | plt.show() 29 | 30 | # Quantile Anomaly Detection (manually define percentiles) 31 | quantile_ad = QuantileAD(high=0.99, low=0.01) 32 | anomalies = quantile_ad.fit_detect(s_train) 33 | plot(s_train, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_markersize=5, anomaly_color='red', anomaly_tag="marker") 34 | 
plt.show() 35 | 36 | # Inter Quartile Range Anomaly Detection (IQR = Q3 - Q1, with c we multiply for tolerance, so c * IQR) 37 | iqr_ad = InterQuartileRangeAD(c=1.5) 38 | anomalies = iqr_ad.fit_detect(s_train) 39 | plot(s_train, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_markersize=5, anomaly_color='red', anomaly_tag="marker") 40 | plt.show() 41 | 42 | # Generalized Extreme Studentized Deviate (ESD) Test (assumes normal distribution, only use when this assumption makes sense) 43 | esd_ad = GeneralizedESDTestAD(alpha=0.3) 44 | anomalies = esd_ad.fit_detect(s_train) 45 | plot(s_train, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_markersize=5, anomaly_color='red', anomaly_tag="marker") 46 | plt.show() 47 | 48 | # Persist Anomaly Detection 49 | # compares each value with previous one, detect positive or negative changes 50 | persist_ad = PersistAD(c=3.0, side='positive') 51 | anomalies = persist_ad.fit_detect(s_train) 52 | plot(s_train, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_color='red') 53 | plt.show() 54 | 55 | persist_ad = PersistAD(c=1.5, side='negative') 56 | anomalies = persist_ad.fit_detect(s_train) 57 | plot(s_train, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_color='red') 58 | plt.show() 59 | 60 | persist_ad.window = 24 # by default just one day, we can adjust it for mid- to long-term detection 61 | anomalies = persist_ad.fit_detect(s_train) 62 | plot(s_train, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_color='red') 63 | plt.show() 64 | 65 | # Volatility Shift Anomaly Detection 66 | s_train = yf.download("TSLA")['Close'] 67 | s_train = validate_series(s_train) 68 | 69 | volatility_shift_ad = VolatilityShiftAD(c=6.0, side='positive', window=30) 70 | anomalies = volatility_shift_ad.fit_detect(s_train) 71 | plot(s_train, anomaly=anomalies, anomaly_color='red') 72 | plt.show() 73 | -------------------------------------------------------------------------------- /Bluetooth Chat/client.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | client = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) 4 | client.connect(("", 4)) 5 | 6 | print(f"Connected!") 7 | 8 | try: 9 | while True: 10 | message = input("Enter message: ") 11 | client.send(message.encode('utf-8')) 12 | data = client.recv(1024) 13 | if not data: 14 | break 15 | print(f"Received: {data.decode('utf-8')}") 16 | 17 | except OSError: 18 | pass 19 | 20 | print("Disconnected") 21 | 22 | client.close() 23 | -------------------------------------------------------------------------------- /Bluetooth Chat/server.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | # Steps 4 | # 1. Device Manager -> Realtek Bluetooth Adapter 5 | # 2. Right Click -> Properties -> Advanced -> Address 6 | # 3. 
Turn on Bluetooth on both devices and make server device visible 7 | 8 | server = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) # RFCOMM specific protocol 9 | server.bind(("", 4)) # MAC Address and Channel 4 10 | server.listen(1) 11 | 12 | print("Waiting for connection...") 13 | 14 | client, addr = server.accept() 15 | print(f"Accepted connection from {addr}") 16 | 17 | try: 18 | while True: 19 | data = client.recv(1024) 20 | if not data: 21 | break 22 | print(f"Received: {data.decode('utf-8')}") 23 | message = input("Enter message: ") 24 | client.send(message.encode('utf-8')) 25 | except OSError: 26 | pass 27 | 28 | print("Disconnected") 29 | 30 | client.close() 31 | server.close() 32 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/backend/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Crypto Demo Portfolio/backend/__init__.py -------------------------------------------------------------------------------- /Crypto Demo Portfolio/backend/models.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, Integer, String, Float, ForeignKey, DateTime 2 | from sqlalchemy.orm import relationship 3 | from sqlalchemy.ext.declarative import declarative_base 4 | 5 | Base = declarative_base() 6 | 7 | 8 | class User(Base): 9 | __tablename__ = 'users' 10 | id = Column(Integer, primary_key=True, index=True) 11 | username = Column(String, index=True, unique=True) 12 | password = Column(String) 13 | portfolio = relationship('Portfolio', back_populates='user', uselist=False) 14 | 15 | 16 | class Portfolio(Base): 17 | __tablename__ = 'portfolios' 18 | id = Column(Integer, primary_key=True, index=True) 19 | user_id = Column(Integer, ForeignKey('users.id')) 20 | total_added_money = Column(Float, default=0) 21 | available_money = Column(Float, default=0) 22 | 23 | user = relationship('User', back_populates='portfolio') 24 | assets = relationship('Asset', back_populates='portfolio') 25 | transactions = relationship('Transaction', back_populates='portfolio') 26 | 27 | 28 | class Asset(Base): 29 | __tablename__ = 'assets' 30 | id = Column(Integer, primary_key=True, index=True) 31 | portfolio_id = Column(Integer, ForeignKey('portfolios.id')) 32 | symbol = Column(String) 33 | quantity = Column(Float) 34 | 35 | portfolio = relationship('Portfolio', back_populates='assets') 36 | 37 | 38 | class Transaction(Base): 39 | __tablename__ = 'transactions' 40 | id = Column(Integer, primary_key=True, index=True) 41 | portfolio_id = Column(Integer, ForeignKey('portfolios.id')) 42 | symbol = Column(String) 43 | quantity = Column(Float) 44 | price = Column(Float) 45 | timestamp = Column(DateTime) 46 | 47 | portfolio = relationship('Portfolio', back_populates='transactions') 48 | 49 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/backend/schemas.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | class UserCreate(BaseModel): 5 | username: str 6 | password: str 7 | 8 | 9 | class AddMoney(BaseModel): 10 | amount: float 11 | 12 | 13 | class TradeAsset(BaseModel): 14 | symbol: str 15 | quantity: float 16 | 17 | -------------------------------------------------------------------------------- /Crypto 
Demo Portfolio/frontend/README.md: -------------------------------------------------------------------------------- 1 | # React + TypeScript + Vite 2 | 3 | This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules. 4 | 5 | Currently, two official plugins are available: 6 | 7 | - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) for Fast Refresh 8 | - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh 9 | 10 | ## Expanding the ESLint configuration 11 | 12 | If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules: 13 | 14 | ```js 15 | export default tseslint.config({ 16 | extends: [ 17 | // Remove ...tseslint.configs.recommended and replace with this 18 | ...tseslint.configs.recommendedTypeChecked, 19 | // Alternatively, use this for stricter rules 20 | ...tseslint.configs.strictTypeChecked, 21 | // Optionally, add this for stylistic rules 22 | ...tseslint.configs.stylisticTypeChecked, 23 | ], 24 | languageOptions: { 25 | // other options... 26 | parserOptions: { 27 | project: ['./tsconfig.node.json', './tsconfig.app.json'], 28 | tsconfigRootDir: import.meta.dirname, 29 | }, 30 | }, 31 | }) 32 | ``` 33 | 34 | You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules: 35 | 36 | ```js 37 | // eslint.config.js 38 | import reactX from 'eslint-plugin-react-x' 39 | import reactDom from 'eslint-plugin-react-dom' 40 | 41 | export default tseslint.config({ 42 | plugins: { 43 | // Add the react-x and react-dom plugins 44 | 'react-x': reactX, 45 | 'react-dom': reactDom, 46 | }, 47 | rules: { 48 | // other rules... 49 | // Enable its recommended typescript rules 50 | ...reactX.configs['recommended-typescript'].rules, 51 | ...reactDom.configs.recommended.rules, 52 | }, 53 | }) 54 | ``` 55 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/eslint.config.js: -------------------------------------------------------------------------------- 1 | import js from '@eslint/js' 2 | import globals from 'globals' 3 | import reactHooks from 'eslint-plugin-react-hooks' 4 | import reactRefresh from 'eslint-plugin-react-refresh' 5 | import tseslint from 'typescript-eslint' 6 | 7 | export default tseslint.config( 8 | { ignores: ['dist'] }, 9 | { 10 | extends: [js.configs.recommended, ...tseslint.configs.recommended], 11 | files: ['**/*.{ts,tsx}'], 12 | languageOptions: { 13 | ecmaVersion: 2020, 14 | globals: globals.browser, 15 | }, 16 | plugins: { 17 | 'react-hooks': reactHooks, 18 | 'react-refresh': reactRefresh, 19 | }, 20 | rules: { 21 | ...reactHooks.configs.recommended.rules, 22 | 'react-refresh/only-export-components': [ 23 | 'warn', 24 | { allowConstantExport: true }, 25 | ], 26 | }, 27 | }, 28 | ) 29 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Vite + React + TS 8 | 9 | 10 |
11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "tsc -b && vite build", 9 | "lint": "eslint .", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "@tailwindcss/vite": "^4.1.7", 14 | "@types/react-router-dom": "^5.3.3", 15 | "axios": "^1.9.0", 16 | "react": "^19.1.0", 17 | "react-dom": "^19.1.0", 18 | "react-router-dom": "^7.6.0" 19 | }, 20 | "devDependencies": { 21 | "@eslint/js": "^9.25.0", 22 | "@tailwindcss/forms": "^0.5.10", 23 | "@types/react": "^19.1.2", 24 | "@types/react-dom": "^19.1.2", 25 | "@vitejs/plugin-react": "^4.4.1", 26 | "autoprefixer": "^10.4.21", 27 | "eslint": "^9.25.0", 28 | "eslint-plugin-react-hooks": "^5.2.0", 29 | "eslint-plugin-react-refresh": "^0.4.19", 30 | "globals": "^16.0.0", 31 | "postcss": "^8.5.3", 32 | "tailwindcss": "^4.1.7", 33 | "typescript": "~5.8.3", 34 | "typescript-eslint": "^8.30.1", 35 | "vite": "^6.3.5" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/public/vite.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/App.css: -------------------------------------------------------------------------------- 1 | /* Empty file - using Tailwind CSS for styling */ 2 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/App.tsx: -------------------------------------------------------------------------------- 1 | import { BrowserRouter as Router, Routes, Route, Navigate } from 'react-router-dom' 2 | import Register from './components/Register' 3 | import Login from './components/Login' 4 | import Dashboard from './components/Dashboard' 5 | import { AuthProvider, useAuth } from './context/AuthContext' 6 | 7 | function AppRoutes() { 8 | const { isAuthenticated, loading } = useAuth() 9 | 10 | if (loading) { 11 | return
Loading...
12 | } 13 | 14 | return ( 15 | 16 | : } /> 17 | : } /> 18 | : } /> 19 | } /> 20 | 21 | ) 22 | } 23 | 24 | function App() { 25 | return ( 26 | 27 | 28 | 29 | 30 | 31 | ) 32 | } 33 | 34 | export default App 35 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/assets/react.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/components/Login.tsx: -------------------------------------------------------------------------------- 1 | import { useState } from 'react'; 2 | import type { FormEvent } from 'react'; 3 | import { useNavigate } from 'react-router-dom'; 4 | import { login } from '../services/api'; 5 | import { useAuth } from '../context/AuthContext'; 6 | 7 | const Login = () => { 8 | const [username, setUsername] = useState(''); 9 | const [password, setPassword] = useState(''); 10 | const [error, setError] = useState(''); 11 | const [loading, setLoading] = useState(false); 12 | const navigate = useNavigate(); 13 | const { login: authLogin } = useAuth(); 14 | 15 | const handleSubmit = async (e: FormEvent) => { 16 | e.preventDefault(); 17 | 18 | try { 19 | setError(''); 20 | setLoading(true); 21 | await login(username, password); 22 | authLogin(); 23 | navigate('/dashboard'); 24 | } catch (err: any) { 25 | setError(err.response?.data?.detail || 'Invalid username or password'); 26 | } finally { 27 | setLoading(false); 28 | } 29 | }; 30 | 31 | return ( 32 |
33 |
34 |
35 |

36 | Sign in to your account 37 |

38 |

39 | Don't have an account?{' '} 40 | 44 | Register 45 | 46 |

47 |
48 | 49 |
50 | {error && ( 51 |
52 |
{error}
53 |
54 | )} 55 | 56 |
57 |
58 | 59 | setUsername(e.target.value)} 68 | /> 69 |
70 |
71 | 72 | setPassword(e.target.value)} 81 | /> 82 |
83 |
84 | 85 |
86 | 99 |
100 |
101 |
102 |
103 | ); 104 | }; 105 | 106 | export default Login; 107 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/context/AuthContext.tsx: -------------------------------------------------------------------------------- 1 | import { createContext, useContext, useState, useEffect } from 'react'; 2 | import type { ReactNode } from 'react'; 3 | import { checkAuth, logout } from '../services/api'; 4 | 5 | interface AuthContextType { 6 | isAuthenticated: boolean; 7 | loading: boolean; 8 | login: () => void; 9 | logout: () => void; 10 | } 11 | 12 | const AuthContext = createContext(undefined); 13 | 14 | export const AuthProvider = ({ children }: { children: ReactNode }) => { 15 | const [isAuthenticated, setIsAuthenticated] = useState(false); 16 | const [loading, setLoading] = useState(true); 17 | 18 | useEffect(() => { 19 | const auth = checkAuth(); 20 | setIsAuthenticated(auth); 21 | setLoading(false); 22 | }, []); 23 | 24 | const handleLogin = () => { 25 | setIsAuthenticated(true); 26 | }; 27 | 28 | const handleLogout = () => { 29 | logout(); 30 | setIsAuthenticated(false); 31 | }; 32 | 33 | return ( 34 | 42 | {children} 43 | 44 | ); 45 | }; 46 | 47 | export const useAuth = () => { 48 | const context = useContext(AuthContext); 49 | if (context === undefined) { 50 | throw new Error('useAuth must be used within an AuthProvider'); 51 | } 52 | return context; 53 | }; -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/index.css: -------------------------------------------------------------------------------- 1 | @import 'tailwindcss'; 2 | 3 | /* Base styles */ 4 | :root { 5 | font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | } 9 | 10 | /* Basic reset */ 11 | body { 12 | margin: 0; 13 | min-height: 100vh; 14 | } 15 | 16 | /* Keep some of the existing styles if needed */ 17 | :root { 18 | line-height: 1.5; 19 | font-weight: 400; 20 | color-scheme: light dark; 21 | font-synthesis: none; 22 | text-rendering: optimizeLegibility; 23 | } 24 | 25 | /* Reset default dark mode */ 26 | :root { 27 | color: #213547; 28 | background-color: #ffffff; 29 | } 30 | 31 | /* Ensure buttons use Tailwind styling */ 32 | button { 33 | border-radius: 0.375rem; 34 | font-weight: 500; 35 | cursor: pointer; 36 | } 37 | 38 | a { 39 | font-weight: 500; 40 | color: #646cff; 41 | text-decoration: inherit; 42 | } 43 | a:hover { 44 | color: #535bf2; 45 | } 46 | 47 | h1 { 48 | font-size: 3.2em; 49 | line-height: 1.1; 50 | } 51 | 52 | button:focus, 53 | button:focus-visible { 54 | outline: 4px auto -webkit-focus-ring-color; 55 | } 56 | 57 | @media (prefers-color-scheme: light) { 58 | :root { 59 | color: #213547; 60 | background-color: #ffffff; 61 | } 62 | a:hover { 63 | color: #747bff; 64 | } 65 | button { 66 | background-color: #f9f9f9; 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/main.tsx: -------------------------------------------------------------------------------- 1 | import { StrictMode } from 'react' 2 | import { createRoot } from 'react-dom/client' 3 | import './index.css' 4 | import App from './App.tsx' 5 | 6 | createRoot(document.getElementById('root')!).render( 7 | 8 | 9 | , 10 | ) 11 | 
-------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/services/api.ts: -------------------------------------------------------------------------------- 1 | import axios from 'axios'; 2 | 3 | const API_URL = 'http://localhost:8000'; 4 | 5 | // Types 6 | export interface UserCreate { 7 | username: string; 8 | password: string; 9 | } 10 | 11 | export interface LoginResponse { 12 | access_token: string; 13 | token_type: string; 14 | } 15 | 16 | export interface AddMoney { 17 | amount: number; 18 | } 19 | 20 | export interface TradeAsset { 21 | symbol: string; 22 | quantity: number; 23 | } 24 | 25 | export interface Asset { 26 | symbol: string; 27 | quantity: number; 28 | current_price: number; 29 | total_value: number; 30 | performance_abs: number; 31 | performance_rel: number; 32 | } 33 | 34 | export interface Portfolio { 35 | total_added_money: number; 36 | available_money: number; 37 | total_value: number; 38 | performance_abs: number; 39 | performance_rel: number; 40 | assets: Asset[]; 41 | } 42 | 43 | // API client setup 44 | const api = axios.create({ 45 | baseURL: API_URL, 46 | headers: { 47 | 'Content-Type': 'application/json', 48 | }, 49 | }); 50 | 51 | // Set auth token for requests 52 | export const setAuthToken = (token: string) => { 53 | if (token) { 54 | api.defaults.headers.common['Authorization'] = `Bearer ${token}`; 55 | } else { 56 | delete api.defaults.headers.common['Authorization']; 57 | } 58 | }; 59 | 60 | // API functions 61 | export const register = async (userData: UserCreate) => { 62 | return api.post('/register', userData); 63 | }; 64 | 65 | export const login = async (username: string, password: string) => { 66 | const formData = new FormData(); 67 | formData.append('username', username); 68 | formData.append('password', password); 69 | 70 | const response = await api.post('/login', formData, { 71 | headers: { 72 | 'Content-Type': 'application/x-www-form-urlencoded', 73 | }, 74 | }); 75 | 76 | if (response.data.access_token) { 77 | setAuthToken(response.data.access_token); 78 | localStorage.setItem('token', response.data.access_token); 79 | } 80 | 81 | return response.data; 82 | }; 83 | 84 | export const logout = () => { 85 | localStorage.removeItem('token'); 86 | setAuthToken(''); 87 | }; 88 | 89 | export const checkAuth = () => { 90 | const token = localStorage.getItem('token'); 91 | if (token) { 92 | setAuthToken(token); 93 | return true; 94 | } 95 | return false; 96 | }; 97 | 98 | export const addMoney = async (amount: number) => { 99 | return api.post('/add-money', { amount }); 100 | }; 101 | 102 | export const buyAsset = async (symbol: string, quantity: number) => { 103 | return api.post('/buy', { symbol, quantity }); 104 | }; 105 | 106 | export const sellAsset = async (symbol: string, quantity: number) => { 107 | return api.post('/sell', { symbol, quantity }); 108 | }; 109 | 110 | export const getPortfolio = async () => { 111 | return api.get('/portfolio'); 112 | }; 113 | 114 | export default { 115 | register, 116 | login, 117 | logout, 118 | checkAuth, 119 | addMoney, 120 | buyAsset, 121 | sellAsset, 122 | getPortfolio, 123 | }; 124 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/tsconfig.app.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", 4 | "target": "ES2020", 5 | "useDefineForClassFields": true, 6 | "lib": ["ES2020", "DOM", "DOM.Iterable"], 7 | "module": "ESNext", 8 | "skipLibCheck": true, 9 | 10 | /* Bundler mode */ 11 | "moduleResolution": "bundler", 12 | "allowImportingTsExtensions": true, 13 | "verbatimModuleSyntax": true, 14 | "moduleDetection": "force", 15 | "noEmit": true, 16 | "jsx": "react-jsx", 17 | 18 | /* Linting */ 19 | "strict": true, 20 | "noUnusedLocals": true, 21 | "noUnusedParameters": true, 22 | "erasableSyntaxOnly": true, 23 | "noFallthroughCasesInSwitch": true, 24 | "noUncheckedSideEffectImports": true 25 | }, 26 | "include": ["src"] 27 | } 28 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "files": [], 3 | "references": [ 4 | { "path": "./tsconfig.app.json" }, 5 | { "path": "./tsconfig.node.json" } 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", 4 | "target": "ES2022", 5 | "lib": ["ES2023"], 6 | "module": "ESNext", 7 | "skipLibCheck": true, 8 | 9 | /* Bundler mode */ 10 | "moduleResolution": "bundler", 11 | "allowImportingTsExtensions": true, 12 | "verbatimModuleSyntax": true, 13 | "moduleDetection": "force", 14 | "noEmit": true, 15 | 16 | /* Linting */ 17 | "strict": true, 18 | "noUnusedLocals": true, 19 | "noUnusedParameters": true, 20 | "erasableSyntaxOnly": true, 21 | "noFallthroughCasesInSwitch": true, 22 | "noUncheckedSideEffectImports": true 23 | }, 24 | "include": ["vite.config.ts"] 25 | } 26 | -------------------------------------------------------------------------------- /Crypto Demo Portfolio/frontend/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react' 3 | import tailwindcss from '@tailwindcss/vite' 4 | 5 | // https://vite.dev/config/ 6 | export default defineConfig({ 7 | plugins: [react(), tailwindcss()], 8 | }) 9 | -------------------------------------------------------------------------------- /Deep Reinforcement Learning - OpenAI Gym/intelligent_agent.py: -------------------------------------------------------------------------------- 1 | import gym # pip install gym 2 | import numpy as np 3 | 4 | from tensorflow.keras.models import Sequential 5 | from tensorflow.keras.layers import Dense, Flatten 6 | from tensorflow.keras.optimizers import Adam 7 | 8 | from rl.agents import DQNAgent # pip install keras-rl2 9 | from rl.policy import BoltzmannQPolicy # important to have gym==0.25.2 10 | from rl.memory import SequentialMemory 11 | 12 | env = gym.make("CartPole-v1") # no render mode to prevent display while training 13 | 14 | states = env.observation_space.shape[0] 15 | actions = env.action_space.n 16 | 17 | print(states) 18 | print(actions) 19 | 20 | model = Sequential() 21 | model.add(Flatten(input_shape=(1, states))) 22 | model.add(Dense(24, activation="relu")) 23 | model.add(Dense(24, activation="relu")) 24 | 
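# output layer: one linear unit per action, so the network predicts a Q-value for every possible action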
model.add(Dense(actions, activation="linear")) 25 | 26 | agent = DQNAgent( 27 | model=model, 28 | memory=SequentialMemory(limit=50000, window_length=1), 29 | policy=BoltzmannQPolicy(), 30 | nb_actions=actions, 31 | nb_steps_warmup=10, 32 | target_model_update=0.01 33 | ) 34 | 35 | print(env) 36 | print(env.observation_space) 37 | 38 | agent.compile(Adam(lr=0.001), metrics=["mae"]) 39 | agent.fit(env, nb_steps=100000, visualize=False, verbose=1) 40 | 41 | results = agent.test(env, nb_episodes=10, visualize=True) 42 | print(np.mean(results.history["episode_reward"])) 43 | 44 | env.close() 45 | -------------------------------------------------------------------------------- /Deep Reinforcement Learning - OpenAI Gym/random_agent.py: -------------------------------------------------------------------------------- 1 | import random 2 | import gym # pip install gym==0.25.2 3 | 4 | env = gym.make("CartPole-v1", render_mode="human") 5 | 6 | episodes = 10 7 | for episode in range(1, episodes+1): 8 | state = env.reset() 9 | done = False 10 | score = 0 11 | 12 | while not done: # try alternatively while True to see full fail 13 | action = random.choice([0, 1]) 14 | n_state, reward, done, info = env.step(action) # in newer version five arguments (truncated between done and info) 15 | score += reward 16 | env.render() # need pip install pyglet 17 | 18 | print(f"Episode; {episode} Score: {score}") 19 | 20 | env.close() 21 | -------------------------------------------------------------------------------- /Docker Crash Course/Flask App/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | 7 | RUN pip3 install --no-cache-dir -r requirements.txt 8 | 9 | COPY . . 
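# The ENV defaults below can be overridden at run time (docker run -e PORT=8080 ...).
# LOG_FILE points under /data, so mount a volume there if the message log should survive
# container restarts (for example docker run -v flask_logs:/data ..., volume name illustrative).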
10 | 11 | EXPOSE 5000 12 | 13 | ENV DEBUG=False 14 | ENV PORT=5000 15 | ENV LOG_FILE=/data/logs/log.txt 16 | 17 | CMD ["python3", "app.py"] 18 | -------------------------------------------------------------------------------- /Docker Crash Course/Flask App/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | from flask import Flask, render_template, request 3 | 4 | app = Flask(__name__) 5 | 6 | LOG_FILE = os.getenv("LOG_FILE", "message_log.txt") 7 | os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True) 8 | 9 | 10 | @app.route('/', methods=['GET', 'POST']) 11 | def index(): 12 | if request.method == 'POST': 13 | message = request.form.get('message') 14 | 15 | with open(LOG_FILE, 'a') as f: 16 | f.write(message + '\n') 17 | 18 | messages = [] 19 | if os.path.exists(LOG_FILE): 20 | with open(LOG_FILE, 'r') as f: 21 | messages = f.read().split('\n')[:-1] 22 | 23 | return render_template('index.html', messages=[m.strip() for m in messages]) 24 | 25 | 26 | if __name__ == '__main__': 27 | app.run(host="0.0.0.0", port=int(os.getenv("PORT", 5000)), debug=os.getenv("DEBUG", "False") == "True") 28 | 29 | -------------------------------------------------------------------------------- /Docker Crash Course/Flask App/requirements.txt: -------------------------------------------------------------------------------- 1 | blinker==1.9.0 2 | click==8.1.8 3 | Flask==3.1.0 4 | itsdangerous==2.2.0 5 | Jinja2==3.1.5 6 | MarkupSafe==3.0.2 7 | Werkzeug==3.1.3 8 | -------------------------------------------------------------------------------- /Docker Crash Course/Flask App/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Simple Flask App 7 | 8 | 9 | 10 |
11 | 12 | 13 |
14 | 15 |

Messages

16 | 17 |
    18 | {% for message in messages %} 19 |
  • {{ message }}
  • 20 | {% endfor %} 21 |
22 | 23 | 24 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/backend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | 7 | RUN pip install -r requirements.txt 8 | 9 | COPY main.py . 10 | 11 | EXPOSE 8000 12 | 13 | CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] 14 | 15 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/backend/logs/backend.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Docker Crash Course/Shopping List App/backend/logs/backend.log -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/backend/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from fastapi.middleware.cors import CORSMiddleware 3 | from pydantic import BaseModel 4 | import psycopg2 5 | from psycopg2.extras import RealDictCursor 6 | import time 7 | 8 | LOG_FILE = 'logs/log.txt' 9 | 10 | app = FastAPI() 11 | 12 | app.add_middleware( 13 | CORSMiddleware, 14 | allow_origins=["http://localhost"], 15 | allow_credentials=True, 16 | allow_methods=["*"], 17 | allow_headers=["*"], 18 | ) 19 | 20 | class Item(BaseModel): 21 | text: str 22 | 23 | def get_db_connection(): 24 | while True: 25 | try: 26 | return psycopg2.connect( 27 | host="db", 28 | database="shopping", 29 | user="postgres", 30 | password="postgres", 31 | cursor_factory=RealDictCursor 32 | ) 33 | except psycopg2.OperationalError: 34 | print("Waiting for database...") 35 | time.sleep(1) 36 | 37 | conn = None 38 | cursor = None 39 | 40 | @app.on_event("startup") 41 | async def startup(): 42 | global conn, cursor 43 | conn = get_db_connection() 44 | cursor = conn.cursor() 45 | 46 | cursor.execute(""" 47 | CREATE TABLE IF NOT EXISTS items ( 48 | id SERIAL PRIMARY KEY, 49 | text VARCHAR(255) NOT NULL 50 | ) 51 | """) 52 | conn.commit() 53 | 54 | @app.on_event("shutdown") 55 | async def shutdown(): 56 | if cursor: 57 | cursor.close() 58 | if conn: 59 | conn.close() 60 | 61 | @app.get("/items") 62 | def read_items(): 63 | cursor.execute("SELECT * FROM items") 64 | items = cursor.fetchall() 65 | return items 66 | 67 | @app.post("/items") 68 | def create_item(item: Item): 69 | cursor.execute("INSERT INTO items (text) VALUES (%s) RETURNING *", (item.text,)) 70 | new_item = cursor.fetchone() 71 | conn.commit() 72 | 73 | with open(LOG_FILE, 'a') as f: 74 | f.write(f'Created item {item.text}\n') 75 | 76 | 77 | return new_item 78 | 79 | @app.delete("/items/{item_id}") 80 | def delete_item(item_id: int): 81 | cursor.execute("DELETE FROM items WHERE id = %s RETURNING *", (item_id,)) 82 | deleted_item = cursor.fetchone() 83 | if deleted_item is None: 84 | raise HTTPException(status_code=404, detail="Item not found") 85 | conn.commit() 86 | 87 | with open(LOG_FILE, 'a') as f: 88 | f.write(f'Deleted item with id {item_id}\n') 89 | 90 | return {"message": "Item deleted"} 91 | 92 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/backend/requirements.txt: 
-------------------------------------------------------------------------------- 1 | annotated-types==0.7.0 2 | anyio==4.8.0 3 | click==8.1.8 4 | exceptiongroup==1.2.2 5 | fastapi==0.115.8 6 | h11==0.14.0 7 | idna==3.10 8 | psycopg2-binary==2.9.10 9 | pydantic==2.10.6 10 | pydantic_core==2.27.2 11 | sniffio==1.3.1 12 | starlette==0.45.3 13 | typing_extensions==4.12.2 14 | uvicorn==0.34.0 15 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: postgres:14 4 | environment: 5 | POSTGRES_DB: shopping 6 | POSTGRES_USER: postgres 7 | POSTGRES_PASSWORD: postgres 8 | volumes: 9 | - pgdata:/var/lib/postgresql/data 10 | 11 | backend: 12 | build: ./backend 13 | image: neuralnine/tutorialbackend 14 | ports: 15 | - "8000:8000" 16 | depends_on: 17 | - db 18 | volumes: 19 | - logs:/app/logs 20 | 21 | frontend: 22 | build: ./frontend 23 | image: neuralnine/tutorialfrontend 24 | ports: 25 | - "80:80" 26 | depends_on: 27 | - backend 28 | 29 | volumes: 30 | logs: 31 | pgdata: 32 | 33 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/.env: -------------------------------------------------------------------------------- 1 | VITE_API_URL=http://localhost:8000 2 | 3 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:20-alpine AS builder 2 | 3 | WORKDIR /app 4 | 5 | COPY package.json package-lock.json* ./ 6 | 7 | RUN npm ci 8 | 9 | COPY . . 10 | 11 | RUN npm run build 12 | 13 | FROM nginx:1.25-alpine 14 | 15 | WORKDIR /usr/share/nginx/html 16 | 17 | RUN rm -rf ./* 18 | 19 | COPY --from=builder /app/dist ./ 20 | 21 | EXPOSE 80 22 | 23 | CMD ["nginx", "-g", "daemon off;"] 24 | 25 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/README.md: -------------------------------------------------------------------------------- 1 | # React + TypeScript + Vite 2 | 3 | This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules. 
4 | 5 | Currently, two official plugins are available: 6 | 7 | - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh 8 | - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh 9 | 10 | ## Expanding the ESLint configuration 11 | 12 | If you are developing a production application, we recommend updating the configuration to enable type aware lint rules: 13 | 14 | - Configure the top-level `parserOptions` property like this: 15 | 16 | ```js 17 | export default tseslint.config({ 18 | languageOptions: { 19 | // other options... 20 | parserOptions: { 21 | project: ['./tsconfig.node.json', './tsconfig.app.json'], 22 | tsconfigRootDir: import.meta.dirname, 23 | }, 24 | }, 25 | }) 26 | ``` 27 | 28 | - Replace `tseslint.configs.recommended` to `tseslint.configs.recommendedTypeChecked` or `tseslint.configs.strictTypeChecked` 29 | - Optionally add `...tseslint.configs.stylisticTypeChecked` 30 | - Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and update the config: 31 | 32 | ```js 33 | // eslint.config.js 34 | import react from 'eslint-plugin-react' 35 | 36 | export default tseslint.config({ 37 | // Set the react version 38 | settings: { react: { version: '18.3' } }, 39 | plugins: { 40 | // Add the react plugin 41 | react, 42 | }, 43 | rules: { 44 | // other rules... 45 | // Enable its recommended rules 46 | ...react.configs.recommended.rules, 47 | ...react.configs['jsx-runtime'].rules, 48 | }, 49 | }) 50 | ``` 51 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/eslint.config.js: -------------------------------------------------------------------------------- 1 | import js from '@eslint/js' 2 | import globals from 'globals' 3 | import reactHooks from 'eslint-plugin-react-hooks' 4 | import reactRefresh from 'eslint-plugin-react-refresh' 5 | import tseslint from 'typescript-eslint' 6 | 7 | export default tseslint.config( 8 | { ignores: ['dist'] }, 9 | { 10 | extends: [js.configs.recommended, ...tseslint.configs.recommended], 11 | files: ['**/*.{ts,tsx}'], 12 | languageOptions: { 13 | ecmaVersion: 2020, 14 | globals: globals.browser, 15 | }, 16 | plugins: { 17 | 'react-hooks': reactHooks, 18 | 'react-refresh': reactRefresh, 19 | }, 20 | rules: { 21 | ...reactHooks.configs.recommended.rules, 22 | 'react-refresh/only-export-components': [ 23 | 'warn', 24 | { allowConstantExport: true }, 25 | ], 26 | }, 27 | }, 28 | ) 29 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Vite + React + TS 8 | 9 | 10 |
11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "tsc -b && vite build", 9 | "lint": "eslint .", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "react": "^19.0.0", 14 | "react-dom": "^19.0.0" 15 | }, 16 | "devDependencies": { 17 | "@eslint/js": "^9.19.0", 18 | "@types/react": "^19.0.8", 19 | "@types/react-dom": "^19.0.3", 20 | "@vitejs/plugin-react": "^4.3.4", 21 | "eslint": "^9.19.0", 22 | "eslint-plugin-react-hooks": "^5.0.0", 23 | "eslint-plugin-react-refresh": "^0.4.18", 24 | "globals": "^15.14.0", 25 | "typescript": "~5.7.2", 26 | "typescript-eslint": "^8.22.0", 27 | "vite": "^6.1.0" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/public/vite.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/src/App.css: -------------------------------------------------------------------------------- 1 | #root { 2 | max-width: 1280px; 3 | margin: 0 auto; 4 | padding: 2rem; 5 | text-align: center; 6 | } 7 | 8 | .logo { 9 | height: 6em; 10 | padding: 1.5em; 11 | will-change: filter; 12 | transition: filter 300ms; 13 | } 14 | .logo:hover { 15 | filter: drop-shadow(0 0 2em #646cffaa); 16 | } 17 | .logo.react:hover { 18 | filter: drop-shadow(0 0 2em #61dafbaa); 19 | } 20 | 21 | @keyframes logo-spin { 22 | from { 23 | transform: rotate(0deg); 24 | } 25 | to { 26 | transform: rotate(360deg); 27 | } 28 | } 29 | 30 | @media (prefers-reduced-motion: no-preference) { 31 | a:nth-of-type(2) .logo { 32 | animation: logo-spin infinite 20s linear; 33 | } 34 | } 35 | 36 | .card { 37 | padding: 2em; 38 | } 39 | 40 | .read-the-docs { 41 | color: #888; 42 | } 43 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/src/App.tsx: -------------------------------------------------------------------------------- 1 | import { useState, useEffect } from 'react' 2 | import './App.css' 3 | 4 | interface Item { 5 | id: number; 6 | text: string; 7 | } 8 | 9 | const API_URL = import.meta.env.VITE_API_URL; 10 | 11 | function App() { 12 | const [items, setItems] = useState([]) 13 | const [newItem, setNewItem] = useState('') 14 | 15 | useEffect(() => { 16 | fetchItems() 17 | }, []) 18 | 19 | const fetchItems = async () => { 20 | const response = await fetch(`${API_URL}/items`) 21 | const data = await response.json() 22 | setItems(data) 23 | } 24 | 25 | const addItem = async (e: React.FormEvent) => { 26 | e.preventDefault() 27 | if (!newItem.trim()) return 28 | 29 | await fetch(`${API_URL}/items`, { 30 | method: 'POST', 31 | headers: { 32 | 'Content-Type': 'application/json', 33 | }, 34 | body: JSON.stringify({ text: newItem }), 35 | }) 36 | setNewItem('') 37 | fetchItems() 38 | } 39 | 40 | const deleteItem = async (id: number) => { 41 | await fetch(`${API_URL}/items/${id}`, { 42 | method: 'DELETE', 43 | }) 44 | fetchItems() 45 | } 46 | 47 | return ( 48 |
49 |       <h1>Shopping List</h1>
50 |       <form onSubmit={addItem}>
51 |         <input
52 |           type="text"
53 |           value={newItem}
54 |           onChange={(e) => setNewItem(e.target.value)}
55 |           placeholder="Add new item"
56 |         />
57 |         <button type="submit">Add</button>
58 |       </form>
59 |       <ul>
60 |         {items.map((item) => (
61 |           <li key={item.id}>
62 |             {item.text}
63 |             <button onClick={() => deleteItem(item.id)}>Delete</button>
64 |           </li>
65 |         ))}
66 |       </ul>
68 | ) 69 | } 70 | 71 | export default App 72 | 73 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/src/assets/react.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/src/index.css: -------------------------------------------------------------------------------- 1 | :root { 2 | font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; 3 | line-height: 1.5; 4 | font-weight: 400; 5 | 6 | color-scheme: light dark; 7 | color: rgba(255, 255, 255, 0.87); 8 | background-color: #242424; 9 | 10 | font-synthesis: none; 11 | text-rendering: optimizeLegibility; 12 | -webkit-font-smoothing: antialiased; 13 | -moz-osx-font-smoothing: grayscale; 14 | } 15 | 16 | a { 17 | font-weight: 500; 18 | color: #646cff; 19 | text-decoration: inherit; 20 | } 21 | a:hover { 22 | color: #535bf2; 23 | } 24 | 25 | body { 26 | margin: 0; 27 | display: flex; 28 | place-items: center; 29 | min-width: 320px; 30 | min-height: 100vh; 31 | } 32 | 33 | h1 { 34 | font-size: 3.2em; 35 | line-height: 1.1; 36 | } 37 | 38 | button { 39 | border-radius: 8px; 40 | border: 1px solid transparent; 41 | padding: 0.6em 1.2em; 42 | font-size: 1em; 43 | font-weight: 500; 44 | font-family: inherit; 45 | background-color: #1a1a1a; 46 | cursor: pointer; 47 | transition: border-color 0.25s; 48 | } 49 | button:hover { 50 | border-color: #646cff; 51 | } 52 | button:focus, 53 | button:focus-visible { 54 | outline: 4px auto -webkit-focus-ring-color; 55 | } 56 | 57 | @media (prefers-color-scheme: light) { 58 | :root { 59 | color: #213547; 60 | background-color: #ffffff; 61 | } 62 | a:hover { 63 | color: #747bff; 64 | } 65 | button { 66 | background-color: #f9f9f9; 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/src/main.tsx: -------------------------------------------------------------------------------- 1 | import { StrictMode } from 'react' 2 | import { createRoot } from 'react-dom/client' 3 | import './index.css' 4 | import App from './App.tsx' 5 | 6 | createRoot(document.getElementById('root')!).render( 7 | 8 | 9 | , 10 | ) 11 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/tsconfig.app.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", 4 | "target": "ES2020", 5 | "useDefineForClassFields": true, 6 | "lib": ["ES2020", "DOM", "DOM.Iterable"], 7 | "module": "ESNext", 8 | "skipLibCheck": true, 9 | 10 | /* Bundler mode */ 11 | "moduleResolution": "bundler", 12 | "allowImportingTsExtensions": true, 13 | "isolatedModules": true, 14 | "moduleDetection": "force", 15 | "noEmit": true, 16 | "jsx": "react-jsx", 17 | 18 | /* Linting */ 19 | "strict": true, 20 | "noUnusedLocals": true, 21 | "noUnusedParameters": true, 22 | "noFallthroughCasesInSwitch": true, 23 | "noUncheckedSideEffectImports": true 24 | }, 25 | "include": ["src"] 26 | } 27 | 
-------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "files": [], 3 | "references": [ 4 | { "path": "./tsconfig.app.json" }, 5 | { "path": "./tsconfig.node.json" } 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", 4 | "target": "ES2022", 5 | "lib": ["ES2023"], 6 | "module": "ESNext", 7 | "skipLibCheck": true, 8 | 9 | /* Bundler mode */ 10 | "moduleResolution": "bundler", 11 | "allowImportingTsExtensions": true, 12 | "isolatedModules": true, 13 | "moduleDetection": "force", 14 | "noEmit": true, 15 | 16 | /* Linting */ 17 | "strict": true, 18 | "noUnusedLocals": true, 19 | "noUnusedParameters": true, 20 | "noFallthroughCasesInSwitch": true, 21 | "noUncheckedSideEffectImports": true 22 | }, 23 | "include": ["vite.config.ts"] 24 | } 25 | -------------------------------------------------------------------------------- /Docker Crash Course/Shopping List App/frontend/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react' 3 | 4 | // https://vite.dev/config/ 5 | export default defineConfig({ 6 | plugins: [react()], 7 | }) 8 | -------------------------------------------------------------------------------- /Drawdata Tutorial/.virtual_documents/Main.ipynb: -------------------------------------------------------------------------------- 1 | from drawdata import ScatterWidget 2 | 3 | 4 | widget = ScatterWidget() 5 | 6 | 7 | widget 8 | 9 | 10 | df = widget.data_as_pandas 11 | 12 | 13 | df 14 | 15 | 16 | import numpy as np 17 | import pandas as pd 18 | import matplotlib.pyplot as plt 19 | from sklearn.svm import LinearSVC, SVC 20 | from sklearn.linear_model import LogisticRegression 21 | from sklearn.neighbors import KNeighborsClassifier 22 | from sklearn.ensemble import RandomForestClassifier 23 | from sklearn.preprocessing import LabelEncoder 24 | from sklearn.inspection import DecisionBoundaryDisplay 25 | 26 | label_encoder = LabelEncoder() 27 | df['label_encoded'] = label_encoder.fit_transform(df['label']) 28 | 29 | X = df[['x', 'y']].values 30 | y = df['label_encoded'].values 31 | 32 | classifiers = { 33 | 'Logistic Regression': LogisticRegression(), 34 | 'Random Forest Classifier': RandomForestClassifier(n_jobs=-1), 35 | 'Linear Support Vector Classifier': LinearSVC(), 36 | 'Support Vector Classifier with RBF Kernel': SVC(kernel='rbf'), 37 | 'K-Neighbors Classifier': KNeighborsClassifier() 38 | } 39 | 40 | for name, clf in classifiers.items(): 41 | clf.fit(X, y) 42 | disp = DecisionBoundaryDisplay.from_estimator( 43 | clf, X, 44 | response_method="predict", 45 | xlabel='x', 46 | ylabel='y', 47 | alpha=0.3 48 | ) 49 | 50 | disp.ax_.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k') 51 | disp.ax_.set_title(name) 52 | plt.show() 53 | 54 | 55 | import warnings 56 | from sklearn.exceptions import ConvergenceWarning 57 | 58 | warnings.filterwarnings("ignore", category=ConvergenceWarning) 59 | 60 | import ipywidgets 61 | from sklearn.inspection import DecisionBoundaryDisplay 62 | from IPython.display import HTML 
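# The cell below wires a second ScatterWidget to an ipywidgets.Output:
# whenever the drawn points or the selected radio button change, on_change()
# refits the chosen classifier and redraws its decision boundary into the
# captured output area displayed next to the widget.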
63 | 64 | output = ipywidgets.Output() 65 | widget3 = ScatterWidget() 66 | 67 | classifier_selector = ipywidgets.RadioButtons( 68 | options=['Logistic Regression', 'Random Forest', 'SVC Poly', 'SVC RBF'], 69 | description='Classifier:', 70 | ) 71 | 72 | @output.capture(clear_output=True) 73 | def on_change(change): 74 | df = widget3.data_as_pandas 75 | if len(df) and (df['color'].nunique() > 1): 76 | X = df[['x', 'y']].values 77 | y = df['color'] 78 | display(HTML("


")) 79 | fig = plt.figure(figsize=(12, 12)) 80 | 81 | if classifier_selector.value == 'Logistic Regression': 82 | classifier = LogisticRegression().fit(X, y) 83 | elif classifier_selector.value == 'SVC Poly': 84 | classifier = SVC(kernel='poly').fit(X, y) 85 | elif classifier_selector.value == 'SVC RBF': 86 | classifier = SVC(kernel='rbf').fit(X, y) 87 | else: 88 | classifier = RandomForestClassifier().fit(X, y) 89 | 90 | disp = DecisionBoundaryDisplay.from_estimator( 91 | classifier, 92 | X, 93 | response_method="predict", 94 | xlabel="x", ylabel="y", 95 | alpha=0.5, 96 | ) 97 | disp.ax_.scatter(X[:, 0], X[:, 1], c=y, edgecolor="k") 98 | plt.title(f"{classifier.__class__.__name__}") 99 | plt.show() 100 | 101 | widget3.observe(on_change, names=["data"]) 102 | classifier_selector.observe(on_change, names="value") 103 | on_change(None) 104 | 105 | ipywidgets.HBox([ipywidgets.VBox([widget3, classifier_selector]), output]) 106 | -------------------------------------------------------------------------------- /E-Commerce AI Agent/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import asyncio 4 | from typing import Optional 5 | 6 | from dotenv import load_dotenv 7 | from pydantic import BaseModel, Field 8 | from flask import Flask, render_template, redirect, url_for, flash, request 9 | from mcp import ClientSession, StdioServerParameters 10 | from mcp.client.stdio import stdio_client 11 | 12 | from langchain_openai import ChatOpenAI 13 | from langchain_mcp_adapters.tools import load_mcp_tools 14 | from langgraph.prebuilt import create_react_agent 15 | 16 | 17 | load_dotenv() 18 | model = ChatOpenAI(model='gpt-4o') 19 | 20 | server_params = StdioServerParameters( 21 | command='npx', 22 | args=['@brightdata/mcp'], 23 | env = { 24 | 'API_TOKEN': os.getenv('API_TOKEN'), 25 | 'BROWSER_AUTH': os.getenv('BROWSER_AUTH'), 26 | 'WEB_UNLOCKER_ZONE': os.getenv('WEB_UNLOCKER_ZONE') 27 | } 28 | ) 29 | 30 | SYSTEM_PROMPT = ( 31 | "To find products, first use the search_engine tool. When finding products, use the web_data tool for the platform. If none exists, scrape as markdown." 32 | "Example: Don't use web_data_bestbuy_products for search. Use it only for getting data on specific products you already found in search." 
33 | ) 34 | 35 | PLATFORMS = ['Amazon', 'Best Buy', 'Ebay', 'Walmart', 'Target', 'Costco', 'Newegg'] 36 | 37 | 38 | class Hit(BaseModel): 39 | url: str = Field(..., description='The URL of the product that was found') 40 | title: str = Field(..., description='The title of the product that was found') 41 | rating: str = Field(..., description='The rating of the product (stars, number of ratings given etc.)') 42 | 43 | 44 | class PlatformBlock(BaseModel): 45 | platform: str = Field(..., description='Name of the platform') 46 | results: list[Hit] = Field(..., description='List of results for this platform') 47 | 48 | 49 | class ProductSearchResponse(BaseModel): 50 | platforms: list[PlatformBlock] = Field(..., description='Aggregated list of all results grouped by platform') 51 | 52 | 53 | 54 | app = Flask(__name__) 55 | app.secret_key = 'mysecretkey-not-for-prod' 56 | 57 | 58 | async def run_agent(query, platforms): 59 | async with stdio_client(server_params) as (read, write): 60 | async with ClientSession(read, write) as sess: 61 | await sess.initialize() 62 | 63 | tools = await load_mcp_tools(sess) 64 | 65 | agent = create_react_agent(model, tools, response_format=ProductSearchResponse) 66 | 67 | prompt = f'{query}\n\nPlatforms: {",".join(platforms)}' 68 | 69 | result = await agent.ainvoke( 70 | { 71 | 'messages': [ 72 | {'role': 'system', 'content': SYSTEM_PROMPT}, 73 | {'role': 'user', 'content': prompt} 74 | ] 75 | } 76 | ) 77 | 78 | structured = result['structured_response'] 79 | 80 | return structured.model_dump() 81 | 82 | 83 | @app.route("/", methods=["GET", "POST"]) 84 | def index(): 85 | if request.method == "POST": 86 | query = request.form.get("query", "").strip() 87 | platforms = request.form.getlist("platforms") 88 | if not query: 89 | flash("Please enter a search query.", "danger") 90 | return redirect(url_for("index")) 91 | if not platforms: 92 | flash("Select at least one platform.", "danger") 93 | return redirect(url_for("index")) 94 | 95 | try: 96 | response_json = asyncio.run(run_agent(query, platforms)) 97 | except Exception as exc: 98 | flash(f"Agent error: {exc}", "danger") 99 | return redirect(url_for("index")) 100 | 101 | return render_template( 102 | "index.html", 103 | query=query, 104 | platforms=PLATFORMS, 105 | selected=platforms, 106 | response=response_json, 107 | ) 108 | 109 | return render_template( 110 | "index.html", 111 | query="", 112 | platforms=PLATFORMS, 113 | selected=[], 114 | response=None, 115 | ) 116 | 117 | 118 | if __name__ == "__main__": 119 | app.run(host="0.0.0.0", port=8000, debug=True) 120 | -------------------------------------------------------------------------------- /E-Commerce AI Agent/static/style.css: -------------------------------------------------------------------------------- 1 | .card { border-radius: 1rem; } 2 | .card-header { border-top-left-radius: 1rem; border-top-right-radius: 1rem; } 3 | .btn { border-radius: 0.8rem; } 4 | 5 | .list-group-item:hover { background: #f8f9fa; } 6 | 7 | .form-check-input { cursor: pointer; } 8 | .form-check-label { cursor: pointer; } 9 | 10 | -------------------------------------------------------------------------------- /E-Commerce AI Agent/templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | E-Commerce Agent 6 | 7 | 8 | 12 | 13 | 14 | 15 | 16 | 21 | 22 |
23 | {% with messages = get_flashed_messages(with_categories=true) %} 24 | {% if messages %} 25 | {% for category, msg in messages %} 26 | 30 | {% endfor %} 31 | {% endif %} 32 | {% endwith %} 33 | 34 | {% block content %}{% endblock %} 35 |
36 | 37 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /E-Commerce AI Agent/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block content %} 3 |
4 |
5 |
6 |
7 |
Search products or content
8 | 9 |
10 | 15 |
16 | 17 |
18 | 19 |
20 | {% for plat in platforms %} 21 |
22 |
23 | 29 | 30 |
31 |
32 | {% endfor %} 33 |
34 |
35 | 36 | 37 |
38 |
39 | 40 | {% if response %} 41 |

Results

42 | {% for plat in response.platforms %} 43 |
44 |
45 | {{ plat.platform }} 46 |
47 |
    48 | {% for r in plat.results %} 49 |
  • 50 | {{ r.url }}
    51 | {{ r.title }} | {{ r.rating }} 52 |
  • 53 | {% endfor %} 54 | {% if not plat.results %} 55 |
  • No results returned.
  • 56 | {% endif %} 57 |
58 |
59 | {% endfor %} 60 | {% endif %} 61 |
62 |
63 | {% endblock %} 64 | 65 | 66 | -------------------------------------------------------------------------------- /Financial Dashboard/main.py: -------------------------------------------------------------------------------- 1 | import math 2 | import datetime as dt 3 | 4 | import numpy as np 5 | import yfinance as yf 6 | 7 | from bokeh.io import curdoc 8 | from bokeh.plotting import figure 9 | from bokeh.layouts import column, row 10 | from bokeh.models import TextInput, Button, DatePicker, MultiChoice 11 | 12 | 13 | def load_data(ticker1, ticker2, start, end): 14 | df1 = yf.download(ticker1, start, end) 15 | df2 = yf.download(ticker2, start, end) 16 | return df1, df2 17 | 18 | 19 | def update_plot(data, indicators, sync_axis=None): 20 | df = data 21 | gain = df.Close > df.Open 22 | loss = df.Open > df.Close 23 | width = 12 * 60 * 60 * 1000 # half day in ms 24 | 25 | if sync_axis is not None: 26 | p = figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,save", width=1000, x_range=sync_axis) 27 | else: 28 | p = figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,save", width=1000) 29 | 30 | p.xaxis.major_label_orientation = math.pi / 4 31 | p.grid.grid_line_alpha = 0.3 32 | 33 | p.segment(df.index, df.High, df.index, df.Low, color="black") 34 | p.vbar(df.index[gain], width, df.Open[gain], df.Close[gain], fill_color="#00ff00", line_color="#00ff00") 35 | p.vbar(df.index[loss], width, df.Open[loss], df.Close[loss], fill_color="#ff0000", line_color="#ff0000") 36 | 37 | for indicator in indicators: 38 | print(indicator) 39 | if indicator == "30 Day SMA": 40 | df['SMA30'] = df['Close'].rolling(30).mean() 41 | p.line(df.index, df.SMA30, color="purple", legend_label="30 Day SMA") 42 | elif indicator == "100 Day SMA": 43 | df['SMA100'] = df['Close'].rolling(100).mean() 44 | p.line(df.index, df.SMA100, color="blue", legend_label="100 Day SMA") 45 | elif indicator == "Linear Regression Line": 46 | par = np.polyfit(range(len(df.index.values)), df.Close.values, 1, full=True) 47 | slope = par[0][0] 48 | intercept = par[0][1] 49 | y_predicted = [slope * i + intercept for i in range(len(df.index.values))] 50 | p.segment(df.index[0], y_predicted[0], df.index[-1], y_predicted[-1], legend_label="Linear Regression", 51 | color="red") 52 | 53 | p.legend.location = "top_left" 54 | p.legend.click_policy = "hide" 55 | 56 | return p 57 | 58 | 59 | def on_button_click(main_stock, comparison_stock, start, end, indicators): 60 | source1, source2 = load_data(main_stock, comparison_stock, start, end) 61 | p = update_plot(source1, indicators) 62 | p2 = update_plot(source2, indicators, sync_axis=p.x_range) 63 | curdoc().clear() 64 | curdoc().add_root(layout) 65 | curdoc().add_root(row(p, p2)) 66 | 67 | 68 | stock1_text = TextInput(title="Main Stock") 69 | stock2_text = TextInput(title="Comparison Stock") 70 | date_picker_from = DatePicker(title='Start Date', value="2020-01-01", min_date="2000-01-01", max_date=dt.datetime.now().strftime("%Y-%m-%d")) 71 | date_picker_to = DatePicker(title='End Date', value="2020-02-01", min_date="2000-01-01", max_date=dt.datetime.now().strftime("%Y-%m-%d")) 72 | indicator_choice = MultiChoice(options=["100 Day SMA", "30 Day SMA", "Linear Regression Line"]) 73 | 74 | load_button = Button(label="Load Data", button_type="success") 75 | load_button.on_click(lambda: on_button_click(stock1_text.value, stock2_text.value, date_picker_from.value, date_picker_to.value, indicator_choice.value)) 76 | 77 | layout = column(stock1_text, stock2_text, date_picker_from, 
date_picker_to, indicator_choice, load_button) 78 | 79 | curdoc().clear() 80 | curdoc().add_root(layout) -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request, jsonify, render_template 2 | 3 | from utils import get_response, predict_class 4 | 5 | app = Flask(__name__, template_folder='templates') 6 | 7 | 8 | @app.route('/') 9 | def index(): 10 | return render_template('index.html') 11 | 12 | 13 | @app.route('/handle_message', methods=['POST']) 14 | def handle_message(): 15 | message = request.json['message'] 16 | intents_list = predict_class(message) 17 | response = get_response(intents_list) 18 | 19 | return jsonify({'response': response}) 20 | 21 | 22 | # curl -X POST http://127.0.0.1:5000/handle_message -d '{"message":"what is coding"}' -H "Content-Type: application/json" 23 | 24 | 25 | if __name__ == '__main__': 26 | app.run(host='0.0.0.0', debug=True) 27 | -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/model/chatbot_model.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Intelligent AI Web Chatbot/Flask App/model/chatbot_model.keras -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/model/classes.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Intelligent AI Web Chatbot/Flask App/model/classes.pkl -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/model/intents.json: -------------------------------------------------------------------------------- 1 | {"intents": [ 2 | {"tag": "greeting", 3 | "patterns": ["Hi", "How are you", "Is anyone there?", "Hello", "Good day", "Whats up", "Hey", "greetings"], 4 | "responses": ["Hello!", "Good to see you again!", "Hi there, how can I help?"], 5 | "context_set": "" 6 | }, 7 | {"tag": "goodbye", 8 | "patterns": ["cya", "See you later", "Goodbye", "I am Leaving", "Have a Good day", "bye", "cao", "see ya"], 9 | "responses": ["Sad to see you go :(", "Talk to you later", "Goodbye!"], 10 | "context_set": "" 11 | }, 12 | {"tag": "programming", 13 | "patterns": ["What is progamming?", "What is coding?", "Tell me about programming", "Tell me about coding", "What is software development?"], 14 | "responses": ["Programming, coding or software development, means writing computer code to automate tasks."], 15 | "context_set": "" 16 | }, 17 | {"tag": "how", 18 | "patterns": ["How are you?", "How is your day?", "How do you feel?", "Are you good?"], 19 | "responses": ["I am fine, thanks for asking!", "I feel great!"], 20 | "context_set": "" 21 | }, 22 | {"tag": "flask", 23 | "patterns": ["What is Flask?", "Do you know about Flask?", "Can you tell me something about Flask?", "Explain Flask to me!"], 24 | "responses": ["Flask is a Python web development framework, which is very simple and minimalistic."], 25 | "context_set": "" 26 | } 27 | ] 28 | } -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/model/words.pkl: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Intelligent AI Web Chatbot/Flask App/model/words.pkl -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Chat with Bot 7 | 8 | 9 | 78 | 79 | 80 |

Intelligent Web Chatbot

81 |
82 | 83 |
84 |
85 |
86 | 87 | 88 |
89 |
90 |
91 | 92 | 128 | 129 | 130 | -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Flask App/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 4 | 5 | import random 6 | import json 7 | import pickle 8 | import numpy as np 9 | 10 | import nltk 11 | from nltk.stem import WordNetLemmatizer 12 | 13 | from tensorflow.keras.models import load_model 14 | 15 | 16 | def clean_up_sentence(sentence): 17 | lemmatizer = WordNetLemmatizer() 18 | 19 | sentence_words = nltk.word_tokenize(sentence) 20 | sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words] 21 | 22 | return sentence_words 23 | 24 | 25 | def bag_of_words(sentence): 26 | words = pickle.load(open('model/words.pkl', 'rb')) 27 | 28 | sentence_words = clean_up_sentence(sentence) 29 | bag = [0] * len(words) 30 | for w in sentence_words: 31 | for i, word in enumerate(words): 32 | if word == w: 33 | bag[i] = 1 34 | return np.array(bag) 35 | 36 | 37 | def predict_class(sentence): 38 | classes = pickle.load(open('model/classes.pkl', 'rb')) 39 | model = load_model('model/chatbot_model.keras') 40 | 41 | bow = bag_of_words(sentence) 42 | res = model.predict(np.array([bow]))[0] 43 | ERROR_THRESHOLD = 0.25 44 | 45 | results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD] 46 | results.sort(key=lambda x: x[1], reverse=True) 47 | 48 | return_list = [] 49 | 50 | for r in results: 51 | return_list.append({'intent': classes[r[0]], 'probability': str(r[1])}) 52 | 53 | return return_list 54 | 55 | 56 | def get_response(intents_list): 57 | intents_json = json.load(open('model/intents.json')) 58 | 59 | tag = intents_list[0]['intent'] 60 | list_of_intents = intents_json['intents'] 61 | 62 | for i in list_of_intents: 63 | if i['tag'] == tag: 64 | result = random.choice(i['responses']) 65 | break 66 | 67 | return result 68 | -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Preparation/chatbot.py: -------------------------------------------------------------------------------- 1 | import random 2 | import json 3 | import pickle 4 | import numpy as np 5 | 6 | import nltk 7 | from nltk.stem import WordNetLemmatizer 8 | 9 | from tensorflow.keras.models import load_model 10 | 11 | lemmatizer = WordNetLemmatizer() 12 | intents = json.load(open('intents.json')) 13 | 14 | words = pickle.load(open('model/words.pkl', 'rb')) 15 | classes = pickle.load(open('model/classes.pkl', 'rb')) 16 | model = load_model('model/chatbot_model.keras') 17 | 18 | 19 | def clean_up_sentence(sentence): 20 | sentence_words = nltk.word_tokenize(sentence) 21 | sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words] 22 | Intelligent Web Chatbot 23 | return sentence_words 24 | 25 | 26 | def bag_of_words(sentence): 27 | sentence_words = clean_up_sentence(sentence) 28 | bag = [0] * len(words) 29 | for w in sentence_words: 30 | for i, word in enumerate(words): 31 | if word == w: 32 | bag[i] = 1 33 | return np.array(bag) 34 | 35 | 36 | def predict_class(sentence): 37 | bow = bag_of_words(sentence) 38 | res = model.predict(np.array([bow]))[0] 39 | ERROR_THRESHOLD = 0.25 40 | 41 | results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD] 42 | results.sort(key=lambda x: x[1], reverse=True) 43 | 44 | return_list = [] 45 | 46 | for r in results: 47 | return_list.append({'intent': classes[r[0]], 'probability': str(r[1])}) 48 | 49 | return 
return_list 50 | 51 | 52 | def get_response(intents_list, intents_json): 53 | tag = intents_list[0]['intent'] 54 | list_of_intents = intents_json['intents'] 55 | 56 | for i in list_of_intents: 57 | if i['tag'] == tag: 58 | result = random.choice(i['responses']) 59 | break 60 | 61 | return result 62 | 63 | 64 | while True: 65 | message = input('') 66 | ints = predict_class(message) 67 | res = get_response(ints, intents) 68 | print(res) -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Preparation/intents.json: -------------------------------------------------------------------------------- 1 | {"intents": [ 2 | {"tag": "greeting", 3 | "patterns": ["Hi", "How are you", "Is anyone there?", "Hello", "Good day", "Whats up", "Hey", "greetings"], 4 | "responses": ["Hello!", "Good to see you again!", "Hi there, how can I help?"], 5 | "context_set": "" 6 | }, 7 | {"tag": "goodbye", 8 | "patterns": ["cya", "See you later", "Goodbye", "I am Leaving", "Have a Good day", "bye", "cao", "see ya"], 9 | "responses": ["Sad to see you go :(", "Talk to you later", "Goodbye!"], 10 | "context_set": "" 11 | }, 12 | {"tag": "programming", 13 | "patterns": ["What is progamming?", "What is coding?", "Tell me about programming", "Tell me about coding", "What is software development?"], 14 | "responses": ["Programming, coding or software development, means writing computer code to automate tasks."], 15 | "context_set": "" 16 | }, 17 | {"tag": "how", 18 | "patterns": ["How are you?", "How is your day?", "How do you feel?", "Are you good?"], 19 | "responses": ["I am fine, thanks for asking!", "I feel great!"], 20 | "context_set": "" 21 | }, 22 | {"tag": "flask", 23 | "patterns": ["What is Flask?", "Do you know about Flask?", "Can you tell me something about Flask?", "Explain Flask to me!"], 24 | "responses": ["Flask is a Python web development framework, which is very simple and minimalistic."], 25 | "context_set": "" 26 | } 27 | ] 28 | } -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Preparation/model/chatbot_model.keras: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Intelligent AI Web Chatbot/Preparation/model/chatbot_model.keras -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Preparation/model/classes.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Intelligent AI Web Chatbot/Preparation/model/classes.pkl -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Preparation/model/words.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/Intelligent AI Web Chatbot/Preparation/model/words.pkl -------------------------------------------------------------------------------- /Intelligent AI Web Chatbot/Preparation/model_training.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 4 | 5 | import random 6 | import json 7 | import pickle 8 | 9 | import numpy as np 10 | import nltk 11 | from 
nltk.stem import WordNetLemmatizer 12 | 13 | from tensorflow.keras.models import Sequential 14 | from tensorflow.keras.layers import Dense, Activation, Dropout 15 | from tensorflow.keras.optimizers import SGD 16 | 17 | lemmatizer = WordNetLemmatizer() 18 | 19 | intents = json.load(open('intents.json')) 20 | 21 | words = [] 22 | classes = [] 23 | documents = [] 24 | ignore_letters = ['?', '!', '.', ','] 25 | 26 | for intent in intents['intents']: 27 | for pattern in intent['patterns']: 28 | word_list = nltk.word_tokenize(pattern) 29 | words.extend(word_list) 30 | documents.append((word_list, intent['tag'])) 31 | if intent['tag'] not in classes: 32 | classes.append(intent['tag']) 33 | 34 | words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters] 35 | words = sorted(set(words)) 36 | 37 | classes = sorted(set(classes)) 38 | 39 | pickle.dump(words, open('model/words.pkl', 'wb')) 40 | pickle.dump(classes, open('model/classes.pkl', 'wb')) 41 | 42 | training = [] 43 | output_empty = [0] * len(classes) 44 | 45 | for document in documents: 46 | bag = [] 47 | word_patterns = document[0] 48 | word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns] 49 | 50 | for word in words: 51 | bag.append(1) if word in word_patterns else bag.append(0) 52 | 53 | output_row = list(output_empty) 54 | output_row[classes.index(document[1])] = 1 55 | training.append([bag, output_row]) 56 | 57 | random.shuffle(training) 58 | training = np.array(training) 59 | 60 | train_x = list(training[:, 0]) 61 | train_y = list(training[:, 1]) 62 | 63 | model = Sequential() 64 | model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu')) 65 | model.add(Dropout(0.5)) 66 | model.add(Dense(64, activation='relu')) 67 | model.add(Dropout(0.5)) 68 | model.add(Dense(len(train_y[0]), activation='softmax')) 69 | 70 | sgd = SGD(lr=0.01, weight_decay=1e-6, momentum=0.9, nesterov=True) 71 | model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) 72 | 73 | model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1) 74 | 75 | model.save('model/chatbot_model.keras') 76 | -------------------------------------------------------------------------------- /K-Nearest Neighbors From Scratch/main.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from collections import Counter 4 | 5 | points = {'blue': [[2,4], [1,3], [2,3], [3,2], [2,1]], 6 | 'orange': [[5,6], [4,5], [4,6], [6,6], [5,4]]} 7 | 8 | new_point = [3,3] 9 | 10 | 11 | def euclidean_distance(p, q): 12 | return np.sqrt(np.sum((np.array(p) - np.array(q)) ** 2)) 13 | 14 | 15 | class KNearestNeighbors: 16 | 17 | def __init__(self, k=3): 18 | self.k = k 19 | self.points = None 20 | 21 | def fit(self, points): 22 | self.points = points 23 | 24 | def predict(self, new_point): 25 | distances = [] 26 | 27 | for category in self.points: 28 | for point in self.points[category]: 29 | distance = euclidean_distance(point, new_point) 30 | distances.append([distance, category]) 31 | 32 | categories = [category[1] for category in sorted(distances)[:self.k]] 33 | result = Counter(categories).most_common(1)[0][0] 34 | return result 35 | 36 | 37 | clf = KNearestNeighbors(k=3) 38 | clf.fit(points) 39 | print(clf.predict(new_point)) 40 | 41 | # Visualize KNN Distances 42 | 43 | ax = plt.subplot() 44 | ax.grid(False, color='#000000') 45 | 46 | ax.set_facecolor('black') 47 | ax.figure.set_facecolor('#121212') 48 
| ax.tick_params(axis='x', colors='white') 49 | ax.tick_params(axis='y', colors='white') 50 | 51 | for point in points['blue']: 52 | ax.scatter(point[0], point[1], color='#104DCA', s=60) 53 | 54 | for point in points['orange']: 55 | ax.scatter(point[0], point[1], color='#EF6C35', s=60) 56 | 57 | new_class = clf.predict(new_point) 58 | color = '#EF6C35' if new_class == 'orange' else '#104DCA' 59 | ax.scatter(new_point[0], new_point[1], color=color, marker='*', s=200, zorder=100) 60 | 61 | for point in points['blue']: 62 | ax.plot([new_point[0], point[0]], [new_point[1], point[1]], color='#104DCA', linestyle='--', linewidth=1) 63 | 64 | for point in points['orange']: 65 | ax.plot([new_point[0], point[0]], [new_point[1], point[1]], color='#EF6C35', linestyle='--', linewidth=1) 66 | 67 | plt.show() 68 | 69 | # 3D Example 70 | 71 | points = {'blue': [[2, 4, 3], [1, 3, 5], [2, 3, 1], [3, 2, 3], [2, 1, 6]], 72 | 'orange': [[5, 6, 5], [4, 5, 2], [4, 6, 1], [6, 6, 1], [5, 4, 6], [10, 10, 4]]} 73 | 74 | new_point = [3, 3, 4] 75 | 76 | clf = KNearestNeighbors(k=3) 77 | clf.fit(points) 78 | print(clf.predict(new_point)) 79 | 80 | fig = plt.figure(figsize=(12, 12)) 81 | ax = fig.add_subplot(projection='3d') 82 | ax.grid(True, color='#323232') 83 | 84 | ax.set_facecolor('black') 85 | ax.figure.set_facecolor('#121212') 86 | ax.tick_params(axis='x', colors='white') 87 | ax.tick_params(axis='y', colors='white') 88 | 89 | for point in points['blue']: 90 | ax.scatter(point[0], point[1], point[2], color='#104DCA', s=60) 91 | 92 | for point in points['orange']: 93 | ax.scatter(point[0], point[1], point[2], color='#EF6C35', s=60) 94 | 95 | new_class = clf.predict(new_point) 96 | color = '#EF6C35' if new_class == 'orange' else '#104DCA' 97 | ax.scatter(new_point[0], new_point[1], new_point[2], color=color, marker='*', s=200, zorder=100) 98 | 99 | for point in points['blue']: 100 | ax.plot([new_point[0], point[0]], [new_point[1], point[1]], [new_point[2], point[2]], color='#104DCA', linestyle='--', linewidth=1) 101 | 102 | for point in points['orange']: 103 | ax.plot([new_point[0], point[0]], [new_point[1], point[1]], [new_point[2], point[2]], color='#EF6C35', linestyle='--', linewidth=1) 104 | 105 | plt.show() 106 | 107 | 108 | 109 | -------------------------------------------------------------------------------- /LLM Development/main.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | 7 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 8 | 9 | with open('shakespeare.txt', 'r') as f: 10 | text = f.read().lower()[:50000] 11 | 12 | chars = sorted(set(text)) 13 | vocab_size = len(chars) 14 | 15 | char2idx = {c: i for i, c in enumerate(chars)} 16 | idx2char = {i: c for c, i in char2idx.items()} 17 | 18 | seq_length = 100 19 | step_size = 1 20 | data = [(text[i:i+seq_length], text[i+seq_length]) for i in range(0, len(text)-seq_length, step_size)] 21 | 22 | X = torch.tensor([[char2idx[c] for c in seq] for seq, _ in data]).to(device) 23 | y = torch.tensor([char2idx[c] for _, c in data]).to(device) 24 | 25 | 26 | class CharLSTM(nn.Module): 27 | def __init__(self, vocab_size, hidden_size, num_layers=1): 28 | super().__init__() 29 | self.embed = nn.Embedding(vocab_size, hidden_size) 30 | self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True) 31 | self.fc = nn.Linear(hidden_size, vocab_size) 32 | 33 | def forward(self, x, hidden=None): 34 | x = 
self.embed(x) 35 | out, hidden = self.lstm(x, hidden) 36 | out = self.fc(out[:, -1, :]) 37 | 38 | return out, hidden 39 | 40 | 41 | model = CharLSTM(vocab_size, hidden_size=256).to(device) 42 | optimizer = optim.Adam(model.parameters(), lr=0.0003) 43 | criterion = nn.CrossEntropyLoss() 44 | 45 | for epoch in range(10): 46 | model.train() 47 | running_loss = 0.0 48 | 49 | for i in range(0, len(X), 64): 50 | x_batch = X[i:i+64].to(device) 51 | y_batch = y[i:i+64].to(device) 52 | 53 | if len(x_batch) == 0: 54 | continue 55 | 56 | optimizer.zero_grad() 57 | output, _ = model(x_batch) 58 | loss = criterion(output, y_batch) 59 | loss.backward() 60 | optimizer.step() 61 | 62 | running_loss += loss.item() 63 | 64 | print(f'Epoch {epoch+1}/20, Loss: {running_loss:.4f}') 65 | 66 | 67 | def generate_text(model, start_seq, length=200): 68 | model.eval() 69 | 70 | input_seq = torch.tensor([[char2idx[c] for c in start_seq]]).to(device) 71 | hidden = None 72 | 73 | result = start_seq 74 | 75 | for _ in range(length): 76 | output, hidden = model(input_seq, hidden) 77 | probs = torch.softmax(output, dim=-1).squeeze() 78 | next_idx = torch.multinomial(probs, 1).item() 79 | next_char = idx2char[next_idx] 80 | result += next_char 81 | 82 | input_seq = torch.tensor([[next_idx]]).to(device) 83 | 84 | return result 85 | 86 | 87 | print(generate_text(model, 'he was going with')) 88 | print(generate_text(model, 'why is it')) 89 | print(generate_text(model, 'we must all')) 90 | 91 | torch.save(model.state_dict(), 'lstm_model.pth') 92 | -------------------------------------------------------------------------------- /LLM Development/main2.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | 7 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 8 | 9 | with open('shakespeare.txt', 'r') as f: 10 | text = f.read().lower()[:200000] 11 | 12 | 13 | words = text.split() 14 | vocab = sorted(set(words)) 15 | 16 | word2idx = {w: i for i, w in enumerate(vocab)} 17 | idx2word = {i: w for w, i in word2idx.items()} 18 | 19 | vocab_size = len(vocab) 20 | 21 | seq_length = 5 22 | data = [(words[i:i+seq_length], words[i+seq_length]) for i in range(0, len(words) - seq_length)] 23 | 24 | X = torch.tensor([[word2idx[w] for w in seq] for seq, _ in data]).to(device) 25 | y = torch.tensor([word2idx[w] for _, w in data]).to(device) 26 | 27 | 28 | class CharRNNAttention(nn.Module): 29 | def __init__(self, vocab_size, embedding_dim, hidden_dim): 30 | super().__init__() 31 | self.embed = nn.Embedding(vocab_size, embedding_dim) 32 | self.rnn = nn.RNN(embedding_dim, hidden_dim, batch_first=True) 33 | self.attention = nn.Linear(hidden_dim, 1) 34 | self.fc = nn.Linear(hidden_dim, vocab_size) 35 | 36 | def forward(self, x): 37 | x = self.embed(x) 38 | out, _ = self.rnn(x) 39 | attn_weights = nn.functional.softmax(self.attention(out).squeeze(2), dim=1) 40 | 41 | context = torch.sum(attn_weights.unsqueeze(2) * out, dim=1) 42 | out = self.fc(context) 43 | 44 | return out 45 | 46 | 47 | model = CharRNNAttention(vocab_size, 128, 256).to(device) 48 | 49 | optimizer = optim.Adam(model.parameters(), lr=0.003) 50 | criterion = nn.CrossEntropyLoss() 51 | 52 | for epoch in range(20): 53 | model.train() 54 | running_loss = 0.0 55 | 56 | for i in range(0, len(X), 64): 57 | x_batch = X[i:i+64].to(device) 58 | y_batch = y[i:i+64].to(device) 59 | 60 | if len(x_batch) == 0: 61 | continue 62 | 63 | 
optimizer.zero_grad() 64 | outputs = model(x_batch) 65 | loss = criterion(outputs, y_batch) 66 | loss.backward() 67 | optimizer.step() 68 | 69 | running_loss += loss.item() 70 | 71 | print(f'Epoch {epoch+1}/20, Loss: {running_loss:.4f}') 72 | 73 | 74 | def generate_text(model, start_words, num_words=20): 75 | model.eval() 76 | generated = start_words[:] 77 | 78 | for _ in range(num_words): 79 | current_seq = generated[-seq_length:] if len(generated) >= seq_length else generated 80 | 81 | if len(current_seq) < seq_length: 82 | current_seq = [""] * (seq_length - len(current_seq)) + current_seq 83 | 84 | idx_seq = [word2idx[w] if w in word2idx else 0 for w in current_seq] 85 | input_seq = torch.tensor([idx_seq], dtype=torch.long).to(device) 86 | 87 | with torch.no_grad(): 88 | logits = model(input_seq) 89 | probs = torch.softmax(logits, dim=-1).squeeze(0) 90 | next_idx = torch.multinomial(probs, 1).item() 91 | 92 | next_word = idx2word[next_idx] 93 | generated.append(next_word) 94 | 95 | return ' '.join(generated) 96 | 97 | print(generate_text(model, ['he', 'was', 'going', 'with'], num_words=30)) 98 | print(generate_text(model, ['why', 'is', 'it'], num_words=30)) 99 | print(generate_text(model, ['we', 'must', 'all'], num_words=30)) 100 | 101 | torch.save(model.state_dict(), 'attention_model.pth') 102 | -------------------------------------------------------------------------------- /LLM Development/main3.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import wikipediaapi 3 | from datasets import Dataset 4 | from transformers import GPT2LMHeadModel, GPT2Tokenizer, Trainer, TrainingArguments, DataCollatorForLanguageModeling, pipeline 5 | 6 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 7 | 8 | model_name = "gpt2" 9 | output_dir = "./gpt2-finetuned" 10 | wiki_pages = ["Python (programming language)"] 11 | generation_prompt = "Python is" 12 | max_length_generation = 150 13 | 14 | tokenizer = GPT2Tokenizer.from_pretrained(model_name) 15 | model = GPT2LMHeadModel.from_pretrained(model_name).to(device) 16 | if tokenizer.pad_token is None: 17 | tokenizer.pad_token = tokenizer.eos_token 18 | model.config.pad_token_id = model.config.eos_token_id 19 | 20 | base_outputs = pipeline('text-generation', model=model, tokenizer=tokenizer, device=device)( 21 | generation_prompt, 22 | max_length=max_length_generation, 23 | pad_token_id=tokenizer.eos_token_id 24 | ) 25 | 26 | wiki = wikipediaapi.Wikipedia(language='en', extract_format=wikipediaapi.ExtractFormat.WIKI, user_agent="MyScript/1.0") 27 | text = "" 28 | for title in wiki_pages: 29 | page = wiki.page(title) 30 | if page.exists(): 31 | t = page.text.replace("\n\n", "\n").strip() 32 | if len(t) > 100: 33 | text += t + " " 34 | text = text.strip() 35 | 36 | tokenized_text = tokenizer(text)["input_ids"] 37 | block_size = 128 38 | total_length = (len(tokenized_text) // block_size) * block_size # round to nearest multiple of block size 39 | tokenized_text = tokenized_text[:total_length] 40 | chunks = [tokenized_text[i : i + block_size] for i in range(0, total_length, block_size)] 41 | dataset = Dataset.from_dict({"input_ids": chunks, "labels": chunks.copy()}) 42 | 43 | training_args = TrainingArguments( 44 | output_dir=output_dir, 45 | num_train_epochs=15, 46 | per_device_train_batch_size=2, 47 | learning_rate=5e-5, 48 | fp16=torch.cuda.is_available() 49 | ) 50 | data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) 51 | 52 | trainer = Trainer( 53 | 
model=model, 54 | args=training_args, 55 | train_dataset=dataset, 56 | data_collator=data_collator, 57 | tokenizer=tokenizer, 58 | ) 59 | trainer.train() 60 | trainer.save_model(output_dir) 61 | tokenizer.save_pretrained(output_dir) 62 | 63 | fine_tuned_model = GPT2LMHeadModel.from_pretrained(output_dir).to(device) 64 | fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(output_dir) 65 | finetuned_outputs = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer, device=device)( 66 | generation_prompt, 67 | max_length=max_length_generation, 68 | pad_token_id=fine_tuned_tokenizer.eos_token_id 69 | ) 70 | 71 | 72 | 73 | print("\n--- BASE MODEL OUTPUT ---") 74 | print(base_outputs[0]['generated_text']) 75 | print("\n--- FINE-TUNED MODEL OUTPUT ---") 76 | print(finetuned_outputs[0]['generated_text']) 77 | -------------------------------------------------------------------------------- /LLM Development/main4.py: -------------------------------------------------------------------------------- 1 | from llama_cpp import Llama 2 | 3 | llm = Llama.from_pretrained( 4 | repo_id='TheBloke/Llama-2-7B-GGUF', 5 | filename='llama-2-7b.Q2_K.gguf' 6 | ) 7 | 8 | prompt = """ 9 | Classify the sentiment of the following sentences as Positive, Negative, or Neutral. 10 | 11 | Sentence: This is the best pizza I've ever had! 12 | Sentiment: Positive 13 | 14 | Sentence: The movie was quite boring and predictable. 15 | Sentiment: Negative 16 | 17 | Sentence: The report is due by 5 PM today. 18 | Sentiment: Neutral 19 | 20 | Sentence: I'm really disappointed with the customer service. 21 | Sentiment:""" 22 | 23 | output = llm( 24 | prompt, 25 | max_tokens=10, 26 | stop=['\n', 'Sentence:', 'Sentiment:'], 27 | echo=False 28 | ) 29 | 30 | predicted_sentiment = output['choices'][0]['text'].strip() 31 | print(predicted_sentiment) 32 | -------------------------------------------------------------------------------- /LLM Wrappers/LaTeX Example/app.py: -------------------------------------------------------------------------------- 1 | # sudo apt-get install texlive-latex-extra 2 | # sudo apt-get install dvipng 3 | 4 | from flask import Flask, render_template, request, jsonify 5 | from openai import OpenAI 6 | 7 | import os 8 | from sympy import preview 9 | from io import BytesIO 10 | import base64 11 | 12 | from dotenv import load_dotenv 13 | load_dotenv() 14 | 15 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) 16 | 17 | app = Flask(__name__) 18 | 19 | SYSTEM_PROMPT = "You are a LaTeX generation model. The user will give you the name or description of a formula. If you know it you provide the LaTeX code that visualizes it. Nothing more. No text or explanation. Under no circumstances EVER provide anything but LaTeX code. No clarification. No context. Nothing. Make sure the formulas are properly enclosed using dollar signs." 
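# The LaTeX-only constraint above matters because index() below passes the raw
# completion straight to sympy.preview(), which renders it via LaTeX/dvipng and
# expects nothing but LaTeX markup.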
20 | 21 | @app.route("/", methods=["GET", "POST"]) 22 | def index(): 23 | latex_code = None 24 | latex_image = None 25 | 26 | if request.method == "POST": 27 | formula_description = request.form.get("formula_description") 28 | response = client.chat.completions.create(model="gpt-4", 29 | messages=[ 30 | {"role": "system", "content": SYSTEM_PROMPT}, 31 | {"role": "user", "content": formula_description}, 32 | ]) 33 | latex_code = response.choices[0].message.content.strip() 34 | 35 | buffer = BytesIO() 36 | preview(latex_code, viewer="BytesIO", outputbuffer=buffer, euler=False, dvioptions=['-D', "300"]) 37 | latex_image = base64.b64encode(buffer.getvalue()).decode('utf-8') 38 | 39 | return render_template("index.html", latex_code=latex_code, latex_image=latex_image) 40 | 41 | if __name__ == "__main__": 42 | app.run(debug=True) 43 | 44 | -------------------------------------------------------------------------------- /LLM Wrappers/LaTeX Example/static/styles.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: Arial, sans-serif; 3 | margin: 0; 4 | padding: 0; 5 | display: flex; 6 | justify-content: center; 7 | align-items: center; 8 | height: 100vh; 9 | background-color: #f9f9f9; 10 | } 11 | 12 | .container { 13 | text-align: center; 14 | background: #fff; 15 | padding: 20px; 16 | border-radius: 8px; 17 | box-shadow: 0 0 10px rgba(0, 0, 0, 0.1); 18 | } 19 | 20 | input[type="text"] { 21 | width: 80%; 22 | padding: 10px; 23 | margin-bottom: 10px; 24 | border: 1px solid #ccc; 25 | border-radius: 4px; 26 | } 27 | 28 | button { 29 | padding: 10px 20px; 30 | background-color: #007BFF; 31 | color: #fff; 32 | border: none; 33 | border-radius: 4px; 34 | cursor: pointer; 35 | } 36 | 37 | button:hover { 38 | background-color: #0056b3; 39 | } 40 | 41 | .result { 42 | margin-top: 20px; 43 | } 44 | 45 | textarea { 46 | width: 80%; 47 | height: 100px; 48 | margin-top: 10px; 49 | border: 1px solid #ccc; 50 | border-radius: 4px; 51 | padding: 10px; 52 | } 53 | 54 | -------------------------------------------------------------------------------- /LLM Wrappers/LaTeX Example/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | LaTeX Formula Generator 7 | 8 | 9 | 10 |
11 |

LaTeX Formula Generator

12 |
13 | 14 | 15 |
16 | {% if latex_image %} 17 |
18 |
19 |
20 |

Generated Formula:

21 | LaTeX Formula

22 |
23 |
24 | 25 | 26 |
27 | {% elif latex_code %} 28 |

No formula found for the given description.

29 | {% endif %} 30 |
31 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /LLM Wrappers/Translator Example/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from openai import OpenAI 4 | from dotenv import load_dotenv 5 | from flask import Flask, render_template, request 6 | 7 | load_dotenv() 8 | 9 | client = OpenAI(api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | app = Flask(__name__) 12 | 13 | languages = [ 14 | "English", "Spanish", "French", "German", "Chinese", "Japanese", 15 | "Korean", "Italian", "Portuguese", "Russian", "Arabic" 16 | ] 17 | 18 | system_prompt_template = "The user will provide you with a text in {in_language}. Your job is to return the same content in {out_language}. Nothing else. Under no circumstances ever add context, clarifications or anything like that. Just the translation and nothing else." 19 | 20 | @app.route("/", methods=["GET", "POST"]) 21 | def index(): 22 | translation = "" 23 | if request.method == "POST": 24 | in_language = request.form["in_language"] 25 | out_language = request.form["out_language"] 26 | user_text = request.form["user_text"] 27 | 28 | system_prompt = system_prompt_template.format(in_language=in_language, out_language=out_language) 29 | 30 | try: 31 | response = client.chat.completions.create(model="gpt-3.5-turbo", 32 | messages=[ 33 | {"role": "system", "content": system_prompt}, 34 | {"role": "user", "content": user_text} 35 | ]) 36 | translation = response.choices[0].message.content.strip() 37 | except Exception as e: 38 | translation = f"Error: {e}" 39 | 40 | return render_template("index.html", languages=languages, translation=translation) 41 | 42 | if __name__ == "__main__": 43 | app.run(debug=True) 44 | 45 | -------------------------------------------------------------------------------- /LLM Wrappers/Translator Example/static/styles.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: 'Roboto', sans-serif; 3 | margin: 0; 4 | padding: 0; 5 | background-color: #f4f4f9; 6 | color: #333; 7 | line-height: 1.6; 8 | display: flex; 9 | justify-content: center; 10 | align-items: center; 11 | height: 100vh; 12 | } 13 | 14 | .container { 15 | background: #fff; 16 | border-radius: 10px; 17 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 18 | width: 100%; 19 | max-width: 600px; 20 | padding: 20px 30px; 21 | box-sizing: border-box; 22 | } 23 | 24 | h1 { 25 | color: #333; 26 | font-size: 24px; 27 | margin-bottom: 20px; 28 | text-align: center; 29 | } 30 | 31 | form label { 32 | font-weight: bold; 33 | margin-top: 10px; 34 | display: block; 35 | } 36 | 37 | textarea, select, button { 38 | width: 100%; 39 | margin-top: 5px; 40 | padding: 10px; 41 | border: 1px solid #ddd; 42 | border-radius: 5px; 43 | font-size: 16px; 44 | box-sizing: border-box; 45 | } 46 | 47 | textarea { 48 | resize: vertical; 49 | min-height: 100px; 50 | } 51 | 52 | button { 53 | background: #007bff; 54 | color: white; 55 | border: none; 56 | cursor: pointer; 57 | margin-top: 20px; 58 | font-size: 16px; 59 | transition: background 0.3s ease; 60 | } 61 | 62 | button:hover { 63 | background: #0056b3; 64 | } 65 | 66 | h2 { 67 | margin-top: 30px; 68 | font-size: 20px; 69 | color: #007bff; 70 | } 71 | 72 | p { 73 | padding: 10px; 74 | background: #f9f9f9; 75 | border: 1px solid #ddd; 76 | border-radius: 5px; 77 | white-space: pre-wrap; 78 | } 79 | 80 | @media (max-width: 768px) { 81 | .container { 82 | padding: 15px; 83 | } 84 | 85 | 
button { 86 | font-size: 14px; 87 | } 88 | } 89 | 90 | -------------------------------------------------------------------------------- /LLM Wrappers/Translator Example/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | LLM Translator 7 | 8 | 9 | 10 | 11 |
12 |

LLM Translator

13 |
14 | 15 | 20 | 21 | 22 | 27 | 28 | 29 | 30 | 31 | 32 |
33 | 34 | {% if translation %} 35 |

Translation:

36 |

{{ translation }}

37 | {% endif %} 38 |
39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /LLM Wrappers/YouTube Example/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from openai import OpenAI 4 | from dotenv import load_dotenv 5 | from flask import Flask, request, render_template 6 | 7 | load_dotenv() 8 | 9 | client = OpenAI(api_key=os.getenv('OPENAI_API_KEY')) 10 | 11 | app = Flask(__name__) 12 | 13 | SYSTEM_PROMPT = ( 14 | "The user will provide you with information about a YouTube video. This can but does not have to include title, description and other notes. As a response return tags for YouTube to optimize for performance. Your answer should only include tags separated by commas. Nothing else. Under no circumstances EVER provide context, clarification or anything else. Just the tags separated by commas. Aim for roughly 300-400 characters." 15 | ) 16 | 17 | @app.route("/") 18 | def index(): 19 | return render_template("index.html") 20 | 21 | @app.route("/generate_tags", methods=["POST"]) 22 | def generate_tags(): 23 | user_input = request.form["video_info"] 24 | try: 25 | response = client.chat.completions.create(model="gpt-3.5-turbo", 26 | messages=[ 27 | {"role": "system", "content": SYSTEM_PROMPT}, 28 | {"role": "user", "content": user_input}, 29 | ]) 30 | tags = response.choices[0].message.content 31 | return tags 32 | except Exception as e: 33 | return f"Error: {str(e)}" 34 | 35 | if __name__ == "__main__": 36 | app.run(debug=True) 37 | 38 | -------------------------------------------------------------------------------- /LLM Wrappers/YouTube Example/static/styles.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: 'Arial', sans-serif; 3 | background-color: #f4f4f9; 4 | color: #333; 5 | margin: 0; 6 | padding: 0; 7 | } 8 | 9 | .container { 10 | max-width: 600px; 11 | margin: 50px auto; 12 | background: #ffffff; 13 | padding: 20px; 14 | border-radius: 8px; 15 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 16 | } 17 | 18 | h1 { 19 | text-align: center; 20 | color: #333; 21 | } 22 | 23 | label { 24 | font-size: 1.1rem; 25 | font-weight: bold; 26 | } 27 | 28 | textarea { 29 | width: 100%; 30 | border: 1px solid #ccc; 31 | border-radius: 4px; 32 | padding: 10px; 33 | margin-top: 10px; 34 | font-size: 1rem; 35 | box-sizing: border-box; 36 | } 37 | 38 | button { 39 | display: block; 40 | width: 100%; 41 | padding: 10px; 42 | margin-top: 15px; 43 | font-size: 1.1rem; 44 | background-color: #007bff; 45 | color: #fff; 46 | border: none; 47 | border-radius: 4px; 48 | cursor: pointer; 49 | transition: background-color 0.3s ease; 50 | } 51 | 52 | button:hover { 53 | background-color: #0056b3; 54 | } 55 | 56 | .output-container { 57 | margin-top: 20px; 58 | } 59 | 60 | textarea[readonly] { 61 | background-color: #f9f9f9; 62 | } 63 | 64 | .alert { 65 | color: green; 66 | font-size: 0.9rem; 67 | text-align: center; 68 | margin-top: 10px; 69 | display: none; 70 | } 71 | -------------------------------------------------------------------------------- /LLM Wrappers/YouTube Example/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | YouTube Tag Generator 7 | 8 | 9 | 10 |
11 |

YouTube Tag Generator

12 |
13 | 14 | 15 | 16 |
17 |
18 |

Generated Tags:

19 | 20 | 21 |
Tags copied to clipboard!
22 |
23 |
24 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /Live Face Recognition/main.py: -------------------------------------------------------------------------------- 1 | import threading 2 | 3 | import cv2 4 | from deepface import DeepFace 5 | 6 | cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) 7 | 8 | cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) 9 | cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) 10 | 11 | counter = 0 12 | 13 | reference_img = cv2.imread("reference.jpg") # use your own image here 14 | 15 | face_match = False 16 | 17 | 18 | def check_face(frame): 19 | global face_match 20 | try: 21 | if DeepFace.verify(frame, reference_img.copy())['verified']: 22 | face_match = True 23 | else: 24 | face_match = False 25 | except ValueError: 26 | face_match = False 27 | 28 | 29 | while True: 30 | ret, frame = cap.read() 31 | 32 | if ret: 33 | if counter % 30 == 0: 34 | try: 35 | threading.Thread(target=check_face, args=(frame.copy(),)).start() 36 | except ValueError: 37 | pass 38 | counter += 1 39 | if face_match: 40 | cv2.putText(frame, "MATCH!", (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3) 41 | else: 42 | cv2.putText(frame, "NO MATCH!", (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3) 43 | 44 | cv2.imshow('video', frame) 45 | 46 | key = cv2.waitKey(1) 47 | if key == ord('q'): 48 | break 49 | 50 | cv2.destroyAllWindows() 51 | 52 | -------------------------------------------------------------------------------- /LoRA Fine-Tuning/frobinate_test.jsonl: -------------------------------------------------------------------------------- 1 | {"instruction":"Frobinate 7","response":"Step 1 – Multiply the digits: 7 = 7.\nStep 2 – Add the product to the original: 7 + 7 = 14.\nAnswer: 14"} 2 | {"instruction":"Frobinate 9","response":"Step 1 – Multiply the digits: 9 = 9.\nStep 2 – Add the product to the original: 9 + 9 = 18.\nAnswer: 18"} 3 | {"instruction":"Frobinate 10","response":"Step 1 – Multiply the digits: 1 × 0 = 0.\nStep 2 – Add the product to the original: 10 + 0 = 10.\nAnswer: 10"} 4 | {"instruction":"Frobinate 15","response":"Step 1 – Multiply the digits: 1 × 5 = 5.\nStep 2 – Add the product to the original: 15 + 5 = 20.\nAnswer: 20"} 5 | {"instruction":"Frobinate 20","response":"Step 1 – Multiply the digits: 2 × 0 = 0.\nStep 2 – Add the product to the original: 20 + 0 = 20.\nAnswer: 20"} 6 | {"instruction":"Frobinate 22","response":"Step 1 – Multiply the digits: 2 × 2 = 4.\nStep 2 – Add the product to the original: 22 + 4 = 26.\nAnswer: 26"} 7 | {"instruction":"Frobinate 24","response":"Step 1 – Multiply the digits: 2 × 4 = 8.\nStep 2 – Add the product to the original: 24 + 8 = 32.\nAnswer: 32"} 8 | {"instruction":"Frobinate 26","response":"Step 1 – Multiply the digits: 2 × 6 = 12.\nStep 2 – Add the product to the original: 26 + 12 = 38.\nAnswer: 38"} 9 | {"instruction":"Frobinate 28","response":"Step 1 – Multiply the digits: 2 × 8 = 16.\nStep 2 – Add the product to the original: 28 + 16 = 44.\nAnswer: 44"} 10 | {"instruction":"Frobinate 30","response":"Step 1 – Multiply the digits: 3 × 0 = 0.\nStep 2 – Add the product to the original: 30 + 0 = 30.\nAnswer: 30"} 11 | {"instruction":"Frobinate 102","response":"Step 1 – Multiply the digits: 1 × 0 × 2 = 0.\nStep 2 – Add the product to the original: 102 + 0 = 102.\nAnswer: 102"} 12 | {"instruction":"Frobinate 111","response":"Step 1 – Multiply the digits: 1 × 1 × 1 = 1.\nStep 2 – Add the product to the original: 111 + 1 = 112.\nAnswer: 112"} 13 | {"instruction":"Frobinate 126","response":"Step 1 
– Multiply the digits: 1 × 2 × 6 = 12.\nStep 2 – Add the product to the original: 126 + 12 = 138.\nAnswer: 138"} 14 | {"instruction":"Frobinate 219","response":"Step 1 – Multiply the digits: 2 × 1 × 9 = 18.\nStep 2 – Add the product to the original: 219 + 18 = 237.\nAnswer: 237"} 15 | {"instruction":"Frobinate 234","response":"Step 1 – Multiply the digits: 2 × 3 × 4 = 24.\nStep 2 – Add the product to the original: 234 + 24 = 258.\nAnswer: 258"} 16 | {"instruction":"Frobinate 357","response":"Step 1 – Multiply the digits: 3 × 5 × 7 = 105.\nStep 2 – Add the product to the original: 357 + 105 = 462.\nAnswer: 462"} 17 | {"instruction":"Frobinate 468","response":"Step 1 – Multiply the digits: 4 × 6 × 8 = 192.\nStep 2 – Add the product to the original: 468 + 192 = 660.\nAnswer: 660"} 18 | {"instruction":"Frobinate 579","response":"Step 1 – Multiply the digits: 5 × 7 × 9 = 315.\nStep 2 – Add the product to the original: 579 + 315 = 894.\nAnswer: 894"} 19 | {"instruction":"Frobinate 680","response":"Step 1 – Multiply the digits: 6 × 8 × 0 = 0.\nStep 2 – Add the product to the original: 680 + 0 = 680.\nAnswer: 680"} 20 | {"instruction":"Frobinate 791","response":"Step 1 – Multiply the digits: 7 × 9 × 1 = 63.\nStep 2 – Add the product to the original: 791 + 63 = 854.\nAnswer: 854"} 21 | 22 | -------------------------------------------------------------------------------- /Local AI Agent/.env: -------------------------------------------------------------------------------- 1 | IMAP_HOST='' 2 | IMAP_USER='' 3 | IMAP_PASSWORD='' 4 | 5 | -------------------------------------------------------------------------------- /Local AI Agent/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from typing import TypedDict 4 | 5 | from dotenv import load_dotenv 6 | from imap_tools import MailBox, AND 7 | 8 | from langchain.chat_models import init_chat_model 9 | from langchain_core.tools import tool 10 | 11 | from langgraph.prebuilt import ToolNode 12 | from langgraph.graph import StateGraph, START, END 13 | 14 | load_dotenv() 15 | 16 | IMAP_HOST = os.getenv('IMAP_HOST') 17 | IMAP_USER = os.getenv('IMAP_USER') 18 | IMAP_PASSWORD = os.getenv('IMAP_PASSWORD') 19 | IMAP_FOLDER = 'INBOX' 20 | 21 | CHAT_MODEL = 'qwen3:8b' 22 | 23 | 24 | class ChatState(TypedDict): 25 | messages: list 26 | 27 | 28 | def connect(): 29 | mail_box = MailBox(IMAP_HOST) 30 | mail_box.login(IMAP_USER, IMAP_PASSWORD, initial_folder=IMAP_FOLDER) 31 | 32 | return mail_box 33 | 34 | 35 | @tool 36 | def list_unread_emails(): 37 | """Return a bullet list of every UNREAD message's UID, subject, date and sender""" 38 | 39 | print('List Unread Emails Tool Called') 40 | 41 | with connect() as mb: 42 | unread = list(mb.fetch(criteria = AND(seen=False), headers_only=True, mark_seen=False)) 43 | 44 | if not unread: 45 | return 'You have no unread messages.' 46 | 47 | response = json.dumps([ 48 | { 49 | 'uid': mail.uid, 50 | 'date': mail.date.astimezone().strftime('%Y-%m-%d %H:%M'), 51 | 'subject': mail.subject, 52 | 'sender': mail.from_ 53 | } for mail in unread 54 | ]) 55 | 56 | return response 57 | 58 | 59 | @tool 60 | def summarize_email(uid): 61 | """Summarize a single e-mail given it's IMAP UID. 
Return a short summary of the e-mails content / body in plain text.""" 62 | 63 | print('Summarize E-Mail Tool Called on', uid) 64 | 65 | with connect() as mb: 66 | mail = next(mb.fetch(AND(uid=uid), mark_seen=False), None) 67 | 68 | if not mail: 69 | return f'Could not summarize e-mail with UID {uid}.' 70 | 71 | prompt = ( 72 | "Summarize this e-mail concisely:\n\n" 73 | f"Subject: {mail.subject}\n" 74 | f"Sender: {mail.from_}\n" 75 | f"Date: {mail.date}\n\n" 76 | f"{mail.text or mail.html}" 77 | ) 78 | 79 | return raw_llm.invoke(prompt).content 80 | 81 | 82 | 83 | llm = init_chat_model(CHAT_MODEL, model_provider='ollama') 84 | llm = llm.bind_tools([list_unread_emails, summarize_email]) 85 | 86 | raw_llm = init_chat_model(CHAT_MODEL, model_provider='ollama') 87 | 88 | 89 | def llm_node(state): 90 | response = llm.invoke(state['messages']) 91 | return {'messages': state['messages'] + [response]} 92 | 93 | 94 | def router(state): 95 | last_message = state['messages'][-1] 96 | return 'tools' if getattr(last_message, 'tool_calls', None) else 'end' 97 | 98 | 99 | 100 | tool_node = ToolNode([list_unread_emails, summarize_email]) 101 | 102 | 103 | def tools_node(state): 104 | result = tool_node.invoke(state) 105 | 106 | return { 107 | 'messages': state['messages'] + result['messages'] 108 | } 109 | 110 | 111 | 112 | builder = StateGraph(ChatState) 113 | builder.add_node('llm', llm_node) 114 | builder.add_node('tools', tools_node) 115 | builder.add_edge(START, 'llm') 116 | builder.add_edge('tools', 'llm') 117 | builder.add_conditional_edges('llm', router, {'tools': 'tools', 'end': END}) 118 | 119 | graph = builder.compile() 120 | 121 | 122 | if __name__ == '__main__': 123 | state = {'messages': []} 124 | 125 | print('Type an instruction or "quit".\n') 126 | 127 | while True: 128 | user_message = input('> ') 129 | 130 | if user_message.lower() == 'quit': 131 | break 132 | 133 | state['messages'].append({'role': 'user', 'content': user_message}) 134 | 135 | state = graph.invoke(state) 136 | 137 | print(state['messages'][-1].content, '\n') 138 | 139 | 140 | 141 | -------------------------------------------------------------------------------- /MCP Tutorial/my-mcp-server/.python-version: -------------------------------------------------------------------------------- 1 | 3.13 2 | -------------------------------------------------------------------------------- /MCP Tutorial/my-mcp-server/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralNine/youtube-tutorials/46237e565f097c9827d82a76b2f19668f248c5c8/MCP Tutorial/my-mcp-server/README.md -------------------------------------------------------------------------------- /MCP Tutorial/my-mcp-server/main.py: -------------------------------------------------------------------------------- 1 | from uuid import uuid4 2 | from typing import List 3 | 4 | from pydantic import BaseModel, Field 5 | from mcp.server.fastmcp import FastMCP 6 | 7 | todos = {} 8 | 9 | class Todo(BaseModel): 10 | id: str 11 | title: str 12 | completed: bool = False 13 | 14 | class CreateTodoInput(BaseModel): 15 | title: str 16 | 17 | class UpdateTodoInput(BaseModel): 18 | id: str 19 | title: str = None 20 | completed: bool = None 21 | 22 | class GetTodoInput(BaseModel): 23 | id: str 24 | 25 | class DeleteTodoInput(BaseModel): 26 | id: str 27 | 28 | 29 | server = FastMCP(name="todo-server", version="1.0.0") 30 | 31 | 32 | @server.tool(name="create_todo", description="Create a new todo") 33 | def 
create_todo(data: CreateTodoInput) -> Todo: 34 | todo_id = str(uuid4()) 35 | todo = Todo(id=todo_id, title=data.title) 36 | todos[todo_id] = todo 37 | return todo 38 | 39 | 40 | @server.tool(name="list_todos", description="List all todos") 41 | def list_todos() -> List[Todo]: 42 | return list(todos.values()) 43 | 44 | 45 | @server.tool(name="get_todo", description="Retrieve a todo by ID") 46 | def get_todo(data: GetTodoInput) -> Todo: 47 | todo = todos.get(data.id) 48 | if not todo: 49 | raise ValueError(f"Todo with id {data.id} not found") 50 | return todo 51 | 52 | 53 | @server.tool(name="update_todo", description="Update a todo") 54 | def update_todo(data: UpdateTodoInput) -> Todo: 55 | todo = todos.get(data.id) 56 | if not todo: 57 | raise ValueError(f"Todo with id {data.id} not found") 58 | if data.title is not None: 59 | todo.title = data.title 60 | if data.completed is not None: 61 | todo.completed = data.completed 62 | todos[data.id] = todo 63 | return todo 64 | 65 | 66 | @server.tool(name="delete_todo", description="Delete a todo") 67 | def delete_todo(data: DeleteTodoInput) -> dict: 68 | if data.id not in todos: 69 | raise ValueError(f"Todo with id {data.id} not found") 70 | del todos[data.id] 71 | return {"status": "deleted"} 72 | 73 | 74 | if __name__ == '__main__': 75 | server.run(transport='stdio') 76 | -------------------------------------------------------------------------------- /MCP Tutorial/my-mcp-server/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "my-mcp-server" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.13" 7 | dependencies = [ 8 | "mcp[cli]>=1.6.0", 9 | "pydantic>=2.11.3", 10 | ] 11 | -------------------------------------------------------------------------------- /MonsterUI/counter_app.py: -------------------------------------------------------------------------------- 1 | from fasthtml.common import serve, Div, Span 2 | from monsterui.all import H1, H2, Card, Button, Form, Input, ButtonT, Container, ContainerT, Theme, fast_app 3 | 4 | app, rt = fast_app(hdrs=Theme.blue.headers()) 5 | counter = 0 6 | 7 | @rt 8 | def index(): 9 | return Container( 10 | H1("🚀 Counter App", cls="text-3xl font-bold text-center mb-6 mt-6"), 11 | 12 | Card(cls="p-6 text-center mb-4")( 13 | H2("Click the buttons!", cls="mb-4"), 14 | Div(cls="flex items-center justify-center gap-4")( 15 | Button("➖", hx_post="/dec", hx_target="#count"), 16 | Span(id="count", cls="text-2xl font-bold")(counter), 17 | Button("➕", hx_post="/inc", hx_target="#count") 18 | ) 19 | ), 20 | 21 | Card(cls="p-6")( 22 | Form(hx_post="/hello", hx_target="#result")( 23 | Input(name="name", placeholder="Your name", cls="mb-2"), 24 | Button("Say Hello", cls=ButtonT.primary) 25 | ), 26 | Div(id="result", cls="mt-4 font-bold") 27 | ), 28 | 29 | cls=ContainerT.sm 30 | ) 31 | 32 | @rt("/inc", methods=["POST"]) 33 | def inc(): 34 | global counter 35 | counter += 1 36 | return counter 37 | 38 | @rt("/dec", methods=["POST"]) 39 | def dec(): 40 | global counter 41 | counter -= 1 42 | return counter 43 | 44 | @rt("/hello", methods=["POST"]) 45 | def hello(name: str): 46 | return f"Hello {name}! 
👋" 47 | 48 | serve(port=5002) 49 | -------------------------------------------------------------------------------- /News Aggregator/app.py: -------------------------------------------------------------------------------- 1 | import feedparser 2 | from flask import Flask, render_template, request 3 | 4 | 5 | app = Flask(__name__) 6 | 7 | RSS_FEEDS = { 8 | 'Yahoo Finance': 'https://finance.yahoo.com/news/rssindex', 9 | 'Hacker News': 'https://news.ycombinator.com/rss', 10 | 'Wall Street Journal': 'https://feeds.a.dj.com/rss/RSSMarketsMain.xml', 11 | 'CNBC': 'https://search.cnbc.com/rs/search/combinedcms/view.xml?partnerId=wrss01&id=15839069' 12 | } 13 | 14 | 15 | @app.route('/') 16 | def index(): 17 | articles = [] 18 | for source, feed in RSS_FEEDS.items(): 19 | parsed_feed = feedparser.parse(feed) 20 | entries = [(source, entry) for entry in parsed_feed.entries] 21 | articles.extend(entries) 22 | 23 | articles = sorted(articles, key=lambda x: x[1].published_parsed, reverse=True) 24 | 25 | page = request.args.get('page', 1, type=int) 26 | per_page = 10 27 | total_articles = len(articles) 28 | start = (page-1) * per_page 29 | end = start + per_page 30 | paginated_articles = articles[start:end] 31 | 32 | return render_template('index.html', articles=paginated_articles, page=page, 33 | total_pages = total_articles // per_page + 1) 34 | 35 | 36 | @app.route('/search') 37 | def search(): 38 | query = request.args.get('q') 39 | 40 | articles = [] 41 | for source, feed in RSS_FEEDS.items(): 42 | parsed_feed = feedparser.parse(feed) 43 | entries = [(source, entry) for entry in parsed_feed.entries] 44 | articles.extend(entries) 45 | 46 | results = [article for article in articles if query.lower() in article[1].title.lower()] 47 | 48 | return render_template('search_results.html', articles=results, query=query) 49 | 50 | 51 | if __name__ == '__main__': 52 | app.run(debug=True) 53 | -------------------------------------------------------------------------------- /News Aggregator/static/style.css: -------------------------------------------------------------------------------- 1 | /* General Styles */ 2 | body { 3 | font-family: Arial, sans-serif; 4 | background-color: #f4f4f4; 5 | color: #333; 6 | margin: 0; 7 | padding: 0; 8 | } 9 | 10 | header { 11 | background-color: #333; 12 | color: #fff; 13 | padding: 15px 0; 14 | text-align: center; 15 | } 16 | 17 | header h1 { 18 | margin: 0; 19 | font-size: 2em; 20 | } 21 | 22 | header form { 23 | margin-top: 10px; 24 | } 25 | 26 | header input[type="text"] { 27 | padding: 10px; 28 | font-size: 1em; 29 | width: 60%; 30 | max-width: 400px; 31 | border: none; 32 | border-radius: 4px; 33 | } 34 | 35 | header button { 36 | padding: 10px 15px; 37 | font-size: 1em; 38 | background-color: #007BFF; 39 | color: #fff; 40 | border: none; 41 | border-radius: 4px; 42 | cursor: pointer; 43 | } 44 | 45 | header button:hover { 46 | background-color: #0056b3; 47 | } 48 | 49 | .articles { 50 | margin: 20px auto; 51 | padding: 0 20px; 52 | max-width: 800px; 53 | } 54 | 55 | article { 56 | background-color: #fff; 57 | padding: 20px; 58 | margin-bottom: 20px; 59 | border-radius: 8px; 60 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); 61 | } 62 | 63 | article h2 { 64 | margin-top: 0; 65 | font-size: 1.5em; 66 | } 67 | 68 | article h2 a { 69 | color: #007BFF; 70 | text-decoration: none; 71 | } 72 | 73 | article h2 a:hover { 74 | text-decoration: underline; 75 | } 76 | 77 | article p { 78 | margin: 10px 0; 79 | line-height: 1.6em; 
80 | } 81 | 82 | .pagination { 83 | text-align: center; 84 | margin: 20px 0; 85 | } 86 | 87 | .pagination a { 88 | padding: 10px 20px; 89 | margin: 0 5px; 90 | background-color: #007BFF; 91 | color: #fff; 92 | text-decoration: none; 93 | border-radius: 4px; 94 | display: inline-block; 95 | } 96 | 97 | .pagination a:hover { 98 | background-color: #0056b3; 99 | } 100 | 101 | .pagination a:disabled, 102 | .pagination a.disabled { 103 | background-color: #ddd; 104 | color: #666; 105 | pointer-events: none; 106 | } 107 | -------------------------------------------------------------------------------- /News Aggregator/templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | News Hub 6 | 7 | 8 |
9 | RSS Aggregator 10 | 11 | 12 | 13 | 14 | 
15 | {% block content %}{% endblock %} 16 | 17 | -------------------------------------------------------------------------------- /News Aggregator/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |
5 | {% for article in articles %} 6 | 11 | {% endfor %} 12 |
13 | 21 | {% endblock %} -------------------------------------------------------------------------------- /News Aggregator/templates/search_results.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |
Search Results For: {{ query }} 5 | 
6 | {% for article in articles %} 7 | 12 | {% endfor %} 13 |
14 | {% endblock %} -------------------------------------------------------------------------------- /Nuitka/editor.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from tkinter import filedialog 3 | 4 | root = tk.Tk() 5 | root.title("Notepad") 6 | 7 | text = tk.Text(root) 8 | text.pack(expand=True, fill='both') 9 | 10 | def open_file(): 11 | file = filedialog.askopenfilename() 12 | if file: 13 | with open(file, 'r') as f: 14 | text.delete(1.0, tk.END) 15 | text.insert(tk.END, f.read()) 16 | 17 | def save_file(): 18 | file = filedialog.asksaveasfilename(defaultextension=".txt") 19 | if file: 20 | with open(file, 'w') as f: 21 | f.write(text.get(1.0, tk.END)) 22 | 23 | menu = tk.Menu(root) 24 | file_menu = tk.Menu(menu, tearoff=0) 25 | file_menu.add_command(label="Open", command=open_file) 26 | file_menu.add_command(label="Save", command=save_file) 27 | file_menu.add_separator() 28 | file_menu.add_command(label="Exit", command=root.quit) 29 | menu.add_cascade(label="File", menu=file_menu) 30 | root.config(menu=menu) 31 | 32 | root.mainloop() 33 | 34 | -------------------------------------------------------------------------------- /Nuitka/editor_qt6.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from PySide6.QtWidgets import QApplication, QMainWindow, QTextEdit, QFileDialog 3 | from PySide6.QtGui import QAction 4 | 5 | class MainWindow(QMainWindow): 6 | def __init__(self): 7 | super().__init__() 8 | self.setWindowTitle("Notepad") 9 | self.editor = QTextEdit() 10 | self.setCentralWidget(self.editor) 11 | file_menu = self.menuBar().addMenu("File") 12 | open_action = QAction("Open", self) 13 | open_action.triggered.connect(self.open_file) 14 | save_action = QAction("Save", self) 15 | save_action.triggered.connect(self.save_file) 16 | exit_action = QAction("Exit", self) 17 | exit_action.triggered.connect(self.close) 18 | file_menu.addAction(open_action) 19 | file_menu.addAction(save_action) 20 | file_menu.addSeparator() 21 | file_menu.addAction(exit_action) 22 | 23 | def open_file(self): 24 | path, _ = QFileDialog.getOpenFileName(self) 25 | if path: 26 | with open(path, 'r') as f: 27 | self.editor.setPlainText(f.read()) 28 | 29 | def save_file(self): 30 | path, _ = QFileDialog.getSaveFileName(self, filter="Text Files (*.txt)") 31 | if path: 32 | with open(path, 'w') as f: 33 | f.write(self.editor.toPlainText()) 34 | 35 | if __name__ == "__main__": 36 | app = QApplication(sys.argv) 37 | window = MainWindow() 38 | window.show() 39 | sys.exit(app.exec()) 40 | 41 | -------------------------------------------------------------------------------- /Nuitka/main.py: -------------------------------------------------------------------------------- 1 | def hello_world(): 2 | print('Hello World') 3 | 4 | 5 | if __name__ == '__main__': 6 | hello_world() 7 | 8 | -------------------------------------------------------------------------------- /PDF Table Extraction/extract_camelot.py: -------------------------------------------------------------------------------- 1 | import camelot 2 | import pandas as pd 3 | 4 | 5 | # Lattice -> looks for clearly defined borders / lines like a grid, visible ruling lines between rows and columns 6 | lattice_tables = camelot.read_pdf('documents/safari.pdf', pages='all', flavor='lattice', suppress_stdout=False) 7 | 8 | # Stream -> analyzes text positioning and spaces between text, when structure is maintained through spacing 9 | stream_tables = 
camelot.read_pdf('documents/safari.pdf', pages='all', flavor='stream', suppress_stdout=False) 10 | 11 | 12 | for table in lattice_tables: 13 | print('Lattice Table') 14 | print(table.df) 15 | 16 | for table in stream_tables: 17 | print('Stream Table') 18 | print(table.df) 19 | 20 | -------------------------------------------------------------------------------- /PDF Table Extraction/extract_llmwhisperer.py: -------------------------------------------------------------------------------- 1 | # pip3 install llmwhisperer-client 2 | 3 | import time 4 | from unstract.llmwhisperer import LLMWhispererClientV2 5 | 6 | client = LLMWhispererClientV2(base_url="https://llmwhisperer-api.us-central.unstract.com/api/v2", api_key='') 7 | 8 | result = client.whisper(file_path="documents/scan-biogenx.pdf") 9 | 10 | print(result) 11 | 12 | while True:  # poll the extraction status until the document has been processed 13 | status = client.whisper_status(whisper_hash=result["whisper_hash"]) 14 | if status["status"] == "processed": 15 | resultx = client.whisper_retrieve( 16 | whisper_hash=result["whisper_hash"] 17 | ) 18 | break 19 | time.sleep(5) 20 | 21 | extracted_text = resultx['extraction']['result_text'] 22 | 23 | print(extracted_text) 24 | -------------------------------------------------------------------------------- /PDF Table Extraction/extract_pdfplumber.py: -------------------------------------------------------------------------------- 1 | import pdfplumber 2 | import pandas as pd 3 | 4 | tables = [] 5 | 6 | with pdfplumber.open('documents/safari.pdf') as pdf: 7 | for page in pdf.pages: 8 | tables_on_page = page.extract_tables({ 9 | "vertical_strategy": "text", 10 | "horizontal_strategy": "text", 11 | "intersection_x_tolerance": 10, 12 | "intersection_y_tolerance": 10 13 | }) 14 | 15 | if tables_on_page: 16 | for table in tables_on_page: 17 | if table: 18 | tables.append({ 19 | 'page': pdf.pages.index(page) + 1, 20 | 'data': table 21 | }) 22 | 23 | for table in tables: 24 | print(f"\nTable from page {table['page']}:") 25 | print(pd.DataFrame(table['data'])) 26 | print("-" * 50) 27 | -------------------------------------------------------------------------------- /PDF Table Extraction/extract_py2pdf.py: -------------------------------------------------------------------------------- 1 | import PyPDF2 2 | 3 | text = "" 4 | 5 | with open('documents/safari.pdf', 'rb') as file: 6 | pdf_reader = PyPDF2.PdfReader(file) 7 | num_pages = len(pdf_reader.pages) 8 | 9 | for page_num in range(num_pages): 10 | page = pdf_reader.pages[page_num] 11 | 12 | page_text = page.extract_text() 13 | 14 | text += page_text + "\n\n" 15 | 16 | print(text) 17 | 18 | -------------------------------------------------------------------------------- /PDF Table Extraction/extract_tabula.py: -------------------------------------------------------------------------------- 1 | import tabula 2 | import pandas as pd 3 | 4 | tables = tabula.read_pdf( 5 | 'documents/safari.pdf', 6 | pages='all', 7 | multiple_tables=True, 8 | lattice=True, # For tables with borders 9 | stream=True, # For tables without borders 10 | guess=False, 11 | pandas_options={'header': None}, 12 | ) 13 | 14 | for i, table in enumerate(tables, 1): 15 | print(table) 16 | 17 | -------------------------------------------------------------------------------- /Polling App Flask/app.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import pandas as pd 4 | from flask import Flask, render_template, request, redirect, url_for, make_response 5 | 6 | app = Flask(__name__, 
template_folder="templates") 7 | 8 | if not os.path.exists("polls.csv"): 9 | structure = { 10 | "id": [], 11 | "poll": [], 12 | "option1": [], 13 | "option2": [], 14 | "option3": [], 15 | "votes1": [], 16 | "votes2": [], 17 | "votes3": [] 18 | } 19 | 20 | pd.DataFrame(structure).set_index("id").to_csv("polls.csv") 21 | 22 | polls_df = pd.read_csv("polls.csv").set_index("id") 23 | 24 | 25 | @app.route("/") 26 | def index(): 27 | return render_template("index.html", polls=polls_df) 28 | 29 | 30 | @app.route("/polls/<id>") 31 | def polls(id): 32 | poll = polls_df.loc[int(id)] 33 | print(poll) 34 | return render_template("show_poll.html", poll=poll) 35 | 36 | 37 | @app.route("/vote//